file_name | prefix | suffix | middle
---|---|---|---
config.go | package precreator
import (
"errors"
"time"
"github.com/ivopetiz/influxdb/monitor/diagnostics"
"github.com/ivopetiz/influxdb/toml"
)
const (
// DefaultCheckInterval is the shard precreation check time if none is specified.
DefaultCheckInterval = 10 * time.Minute
// DefaultAdvancePeriod is the default period ahead of a shard group's
// end time at which its successor group is created.
DefaultAdvancePeriod = 30 * time.Minute
)
// Config represents the configuration for shard precreation.
type Config struct {
Enabled bool `toml:"enabled"`
CheckInterval toml.Duration `toml:"check-interval"`
AdvancePeriod toml.Duration `toml:"advance-period"`
}
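// For illustration, the matching TOML stanza would look roughly like the
// following (the key names come from the struct tags above; the section
// name is an assumption):
//
//	[shard-precreation]
//	enabled = true
//	check-interval = "10m"
//	advance-period = "30m"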
// NewConfig returns a new Config with defaults.
func NewConfig() Config |
// Validate returns an error if the Config is invalid.
func (c Config) Validate() error {
if !c.Enabled {
return nil
}
// TODO: Should we enforce a minimum interval?
// Polling every nanosecond, for instance, will greatly impact performance.
if c.CheckInterval <= 0 {
return errors.New("check-interval must be positive")
}
if c.AdvancePeriod <= 0 {
return errors.New("advance-period must be positive")
}
return nil
}
// Diagnostics returns a diagnostics representation of a subset of the Config.
func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) {
if !c.Enabled {
return diagnostics.RowFromMap(map[string]interface{}{
"enabled": false,
}), nil
}
return diagnostics.RowFromMap(map[string]interface{}{
"enabled": true,
"check-interval": c.CheckInterval,
"advance-period": c.AdvancePeriod,
}), nil
}
| {
return Config{
Enabled: true,
CheckInterval: toml.Duration(DefaultCheckInterval),
AdvancePeriod: toml.Duration(DefaultAdvancePeriod),
}
} |
day_4.rs | use regex::RegexSet;
mod utils;
fn main() {
let data = utils::load_input("./data/day_4.txt").unwrap();
let passports: Vec<&str> = data.split("\n\n").collect();
let set_1 = RegexSet::new(&[
r"byr:", r"iyr:", r"eyr:", r"hgt:", r"hcl:", r"ecl:", r"pid:",
])
.unwrap();
let set_2 = RegexSet::new(&[
r"\bbyr:(19[2-9][0-9]|200[0-2])\b",
r"\biyr:(201[0-9]|2020)\b",
r"\beyr:(202[0-9]|2030)\b",
r"\bhgt:(1[5-8][0-9]cm|19[0-3]cm|59in|6[0-9]in|7[0-6]in)\b",
r"\bhcl:#([a-f]|[0-9]){6}\b",
r"\becl:(amb|blu|brn|gry|grn|hzl|oth)\b",
r"\bpid:[0-9]{9}\b",
])
.unwrap();
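// The patterns above encode part 2's field rules: byr 1920-2002,
// iyr 2010-2020, eyr 2020-2030, hgt 150-193cm or 59-76in, hcl '#'
// followed by six hex digits, ecl one of seven colors, pid nine digits.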
let mut valid_passport_count_1 = 0;
let mut valid_passport_count_2 = 0;
for passport in passports.iter() {
let matches: Vec<_> = set_1.matches(passport).iter().collect();
if matches.len() == set_1.len() |
let matches: Vec<_> = set_2.matches(passport).iter().collect();
if matches.len() == set_2.len() {
valid_passport_count_2 += 1;
}
}
println!("Answer 1/2: {}", valid_passport_count_1);
println!("Answer 2/2: {}", valid_passport_count_2);
}
| {
valid_passport_count_1 += 1;
} |
test.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Testing candidates
//
// After candidates have been simplified, the only match pairs that
// remain are those that require some sort of test. The functions here
// identify what tests are needed, perform the tests, and then filter
// the candidates based on the result.
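// As an illustrative sketch (not taken from this file), lowering
//
//     match x { Some(0) => ..., Some(_) => ..., None => ... }
//
// first needs a variant switch on the Option discriminant and then,
// within the Some arm, an integer switch on the payload.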
use build::Builder;
use build::matches::{Candidate, MatchPair, Test, TestKind};
use hair::*;
use rustc_data_structures::fnv::FnvHashMap;
use rustc::middle::const_val::ConstVal;
use rustc::ty::{self, Ty};
use rustc::mir::repr::*;
use syntax::codemap::Span;
impl<'a,'tcx> Builder<'a,'tcx> {
/// Identifies what test is needed to decide if `match_pair` is applicable.
///
/// It is a bug to call this with a simplifyable pattern.
pub fn test<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> Test<'tcx> |
pub fn add_cases_to_switch<'pat>(&mut self,
test_lvalue: &Lvalue<'tcx>,
candidate: &Candidate<'pat, 'tcx>,
switch_ty: Ty<'tcx>,
options: &mut Vec<ConstVal>,
indices: &mut FnvHashMap<ConstVal, usize>)
-> bool
{
let match_pair = match candidate.match_pairs.iter().find(|mp| mp.lvalue == *test_lvalue) {
Some(match_pair) => match_pair,
_ => { return false; }
};
match *match_pair.pattern.kind {
PatternKind::Constant { ref value } => {
// if the lvalues match, the type should match
assert_eq!(match_pair.pattern.ty, switch_ty);
indices.entry(value.clone())
.or_insert_with(|| {
options.push(value.clone());
options.len() - 1
});
true
}
PatternKind::Range { .. } |
PatternKind::Variant { .. } |
PatternKind::Slice { .. } |
PatternKind::Array { .. } |
PatternKind::Wild |
PatternKind::Binding { .. } |
PatternKind::Leaf { .. } |
PatternKind::Deref { .. } => {
// don't know how to add these patterns to a switch
false
}
}
}
/// Generates the code to perform a test.
pub fn perform_test(&mut self,
block: BasicBlock,
lvalue: &Lvalue<'tcx>,
test: &Test<'tcx>)
-> Vec<BasicBlock> {
let scope_id = self.innermost_scope_id();
match test.kind {
TestKind::Switch { adt_def } => {
let num_enum_variants = self.hir.num_variants(adt_def);
let target_blocks: Vec<_> =
(0..num_enum_variants).map(|_| self.cfg.start_new_block())
.collect();
self.cfg.terminate(block, scope_id, test.span, TerminatorKind::Switch {
discr: lvalue.clone(),
adt_def: adt_def,
targets: target_blocks.clone()
});
target_blocks
}
TestKind::SwitchInt { switch_ty, ref options, indices: _ } => {
let otherwise = self.cfg.start_new_block();
let targets: Vec<_> =
options.iter()
.map(|_| self.cfg.start_new_block())
.chain(Some(otherwise))
.collect();
self.cfg.terminate(block,
scope_id,
test.span,
TerminatorKind::SwitchInt {
discr: lvalue.clone(),
switch_ty: switch_ty,
values: options.clone(),
targets: targets.clone(),
});
targets
}
TestKind::Eq { ref value, mut ty } => {
let mut val = Operand::Consume(lvalue.clone());
// If we're using b"..." as a pattern, we need to insert an
// unsizing coercion, as the byte string has the type &[u8; N].
let expect = if let ConstVal::ByteStr(ref bytes) = *value {
let tcx = self.hir.tcx();
// Unsize the lvalue to &[u8], too, if necessary.
if let ty::TyRef(region, mt) = ty.sty {
if let ty::TyArray(_, _) = mt.ty.sty {
ty = tcx.mk_imm_ref(region, tcx.mk_slice(tcx.types.u8));
let val_slice = self.temp(ty);
self.cfg.push_assign(block, scope_id, test.span, &val_slice,
Rvalue::Cast(CastKind::Unsize, val, ty));
val = Operand::Consume(val_slice);
}
}
assert!(ty.is_slice());
let array_ty = tcx.mk_array(tcx.types.u8, bytes.len());
let array_ref = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), array_ty);
let array = self.literal_operand(test.span, array_ref, Literal::Value {
value: value.clone()
});
let slice = self.temp(ty);
self.cfg.push_assign(block, scope_id, test.span, &slice,
Rvalue::Cast(CastKind::Unsize, array, ty));
Operand::Consume(slice)
} else {
self.literal_operand(test.span, ty, Literal::Value {
value: value.clone()
})
};
// Use PartialEq::eq for &str and &[u8] slices, instead of BinOp::Eq.
let fail = self.cfg.start_new_block();
if let ty::TyRef(_, mt) = ty.sty {
assert!(ty.is_slice());
let eq_def_id = self.hir.tcx().lang_items.eq_trait().unwrap();
let ty = mt.ty;
let (mty, method) = self.hir.trait_method(eq_def_id, "eq", ty, vec![ty]);
let bool_ty = self.hir.bool_ty();
let eq_result = self.temp(bool_ty);
let eq_block = self.cfg.start_new_block();
let cleanup = self.diverge_cleanup();
self.cfg.terminate(block, scope_id, test.span, TerminatorKind::Call {
func: Operand::Constant(Constant {
span: test.span,
ty: mty,
literal: method
}),
args: vec![val, expect],
destination: Some((eq_result.clone(), eq_block)),
cleanup: cleanup,
});
// check the result
let block = self.cfg.start_new_block();
self.cfg.terminate(eq_block, scope_id, test.span, TerminatorKind::If {
cond: Operand::Consume(eq_result),
targets: (block, fail),
});
vec![block, fail]
} else {
let block = self.compare(block, fail, test.span, BinOp::Eq, expect, val);
vec![block, fail]
}
}
TestKind::Range { ref lo, ref hi, ty } => {
// Test `val` by computing `lo <= val && val <= hi`, using primitive comparisons.
let lo = self.literal_operand(test.span, ty.clone(), lo.clone());
let hi = self.literal_operand(test.span, ty.clone(), hi.clone());
let val = Operand::Consume(lvalue.clone());
let fail = self.cfg.start_new_block();
let block = self.compare(block, fail, test.span, BinOp::Le, lo, val.clone());
let block = self.compare(block, fail, test.span, BinOp::Le, val, hi);
vec![block, fail]
}
TestKind::Len { len, op } => {
let (usize_ty, bool_ty) = (self.hir.usize_ty(), self.hir.bool_ty());
let (actual, result) = (self.temp(usize_ty), self.temp(bool_ty));
// actual = len(lvalue)
self.cfg.push_assign(block, scope_id, test.span,
&actual, Rvalue::Len(lvalue.clone()));
// expected = <N>
let expected = self.push_usize(block, scope_id, test.span, len);
// result = actual == expected OR result = actual < expected
self.cfg.push_assign(block,
scope_id,
test.span,
&result,
Rvalue::BinaryOp(op,
Operand::Consume(actual),
Operand::Consume(expected)));
// branch based on result
let target_blocks: Vec<_> = vec![self.cfg.start_new_block(),
self.cfg.start_new_block()];
self.cfg.terminate(block, scope_id, test.span, TerminatorKind::If {
cond: Operand::Consume(result),
targets: (target_blocks[0], target_blocks[1])
});
target_blocks
}
}
}
fn compare(&mut self,
block: BasicBlock,
fail_block: BasicBlock,
span: Span,
op: BinOp,
left: Operand<'tcx>,
right: Operand<'tcx>) -> BasicBlock {
let bool_ty = self.hir.bool_ty();
let result = self.temp(bool_ty);
// result = op(left, right)
let scope_id = self.innermost_scope_id();
self.cfg.push_assign(block, scope_id, span, &result,
Rvalue::BinaryOp(op, left, right));
// branch based on result
let target_block = self.cfg.start_new_block();
self.cfg.terminate(block, scope_id, span, TerminatorKind::If {
cond: Operand::Consume(result),
targets: (target_block, fail_block)
});
target_block
}
/// Given that we are performing `test` against `test_lvalue`,
/// this function sorts out what the status of `candidate` will be
/// after the test. The `resulting_candidates` vector stores, for
/// each possible outcome of `test`, a vector of the candidates
/// that will result. This fn should add a (possibly modified)
/// clone of candidate into `resulting_candidates` wherever
/// appropriate.
///
/// So, for example, if this candidate is `x @ Some(P0)` and the
/// test is a variant test, then we would add `(x as Option).0 @
/// P0` to the `resulting_candidates` entry corresponding to the
/// variant `Some`.
///
/// However, in some cases, the test may just not be relevant to
/// candidate. For example, suppose we are testing whether `foo.x == 22`,
/// but in one match arm we have `Foo { x: _, ... }`... in that case,
/// the test for the value of `x` has no particular relevance
/// to this candidate. In such cases, this function just returns false
/// without doing anything. This is used by the overall `match_candidates`
/// algorithm to structure the match as a whole. See `match_candidates` for
/// more details.
///
/// FIXME(#29623). In some cases, we have some tricky choices to
/// make. For example, if we are testing that `x == 22`, but the
/// candidate is `x @ 13..55`, what should we do? In the event
/// that the test is true, we know that the candidate applies, but
/// in the event of false, we don't know that it *doesn't*
/// apply. For now, we return false, indicating that the test does
/// not apply to this candidate, though we might get tighter match
/// code by doing something a bit different.
pub fn sort_candidate<'pat>(&mut self,
test_lvalue: &Lvalue<'tcx>,
test: &Test<'tcx>,
candidate: &Candidate<'pat, 'tcx>,
resulting_candidates: &mut [Vec<Candidate<'pat, 'tcx>>])
-> bool {
// Find the match_pair for this lvalue (if any). At present,
// afaik, there can be at most one. (In the future, if we
// adopted a more general `@` operator, there might be more
// than one, but it'd be very unusual to have two sides that
// both require tests; you'd expect one side to be simplified
// away.)
let tested_match_pair = candidate.match_pairs.iter()
.enumerate()
.filter(|&(_, mp)| mp.lvalue == *test_lvalue)
.next();
let (match_pair_index, match_pair) = match tested_match_pair {
Some(pair) => pair,
None => {
// We are not testing this lvalue. Therefore, this
// candidate applies to ALL outcomes.
return false;
}
};
match test.kind {
// If we are performing a variant switch, then this
// informs variant patterns, but nothing else.
TestKind::Switch { adt_def: tested_adt_def } => {
match *match_pair.pattern.kind {
PatternKind::Variant { adt_def, variant_index, ref subpatterns } => {
assert_eq!(adt_def, tested_adt_def);
let new_candidate =
self.candidate_after_variant_switch(match_pair_index,
adt_def,
variant_index,
subpatterns,
candidate);
resulting_candidates[variant_index].push(new_candidate);
true
}
_ => {
false
}
}
}
// If we are performing a switch over integers, then this informs integer
// equality, but nothing else.
//
// FIXME(#29623) we could use TestKind::Range to rule
// things out here, in some cases.
TestKind::SwitchInt { switch_ty: _, options: _, ref indices } => {
match *match_pair.pattern.kind {
PatternKind::Constant { ref value }
if is_switch_ty(match_pair.pattern.ty) => {
let index = indices[value];
let new_candidate = self.candidate_without_match_pair(match_pair_index,
candidate);
resulting_candidates[index].push(new_candidate);
true
}
_ => {
false
}
}
}
// If we are performing a length check, then this
// informs slice patterns, but nothing else.
TestKind::Len { .. } => {
let pattern_test = self.test(&match_pair);
match *match_pair.pattern.kind {
PatternKind::Slice { .. } if pattern_test.kind == test.kind => {
let mut new_candidate = candidate.clone();
// Set up the MatchKind to simplify this like an array.
new_candidate.match_pairs[match_pair_index]
.slice_len_checked = true;
resulting_candidates[0].push(new_candidate);
true
}
_ => false
}
}
TestKind::Eq { .. } |
TestKind::Range { .. } => {
// These are all binary tests.
//
// FIXME(#29623) we can be more clever here
let pattern_test = self.test(&match_pair);
if pattern_test.kind == test.kind {
let new_candidate = self.candidate_without_match_pair(match_pair_index,
candidate);
resulting_candidates[0].push(new_candidate);
true
} else {
false
}
}
}
}
fn candidate_without_match_pair<'pat>(&mut self,
match_pair_index: usize,
candidate: &Candidate<'pat, 'tcx>)
-> Candidate<'pat, 'tcx> {
let other_match_pairs =
candidate.match_pairs.iter()
.enumerate()
.filter(|&(index, _)| index != match_pair_index)
.map(|(_, mp)| mp.clone())
.collect();
Candidate {
span: candidate.span,
match_pairs: other_match_pairs,
bindings: candidate.bindings.clone(),
guard: candidate.guard.clone(),
arm_index: candidate.arm_index,
}
}
fn candidate_after_variant_switch<'pat>(&mut self,
match_pair_index: usize,
adt_def: ty::AdtDef<'tcx>,
variant_index: usize,
subpatterns: &'pat [FieldPattern<'tcx>],
candidate: &Candidate<'pat, 'tcx>)
-> Candidate<'pat, 'tcx> {
let match_pair = &candidate.match_pairs[match_pair_index];
// So, if we have a match-pattern like `x @ Enum::Variant(P1, P2)`,
// we want to create a set of derived match-patterns like
// `(x as Variant).0 @ P1` and `(x as Variant).1 @ P2`.
let elem = ProjectionElem::Downcast(adt_def, variant_index);
let downcast_lvalue = match_pair.lvalue.clone().elem(elem); // `(x as Variant)`
let consequent_match_pairs =
subpatterns.iter()
.map(|subpattern| {
// e.g., `(x as Variant).0`
let lvalue = downcast_lvalue.clone().field(subpattern.field,
subpattern.pattern.ty);
// e.g., `(x as Variant).0 @ P1`
MatchPair::new(lvalue, &subpattern.pattern)
});
// In addition, we need all the other match pairs from the old candidate.
let other_match_pairs =
candidate.match_pairs.iter()
.enumerate()
.filter(|&(index, _)| index != match_pair_index)
.map(|(_, mp)| mp.clone());
let all_match_pairs = consequent_match_pairs.chain(other_match_pairs).collect();
Candidate {
span: candidate.span,
match_pairs: all_match_pairs,
bindings: candidate.bindings.clone(),
guard: candidate.guard.clone(),
arm_index: candidate.arm_index,
}
}
fn error_simplifyable<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> ! {
span_bug!(match_pair.pattern.span,
"simplifyable pattern found: {:?}",
match_pair.pattern)
}
}
fn is_switch_ty<'tcx>(ty: Ty<'tcx>) -> bool {
ty.is_integral() || ty.is_char() || ty.is_bool()
}
| {
match *match_pair.pattern.kind {
PatternKind::Variant { ref adt_def, variant_index: _, subpatterns: _ } => {
Test {
span: match_pair.pattern.span,
kind: TestKind::Switch { adt_def: adt_def.clone() },
}
}
PatternKind::Constant { .. }
if is_switch_ty(match_pair.pattern.ty) => {
// for integers, we use a SwitchInt match, which allows
// us to handle more cases
Test {
span: match_pair.pattern.span,
kind: TestKind::SwitchInt {
switch_ty: match_pair.pattern.ty,
// these maps are empty to start; cases are
// added below in add_cases_to_switch
options: vec![],
indices: FnvHashMap(),
}
}
}
PatternKind::Constant { ref value } => {
Test {
span: match_pair.pattern.span,
kind: TestKind::Eq {
value: value.clone(),
ty: match_pair.pattern.ty.clone()
}
}
}
PatternKind::Range { ref lo, ref hi } => {
Test {
span: match_pair.pattern.span,
kind: TestKind::Range {
lo: lo.clone(),
hi: hi.clone(),
ty: match_pair.pattern.ty.clone(),
},
}
}
PatternKind::Slice { ref prefix, ref slice, ref suffix }
if !match_pair.slice_len_checked => {
let len = prefix.len() + suffix.len();
let op = if slice.is_some() {
BinOp::Ge
} else {
BinOp::Eq
};
Test {
span: match_pair.pattern.span,
kind: TestKind::Len { len: len as u64, op: op },
}
}
PatternKind::Array { .. } |
PatternKind::Slice { .. } |
PatternKind::Wild |
PatternKind::Binding { .. } |
PatternKind::Leaf { .. } |
PatternKind::Deref { .. } => {
self.error_simplifyable(match_pair)
}
}
} |
host_port_range.rs | // Generated from definition io.k8s.api.extensions.v1beta1.HostPortRange
/// Host Port Range defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct HostPortRange {
/// max is the end of the range, inclusive.
pub max: i32,
/// min is the start of the range, inclusive.
pub min: i32,
}
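// Illustrative only: in a PodSecurityPolicy manifest this type typically
// appears as an entry under `hostPorts`, e.g.
//
//     hostPorts:
//     - min: 8000
//       max: 8080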
impl<'de> serde::Deserialize<'de> for HostPortRange {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_max,
Key_min,
Other,
}
impl<'de> serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
Ok(match v {
"max" => Field::Key_max,
"min" => Field::Key_min,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = HostPortRange;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("HostPortRange")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
let mut value_max: Option<i32> = None;
let mut value_min: Option<i32> = None;
while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_max => value_max = Some(serde::de::MapAccess::next_value(&mut map)?),
Field::Key_min => value_min = Some(serde::de::MapAccess::next_value(&mut map)?),
Field::Other => | ,
}
}
Ok(HostPortRange {
max: value_max.ok_or_else(|| serde::de::Error::missing_field("max"))?,
min: value_min.ok_or_else(|| serde::de::Error::missing_field("min"))?,
})
}
}
deserializer.deserialize_struct(
"HostPortRange",
&[
"max",
"min",
],
Visitor,
)
}
}
impl serde::Serialize for HostPortRange {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
let mut state = serializer.serialize_struct(
"HostPortRange",
2,
)?;
serde::ser::SerializeStruct::serialize_field(&mut state, "max", &self.max)?;
serde::ser::SerializeStruct::serialize_field(&mut state, "min", &self.min)?;
serde::ser::SerializeStruct::end(state)
}
}
| { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; } |
__main__.py | from metamvc.DatabaseUserData import DatabaseUserData
def main():
| DatabaseUserData() |
|
global_shortcut.rs | // Copyright 2019-2021 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use super::InvokeContext;
use crate::{api::ipc::CallbackFn, Runtime};
use serde::Deserialize;
use tauri_macros::{module_command_handler, CommandModule};
#[cfg(global_shortcut_all)]
use crate::runtime::GlobalShortcutManager;
/// The API descriptor.
#[derive(Deserialize, CommandModule)]
#[serde(tag = "cmd", rename_all = "camelCase")]
pub enum Cmd {
/// Register a global shortcut.
Register {
shortcut: String,
handler: CallbackFn,
},
/// Register a list of global shortcuts.
RegisterAll {
shortcuts: Vec<String>,
handler: CallbackFn,
},
/// Unregister a global shortcut.
Unregister { shortcut: String },
/// Unregisters all registered shortcuts.
UnregisterAll,
/// Determines whether the given hotkey is registered or not.
IsRegistered { shortcut: String },
}
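// Given the serde attributes above (tag = "cmd", camelCase variants), a
// register invocation arrives as JSON shaped roughly like this (the
// shortcut and handler values are illustrative):
//
//     { "cmd": "register", "shortcut": "CmdOrControl+X", "handler": 1 }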
impl Cmd {
#[module_command_handler(global_shortcut_all, "globalShortcut > all")]
fn register<R: Runtime>(
context: InvokeContext<R>,
shortcut: String,
handler: CallbackFn,
) -> super::Result<()> {
let mut manager = context.window.app_handle.global_shortcut_manager();
register_shortcut(context.window, &mut manager, shortcut, handler)?;
Ok(())
}
#[module_command_handler(global_shortcut_all, "globalShortcut > all")]
fn register_all<R: Runtime>(
context: InvokeContext<R>,
shortcuts: Vec<String>,
handler: CallbackFn,
) -> super::Result<()> {
let mut manager = context.window.app_handle.global_shortcut_manager();
for shortcut in shortcuts {
register_shortcut(context.window.clone(), &mut manager, shortcut, handler)?;
}
Ok(())
}
#[module_command_handler(global_shortcut_all, "globalShortcut > all")]
fn unregister<R: Runtime>(context: InvokeContext<R>, shortcut: String) -> super::Result<()> {
context
.window
.app_handle
.global_shortcut_manager()
.unregister(&shortcut)
.map_err(crate::error::into_anyhow)?;
Ok(())
}
#[module_command_handler(global_shortcut_all, "globalShortcut > all")]
fn unregister_all<R: Runtime>(context: InvokeContext<R>) -> super::Result<()> {
context
.window
.app_handle
.global_shortcut_manager()
.unregister_all()
.map_err(crate::error::into_anyhow)?;
Ok(())
}
#[module_command_handler(global_shortcut_all, "globalShortcut > all")]
fn is_registered<R: Runtime>(context: InvokeContext<R>, shortcut: String) -> super::Result<bool> {
context
.window
.app_handle
.global_shortcut_manager()
.is_registered(&shortcut)
.map_err(crate::error::into_anyhow)
}
}
#[cfg(global_shortcut_all)]
fn | <R: Runtime>(
window: crate::Window<R>,
manager: &mut R::GlobalShortcutManager,
shortcut: String,
handler: CallbackFn,
) -> super::Result<()> {
let accelerator = shortcut.clone();
manager
.register(&shortcut, move || {
let callback_string = crate::api::ipc::format_callback(handler, &accelerator)
.expect("unable to serialize shortcut string to json");
let _ = window.eval(callback_string.as_str());
})
.map_err(crate::error::into_anyhow)?;
Ok(())
}
#[cfg(test)]
mod tests {
use crate::api::ipc::CallbackFn;
#[tauri_macros::module_command_test(global_shortcut_all, "globalShortcut > all")]
#[quickcheck_macros::quickcheck]
fn register(shortcut: String, handler: CallbackFn) {
let ctx = crate::test::mock_invoke_context();
super::Cmd::register(ctx.clone(), shortcut.clone(), handler).unwrap();
assert!(super::Cmd::is_registered(ctx, shortcut).unwrap());
}
#[tauri_macros::module_command_test(global_shortcut_all, "globalShortcut > all")]
#[quickcheck_macros::quickcheck]
fn register_all(shortcuts: Vec<String>, handler: CallbackFn) {
let ctx = crate::test::mock_invoke_context();
super::Cmd::register_all(ctx.clone(), shortcuts.clone(), handler).unwrap();
for shortcut in shortcuts {
assert!(super::Cmd::is_registered(ctx.clone(), shortcut).unwrap(),);
}
}
#[tauri_macros::module_command_test(global_shortcut_all, "globalShortcut > all")]
#[quickcheck_macros::quickcheck]
fn unregister(shortcut: String) {
let ctx = crate::test::mock_invoke_context();
super::Cmd::register(ctx.clone(), shortcut.clone(), CallbackFn(0)).unwrap();
super::Cmd::unregister(ctx.clone(), shortcut.clone()).unwrap();
assert!(!super::Cmd::is_registered(ctx, shortcut).unwrap());
}
#[tauri_macros::module_command_test(global_shortcut_all, "globalShortcut > all")]
#[quickcheck_macros::quickcheck]
fn unregister_all() {
let shortcuts = vec!["CTRL+X".to_string(), "SUPER+C".to_string(), "D".to_string()];
let ctx = crate::test::mock_invoke_context();
super::Cmd::register_all(ctx.clone(), shortcuts.clone(), CallbackFn(0)).unwrap();
super::Cmd::unregister_all(ctx.clone()).unwrap();
for shortcut in shortcuts {
assert!(!super::Cmd::is_registered(ctx.clone(), shortcut).unwrap(),);
}
}
#[tauri_macros::module_command_test(global_shortcut_all, "globalShortcut > all")]
#[quickcheck_macros::quickcheck]
fn is_registered(shortcut: String) {
let ctx = crate::test::mock_invoke_context();
assert!(!super::Cmd::is_registered(ctx.clone(), shortcut.clone()).unwrap(),);
super::Cmd::register(ctx.clone(), shortcut.clone(), CallbackFn(0)).unwrap();
assert!(super::Cmd::is_registered(ctx, shortcut).unwrap());
}
}
| register_shortcut |
Navigation.py | # -*-coding:Utf-8 -*
from mplotlab import App
from matplotlib.backend_bases import NavigationToolbar2
import wx
class Cursors:
# this class is only used as a simple namespace
HAND, POINTER, SELECT_REGION, MOVE = list(range(4))
cursors = Cursors()
cursord = {
cursors.MOVE : wx.CURSOR_HAND,
cursors.HAND : wx.CURSOR_HAND,
cursors.POINTER : wx.CURSOR_ARROW,
cursors.SELECT_REGION : wx.CURSOR_CROSS,
}
class Navigation(NavigationToolbar2):
def __init__(self,*a,**k):
NavigationToolbar2.__init__(self, *a,**k)
def _init_toolbar(self,*args,**kwargs):
pass
def set_message(self,s):
""" display in the status bar
the mouseover data (x,y)
"""
try:
App().mainWin.GetStatusBar().SetStatusText(s,0)
except:
pass
def set_cursor(self, cursor):
cursor = wx.StockCursor(cursord[cursor])
| self.canvas.SetCursor( cursor )
def dynamic_update(self):
d = self._idle
self._idle = False
if d:
self.canvas.draw()
self._idle = True
def press(self, event):
if self._active == 'ZOOM':
self.wxoverlay = wx.Overlay()
def release(self, event):
if self._active == 'ZOOM':
# When the mouse is released we reset the overlay and it
# restores the former content to the window.
self.wxoverlay.Reset()
del self.wxoverlay
def draw_rubberband(self, event, x0, y0, x1, y1):
# Use an Overlay to draw a rubberband-like bounding box.
dc = wx.ClientDC(self.canvas)
odc = wx.DCOverlay(self.wxoverlay, dc)
odc.Clear()
# Mac's DC is already the same as a GCDC, and it causes
# problems with the overlay if we try to use an actual
# wx.GCDC so don't try it.
if 'wxMac' not in wx.PlatformInfo:
dc = wx.GCDC(dc)
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
if y1<y0: y0, y1 = y1, y0
if x1<y0: x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = wx.Rect(x0, y0, w, h)
rubberBandColor = '#C0C0FF' # or load from config?
# Set a pen for the border
color = wx.NamedColour(rubberBandColor)
dc.SetPen(wx.Pen(color, 1))
# use the same color, plus alpha for the brush
r, g, b = color.Get()
color.Set(r,g,b, 0x60)
dc.SetBrush(wx.Brush(color))
dc.DrawRectangleRect(rect) | |
traefik_test.go | package addons
import (
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/stretchr/testify/require"
v12 "k8s.io/api/apps/v1"
)
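// These tests render the chart through terratest's helm helpers and then
// assert on the presence and shape of the generated Kubernetes resources.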
func Test_shouldNotContainTraefikResourcesWhenDisabled(t *testing.T) {
helmChartParser := NewHelmConfigParser(
NewHelmTest(t, helmChartRelativePath, map[string]string{
"traefik.enabled": "false",
}),
)
for _, i := range traefikResources {
require.False(t, helmChartParser.Contains(SearchResourceOption{
Name: i.Name,
Kind: i.Kind,
}))
}
}
func Test_shouldContainTraefikResourcesWhenEnabled(t *testing.T) {
helmChartParser := NewHelmConfigParser(
NewHelmTest(t, helmChartRelativePath, map[string]string{
"traefik.enabled": "true",
}),
)
for _, i := range traefikResources {
require.True(t, helmChartParser.Contains(SearchResourceOption{
Name: i.Name,
Kind: i.Kind,
}))
}
}
func Test_shouldBeAbleToSetUpServiceTypeAsLoadBalancer(t *testing.T) {
helmChartParser := NewHelmConfigParser(
NewHelmTest(t, helmChartRelativePath, map[string]string{
"traefik.enabled": "true",
"traefik.service.type": "LoadBalancer",
}),
)
var d DeploymentMetadata
var list string
for _, slice := range helmChartParser.SlicedResource {
helm.UnmarshalK8SYaml(helmChartParser.T, slice, &d)
if d.Kind == "List" {
list = slice
break
}
}
require.True(t, len(list) != 0)
require.Contains(t, list, "LoadBalancer")
}
func Test_shouldBeAbleToSetUpServiceTypeAsNodePort(t *testing.T) {
helmChartParser := NewHelmConfigParser(
NewHelmTest(t, helmChartRelativePath, map[string]string{
"traefik.enabled": "true",
"traefik.service.type": "NodePort",
}),
)
var d DeploymentMetadata
var list string
for _, slice := range helmChartParser.SlicedResource {
helm.UnmarshalK8SYaml(helmChartParser.T, slice, &d)
if d.Kind == "List" {
list = slice
break
}
}
require.True(t, len(list) != 0)
require.Contains(t, list, "NodePort")
}
func Test_hasRoleWhenRbacEnabled(t *testing.T) {
helmChartParser := NewHelmConfigParser(
NewHelmTest(t, helmChartRelativePath, map[string]string{
"traefik.enabled": "true",
"traefik.rbac.enabled": "true",
}),
)
require.True(t, helmChartParser.Contains(SearchResourceOption{
Name: "pega-traefik",
Kind: "ClusterRole",
}))
require.True(t, helmChartParser.Contains(SearchResourceOption{
Name: "pega-traefik",
Kind: "ServiceAccount",
}))
require.True(t, helmChartParser.Contains(SearchResourceOption{
Name: "pega-traefik",
Kind: "ClusterRoleBinding",
}))
}
func Test_noRoleWhenRbacDisabled(t *testing.T) {
helmChartParser := NewHelmConfigParser(
NewHelmTest(t, helmChartRelativePath, map[string]string{
"traefik.enabled": "true",
"traefik.rbac.enabled": "false",
}),
)
require.False(t, helmChartParser.Contains(SearchResourceOption{
Name: "pega-traefik",
Kind: "ClusterRole",
}))
}
func Test_hasSecretWhenSSLEnabled(t *testing.T) {
helmChartParser := NewHelmConfigParser(
NewHelmTest(t, helmChartRelativePath, map[string]string{
"traefik.enabled": "true",
"traefik.ports.websecure.tls.enabled": "true",
}),
)
var deployment v12.Deployment
helmChartParser.Find(SearchResourceOption{
Name: "pega-traefik",
Kind: "Deployment",
}, &deployment)
require.Contains(t, deployment.Spec.Template.Spec.Containers[0].Args, "--entrypoints.websecure.http.tls=true")
}
func Test_hasNoSecretWhenSSLDisabled(t *testing.T) {
helmChartParser := NewHelmConfigParser(
NewHelmTest(t, helmChartRelativePath, map[string]string{
"traefik.enabled": "true",
"traefik.ports.websecure.tls.enabled": "false",
}),
)
var deployment v12.Deployment
helmChartParser.Find(SearchResourceOption{
Name: "pega-traefik",
Kind: "Deployment",
}, &deployment)
require.NotContains(t, deployment.Spec.Template.Spec.Containers[0].Args, "--entrypoints.websecure.http.tls=true")
}
func Test_checkResourceRequests(t *testing.T) |
func Test_checkResourceLimits(t *testing.T) {
helmChartParser := NewHelmConfigParser(
NewHelmTest(t, helmChartRelativePath, map[string]string{
"traefik.enabled": "true",
"traefik.resources.limits.cpu": "600m",
"traefik.resources.limits.memory": "600Mi",
}),
)
var deployment v12.Deployment
helmChartParser.Find(SearchResourceOption{
Name: "pega-traefik",
Kind: "Deployment",
}, &deployment)
require.Equal(t, "600m", deployment.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String())
require.Equal(t, "600Mi", deployment.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String())
}
func Test_checkDefaultResourceRequests(t *testing.T) {
helmChartParser := NewHelmConfigParser(
NewHelmTest(t, helmChartRelativePath, map[string]string{
"traefik.enabled": "true",
}),
)
var deployment v12.Deployment
helmChartParser.Find(SearchResourceOption{
Name: "pega-traefik",
Kind: "Deployment",
}, &deployment)
require.Equal(t, "200m", deployment.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().String())
require.Equal(t, "200Mi", deployment.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String())
}
func Test_checkDefaultResourceLimits(t *testing.T) {
helmChartParser := NewHelmConfigParser(
NewHelmTest(t, helmChartRelativePath, map[string]string{
"traefik.enabled": "true",
}),
)
var deployment v12.Deployment
helmChartParser.Find(SearchResourceOption{
Name: "pega-traefik",
Kind: "Deployment",
}, &deployment)
require.Equal(t, "500m", deployment.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String())
require.Equal(t, "500Mi", deployment.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String())
}
var traefikResources = []SearchResourceOption{
{
Name: "pega-traefik",
Kind: "ServiceAccount",
},
{
Name: "pega-traefik",
Kind: "ClusterRole",
},
{
Name: "pega-traefik",
Kind: "ClusterRoleBinding",
},
{
Name: "pega-traefik",
Kind: "Deployment",
},
{
Name: "pega-traefik",
Kind: "List",
},
{
Name: "pega-traefik-dashboard",
Kind: "IngressRoute",
},
}
| {
helmChartParser := NewHelmConfigParser(
NewHelmTest(t, helmChartRelativePath, map[string]string{
"traefik.enabled": "true",
"traefik.resources.requests.cpu": "300m",
"traefik.resources.requests.memory": "300Mi",
}),
)
var deployment v12.Deployment
helmChartParser.Find(SearchResourceOption{
Name: "pega-traefik",
Kind: "Deployment",
}, &deployment)
require.Equal(t, "300m", deployment.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().String())
require.Equal(t, "300Mi", deployment.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String())
} |
billing_ee.go | // +build ee
package billing
import (
"net/http"
"github.com/porter-dev/porter/api/server/shared"
"github.com/porter-dev/porter/api/server/shared/config"
"github.com/porter-dev/porter/ee/api/server/handlers/billing"
)
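// The handlers are declared as package-level vars so that init() below can
// wire in the EE implementations; this file only builds under the "ee" tag.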
var NewBillingGetTokenHandler func(
config *config.Config,
decoderValidator shared.RequestDecoderValidator,
writer shared.ResultWriter,
) http.Handler
var NewBillingWebhookHandler func(
config *config.Config,
decoderValidator shared.RequestDecoderValidator,
) http.Handler
var NewBillingAddProjectHandler func(
config *config.Config,
decoderValidator shared.RequestDecoderValidator,
) http.Handler
func init() | {
NewBillingGetTokenHandler = billing.NewBillingGetTokenHandler
NewBillingWebhookHandler = billing.NewBillingWebhookHandler
NewBillingAddProjectHandler = billing.NewBillingAddProjectHandler
} |
|
aamonitor_filtered.py | #!/bin/env python
#==========================================================================
# (c) 2004-2005 Total Phase, Inc.
#--------------------------------------------------------------------------
# Project : Aardvark Sample Code
# File : aamonitor_filtered.py
#--------------------------------------------------------------------------
# Perform I2C monitoring functions with the Aardvark I2C/SPI adapter with
# the ability to filter the data based on slave address.
#--------------------------------------------------------------------------
# Redistribution and use of this file in source and binary forms, with
# or without modification, are permitted.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==========================================================================
#==========================================================================
# IMPORTS
#==========================================================================
import sys, time
from aardvark_py import *
#==========================================================================
# CONSTANTS
#==========================================================================
BUFFER_SIZE = 32767
TIMEFORMAT = "%Y-%m-%d %H:%M:%S"
#==========================================================================
# FUNCTIONS
#==========================================================================
def dump (handle, filter_addr, timeout):
# Wait for data on the bus
print "Waiting %d ms for first transaction..." % timeout
print " Filtering on 0x%03x" % filter_addr
result = aa_async_poll(handle, timeout)
if (result == AA_ASYNC_NO_DATA):
print " no data pending."
return
print " data received"
last_data0 = 0
last_data1 = 0
# Loop until aa_async_poll times out
while 1:
# Read the next monitor transaction.
# This function has an internal timeout (see datasheet), though
# since we have already checked for data using aa_async_poll,
# the timeout should never be exercised.
(status, data) = aa_i2c_monitor_read(handle, BUFFER_SIZE)
if (status < 0):
print "error: %s" % aa_status_string(status)
return
# The display flag indicates if the filtered address has been matched
# and the data should be displayed.
display = 0
# The display_buffer is used to hold the start condition because it
# is sent before the address is known, so the output needs to be
# cached to display later.
display_buffer = ""
for i in range(len(data)):
if (data[i] == AA_I2C_MONITOR_CMD_START):
# Generate a timestamp. This time stamp does not accurately
# reflect the actual time that the transaction occurred, but
# is generated to give the user a relative time for the
# transaction.
fmtstamp = time.strftime(TIMEFORMAT, time.localtime(time.time()))
# Cache the start condition
display_buffer = "\n%s : [S] " % fmtstamp
elif (data[i] == AA_I2C_MONITOR_CMD_STOP):
if display:
sys.stdout.write("[P]\n")
# After a stop condition, reset the display flag for
# next message
display = 0
else:
nack = (data[i] & AA_I2C_MONITOR_NACK)
if nack: nack_str = "*"
else: nack_str = ""
# 7-bit addresses
if (last_data0 == AA_I2C_MONITOR_CMD_START and
((data[i] & 0xf8) != 0xf0 or nack)):
# Test to see if 7-bit address matches
if ((data[i] & 0xff) >> 1 == filter_addr):
# If the address matches, then set display to 1
display = 1
# Write out the start condition
sys.stdout.write(display_buffer)
# And reset the buffer
display_buffer = ""
# Now process regularly
if (data[i] & 0x01): dir_str = "r"
else: dir_str = "w"
sys.stdout.write("<%02x:%s>%s " %
((data[i] & 0xff) >> 1,
dir_str,
nack_str
))
# 10-bit addresses
# See Philips specification for more details.
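# A 10-bit address is sent as two bytes: the first is 11110xxd (xx =
# address bits 9:8, d = R/W), the second holds bits 7:0, hence the
# ((first << 7) & 0x300) | second reconstruction below.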
elif (last_data1 == AA_I2C_MONITOR_CMD_START and
(last_data0 & 0xf8) == 0xf0):
# Test to see if 10-bit address matches
if (((last_data0 << 7) & 0x300) | (data[i] & 0xff)) == filter_addr:
# If the address matches, then set display to 1
display = 1
# Write out the start condition
sys.stdout.write(display_buffer)
# Reset the buffer
display_buffer = ""
if (last_data0 & 0x01): dir_str = "r"
else: dir_str = "w"
sys.stdout.write("<%03x:%s>%s " %
(((last_data0 << 7) & 0x300) | (data[i] & 0xff),
dir_str,
nack_str
))
# Normal data
elif (last_data0 != AA_I2C_MONITOR_CMD_START):
if display:
sys.stdout.write("%02x%s " % (data[i] & 0xff, nack_str))
last_data1 = last_data0
last_data0 = data[i]
sys.stdout.flush()
# print "\nWaiting %d ms for subsequent transaction..." % INTERVAL_TIMEOUT
# Use aa_async_poll to wait for the next transaction
result = aa_async_poll(handle, timeout)
if (result == AA_ASYNC_NO_DATA):
print " No more data pending."
break
#==========================================================================
# MAIN PROGRAM
#==========================================================================
if (len(sys.argv) < 4):
print "usage: aamonitor PORT ADDR TIMEOUT"
print " where:"
print " PORT is the Aardvark adapter port number"
print " ADDR is the slave address as an integer"
print " TIMEOUT is the timeout interval in ms"
sys.exit()
port = int(sys.argv[1])
filter_addr = int(sys.argv[2], 0)
timeout = int(sys.argv[3])
# Open the device
handle = aa_open(port)
if (handle <= 0):
print "Unable to open Aardvark device on port %d" % port
print "Error code = %d" % handle
sys.exit()
# Ensure that the I2C subsystem is enabled
aa_configure(handle, AA_CONFIG_SPI_I2C)
# Disable the I2C bus pullup resistors (2.2k resistors).
# This command is only effective on v2.0 hardware or greater.
# The pullup resistors on the v1.02 hardware are enabled by default.
aa_i2c_pullup(handle, AA_I2C_PULLUP_NONE)
# Disable the Aardvark adapter's power pins.
|
# Enable the monitor
result = aa_i2c_monitor_enable(handle)
if (result < 0):
print "error: %s\n" % aa_status_string(result)
sys.exit()
print "Enabled I2C monitor."
# Watch the I2C port
dump(handle, filter_addr, timeout)
# Disable the slave and close the device
aa_i2c_monitor_disable(handle)
aa_close(handle) | # This command is only effective on v2.0 hardware or greater.
# The power pins on the v1.02 hardware are not enabled by default.
aa_target_power(handle, AA_TARGET_POWER_NONE)
|
db_dump.py | import getopt, os, time, re, gzip, json, traceback
import sys, uuid
from config import DBConfig, Config
from part import PartitionedList
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy.dialects.postgresql import UUID, JSONB
from sqlalchemy.dialects.oracle import RAW, CLOB
from sqlalchemy.dialects.mysql import BINARY
from sqlalchemy.types import TypeDecorator, CHAR, String
from stats import Stats
Version = "1.1"
t0 = time.time()
#from sqlalchemy import schema
Usage = """
python db_dump.py [options] -c <config.yaml> <rse_name>
-c <config file> -- required
-d <db config file> -- required - uses rucio.cfg format. Must contain "default" and "schema" under [databse]
-v -- verbose
-n <nparts>
-f <state>:<prefix> -- filter files with given state to the files set with prefix
state can be either combination of capital letters or "*"
can be repeated ( -f A:/path1 -f CD:/path2 )
use "*" for state to send all the files to the output set ( -f *:/path )
-l -- include more columns, otherwise physical path only, automatically on if -a is used
-z -- produce gzipped output
-s <stats file> -- write stats into JSON file
-S <key> -- add dump stats to stats under the key
-m <N files> -- stop after N files
"""
class GUID(TypeDecorator):
"""
Platform-independent GUID type.
Uses PostgreSQL's UUID type,
uses Oracle's RAW type,
uses MySQL's BINARY type,
otherwise uses CHAR(32), storing as stringified hex values.
"""
impl = CHAR
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(UUID())
elif dialect.name == 'oracle':
return dialect.type_descriptor(RAW(16))
elif dialect.name == 'mysql':
return dialect.type_descriptor(BINARY(16))
else:
return dialect.type_descriptor(CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'postgresql':
return str(value).lower()
elif dialect.name == 'oracle':
return uuid.UUID(value).bytes
elif dialect.name == 'mysql':
return uuid.UUID(value).bytes
else:
if not isinstance(value, uuid.UUID):
return "%.32x" % uuid.UUID(value).int
else:
# value is already a UUID; format its integer value as 32 hex chars
return "%.32x" % value.int
def process_result_value(self, value, dialect):
|
opts, args = getopt.getopt(sys.argv[1:], "f:c:ln:vd:s:S:zm:")
filters = {}
all_states = set()
for opt, val in opts:
if opt == '-f':
states, prefix = val.split(':')
filters[states] = prefix
all_states |= set(states)
opts = dict(opts)
if not args or (not "-c" in opts and not "-d" in opts):
print (Usage)
sys.exit(2)
verbose = "-v" in opts
long_output = "-l" in opts
out_prefix = opts.get("-o")
zout = "-z" in opts
stats_file = opts.get("-s")
stats_key = opts.get("-S", "db_dump")
stop_after = int(opts.get("-m", 0)) or None
rse_name = args[0]
if "-d" in opts:
dbconfig = DBConfig.from_cfg(opts["-d"])
else:
dbconfig = DBConfig.from_yaml(opts["-c"])
#print("dbconfig: url:", dbconfig.DBURL, "schema:", dbconfig.Schema)
config = Config(opts["-c"])
stats = None if stats_file is None else Stats(stats_file)
if stats:
stats[stats_key] = {
"status":"started",
"version":Version,
"rse":rse_name,
"start_time":t0,
"end_time":None,
"files":None,
"elapsed":None,
"directories":None,
"exception":[]
}
try:
Base = declarative_base()
if dbconfig.Schema:
Base.metadata.schema = dbconfig.Schema
class Replica(Base):
__tablename__ = "replicas"
path = Column(String)
state = Column(String)
rse_id = Column(GUID(), primary_key=True)
scope = Column(String, primary_key=True)
name = Column(String, primary_key=True)
class RSE(Base):
__tablename__ = "rses"
id = Column(GUID(), primary_key=True)
rse = Column(String)
if "-n" in opts:
nparts = int(opts["-n"])
else:
nparts = config.nparts(rse_name) or 1
subdir = config.dbdump_root(rse_name) or "/"
if not subdir.endswith("/"): subdir = subdir + "/"
print(f"Filtering files under {subdir} only")
_, ignore_file_patterns = config.ignore_patterns(rse_name)
engine = create_engine(dbconfig.DBURL, echo=verbose)
Session = sessionmaker(bind=engine)
session = Session()
rse = session.query(RSE).filter(RSE.rse == rse_name).first()
if rse is None:
print ("RSE %s not found" % (rse_name,))
sys.exit(1)
rse_id = rse.id
#print ("rse_id:", type(rse_id), rse_id)
batch = 100000
outputs = {
states:PartitionedList.create(nparts, prefix, zout) for states, prefix in filters.items()
}
all_replicas = '*' in all_states
replicas = session.query(Replica).filter(Replica.rse_id==rse_id).yield_per(batch)
if all_replicas:
sys.stderr.write("including all replias\n")
else:
print("including replicas in states:", list(all_states), file=sys.stderr)
replicas = replicas.filter(Replica.state.in_(list(all_states)))
dirs = set()
n = 0
filter_re = config.dbdump_param(rse, "filter")
if filter_re:
filter_re = re.compile(filter_re)
for r in replicas:
path = r.name
state = r.state
if not path.startswith(subdir):
continue
if filter_re is not None:
if not filter_re.search(path):
continue
if any(p.match(path) for p in ignore_file_patterns):
continue
words = path.rsplit("/", 1)
if len(words) == 1:
dirp = "/"
else:
dirp = words[0]
dirs.add(dirp)
for s, out_list in outputs.items():
if state in s or s == '*':
if long_output:
out_list.add("%s\t%s\t%s\t%s\t%s" % (rse_name, r.scope, r.name, path or "null", r.state))
else:
out_list.add(path or "null")
n += 1
if n % batch == 0:
print(n)
if stop_after is not None and n >= stop_after:
print(f"stopped after {stop_after} files", file=sys.stderr)
break
for out_list in outputs.values():
out_list.close()
sys.stderr.write("Found %d files in %d directories\n" % (n, len(dirs)))
t1 = time.time()
t = int(t1 - t0)
s = t % 60
m = t // 60
sys.stderr.write("Elapsed time: %dm%02ds\n" % (m, s))
except:
lines = traceback.format_exc().split("\n")
t1 = time.time()
if stats is not None:
stats[stats_key].update({
"status":"failed",
"end_time":t1,
"exception":lines
})
stats.save()
else:
if stats is not None:
stats[stats_key].update({
"status":"done",
"end_time":t1,
"files":n,
"elapsed":t1-t0,
"directories":len(dirs)
})
stats.save()
| if value is None:
return value
elif dialect.name == 'oracle':
return str(uuid.UUID(bytes=value)).replace('-', '').lower()
elif dialect.name == 'mysql':
return str(uuid.UUID(bytes=value)).replace('-', '').lower()
else:
return str(uuid.UUID(value)).replace('-', '').lower() |
ItemForm.js | import React from 'react';
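// Usage sketch (illustrative): a parent renders <ItemForm onCreated={item => ...}/>
// and receives {name: <the entered text>} whenever the form is submitted.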
class | extends React.Component {
constructor(props) {
super(props);
this.state = {
formValue:''
};
this.createItem = this.createItem.bind(this);
this.changeHandler = this.changeHandler.bind(this);
}
createItem(event){
event.preventDefault();
this.props.onCreated({name: this.state.formValue});
this.setState({
formValue: ''
})
}
changeHandler(event){
this.setState({
formValue: event.target.value
})
}
render() {
return (
<form onSubmit={this.createItem}>
<fieldset>
<legend>Enter product name</legend>
<input value = {this.state.formValue} placeholder =" " type="text" onChange={this.changeHandler}/>
<button>Submit</button>
</fieldset>
</form>
);
}
}
export default ItemForm; | ItemForm |
utils.rs | extern crate fs2;
use std::fs::{File, OpenOptions};
use std::io;
use std::path::Path;
cfg_if! {
if #[cfg(unix)] {
#[cfg(linux)]
extern crate thread_priority;
use std::process::Command;
use std::os::unix::fs::OpenOptionsExt;
use utils::fs2::FileExt;
#[cfg(linux)]
use self::thread_priority::*;
const O_DIRECT: i32 = 0o0_040_000;
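// 0o0_040_000 is O_DIRECT on most Linux targets (the value is
// architecture dependent); opening with it bypasses the page cache,
// which is what open_using_direct_io below relies on.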
pub fn set_low_prio() {
// todo: low prio for macos
#[cfg(linux)]
let thread_id = thread_native_id();
#[cfg(linux)]
set_thread_priority(
thread_id,
ThreadPriority::Min,
ThreadSchedulePolicy::Normal(NormalThreadSchedulePolicy::Normal)
).unwrap();
}
pub fn open_using_direct_io<P: AsRef<Path>>(path: P) -> io::Result<File> {
OpenOptions::new()
.write(true)
.create(true)
.custom_flags(O_DIRECT)
.open(path)
}
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<File> {
OpenOptions::new()
.write(true)
.create(true)
.open(path)
}
pub fn open_r<P: AsRef<Path>>(path: P) -> io::Result<File> {
OpenOptions::new()
.read(true)
.open(path)
}
// On unix, get the device id from 'df' command
fn get_device_id_unix(path: &str) -> String {
let output = Command::new("df")
.arg(path)
.output()
.expect("failed to execute 'df --output=source'");
let source = String::from_utf8(output.stdout).expect("not utf8");
source.split('\n').collect::<Vec<&str>>()[1].split(' ').collect::<Vec<&str>>()[0].to_string()
}
// On macos, use df and 'diskutil info <device>' to get the Device Block Size line
// and extract the size
fn get_sector_size_macos(path: &str) -> u64 {
let source = get_device_id_unix(path);
let output = Command::new("diskutil")
.arg("info")
.arg(source)
.output()
.expect("failed to execute 'diskutil info'");
let source = String::from_utf8(output.stdout).expect("not utf8");
let mut sector_size: u64 = 0;
for line in source.split('\n').collect::<Vec<&str>>() {
if line.trim().starts_with("Device Block Size") {
// e.g. in reverse: "Bytes 512 Size Block Device"
let source = line.rsplit(' ').collect::<Vec<&str>>()[1];
sector_size = source.parse::<u64>().unwrap();
}
}
if sector_size == 0 {
panic!("Abort: Unable to determine disk physical sector size from diskutil info")
}
sector_size
}
// On unix, use df and lsblk to extract the device sector size
fn get_sector_size_unix(path: &str) -> u64 {
let source = get_device_id_unix(path);
let output = Command::new("lsblk")
.arg(source)
.arg("-o")
.arg("PHY-SeC")
.output()
.expect("failed to execute 'lsblk -o LOG-SeC'");
let sector_size = String::from_utf8(output.stdout).expect("not utf8");
let sector_size = sector_size.split('\n').collect::<Vec<&str>>().get(1).unwrap_or_else(|| {
println!("failed to determine sector size, defaulting to 4096.");
&"4096"
}).trim();
sector_size.parse::<u64>().unwrap()
}
pub fn get_sector_size(path: &str) -> u64 {
if cfg!(target_os = "macos") {
get_sector_size_macos(path)
} else {
get_sector_size_unix(path)
}
}
pub fn preallocate(file: &Path, size_in_bytes: u64, use_direct_io: bool) {
let file = if use_direct_io {
open_using_direct_io(&file)
} else {
open(&file)
};
let file = file.unwrap();
file.allocate(size_in_bytes).unwrap();
}
} else {
extern crate winapi;
extern crate core;
use std::ffi::CString;
use std::ptr::null_mut;
use std::iter::once;
use std::ffi::OsStr;
use std::os::windows::io::AsRawHandle;
use std::os::windows::ffi::OsStrExt;
use std::os::windows::fs::OpenOptionsExt;
use utils::core::mem::size_of_val;
use utils::winapi::um::errhandlingapi::GetLastError;
use utils::winapi::um::fileapi::{GetDiskFreeSpaceA,SetFileValidData};
use utils::winapi::um::handleapi::CloseHandle;
use utils::winapi::um::processthreadsapi::{SetThreadIdealProcessor,GetCurrentThread,OpenProcessToken,GetCurrentProcess,SetPriorityClass};
use utils::winapi::um::securitybaseapi::AdjustTokenPrivileges;
use utils::winapi::um::winbase::LookupPrivilegeValueW;
use utils::winapi::um::winnt::{LUID,TOKEN_ADJUST_PRIVILEGES,TOKEN_PRIVILEGES,LUID_AND_ATTRIBUTES,SE_PRIVILEGE_ENABLED,SE_MANAGE_VOLUME_NAME};
const FILE_FLAG_NO_BUFFERING: u32 = 0x2000_0000;
const FILE_FLAG_WRITE_THROUGH: u32 = 0x8000_0000;
const BELOW_NORMAL_PRIORITY_CLASS: u32 = 0x0000_4000;
pub fn open_using_direct_io<P: AsRef<Path>>(path: P) -> io::Result<File> {
OpenOptions::new()
.write(true)
.create(true)
.custom_flags(FILE_FLAG_NO_BUFFERING)
.open(path)
}
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<File> {
OpenOptions::new()
.write(true)
.create(true)
.custom_flags(FILE_FLAG_WRITE_THROUGH)
.open(path)
}
pub fn open_r<P: AsRef<Path>>(path: P) -> io::Result<File> {
OpenOptions::new()
.read(true)
.open(path)
}
pub fn preallocate(file: &Path, size_in_bytes: u64, use_direct_io: bool) {
let mut result = true;
result &= obtain_priviledge();
let file = if use_direct_io {
open_using_direct_io(&file)
} else {
open(&file)
};
let file = file.unwrap();
file.set_len(size_in_bytes).unwrap();
if result {
let handle = file.as_raw_handle();
unsafe{
let temp = SetFileValidData(handle, size_in_bytes as i64);
result &= temp == 1;
}
}
if !result {
println!("FAILED, administrative rights missing");
print!("Slow file pre-allocation...");
}
}
pub fn obtain_priviledge() -> bool {
let mut result = true;
let privilege_encoded: Vec<u16> = OsStr::new(SE_MANAGE_VOLUME_NAME)
.encode_wide()
.chain(once(0))
.collect();
let luid = LUID{
HighPart: 0i32,
LowPart: 0u32
};
unsafe {
let mut htoken = null_mut();
let mut tp = TOKEN_PRIVILEGES{
PrivilegeCount: 1,
Privileges: [LUID_AND_ATTRIBUTES{
Luid: luid,
Attributes: SE_PRIVILEGE_ENABLED,
}]
};
let temp = OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &mut htoken);
result &= temp == 1;
let temp = LookupPrivilegeValueW(null_mut(), privilege_encoded.as_ptr(), &mut tp.Privileges[0].Luid);
result &= temp == 1;
let temp = AdjustTokenPrivileges(htoken, 0, &mut tp, size_of_val(&tp) as u32, null_mut(), null_mut());
CloseHandle(htoken);
result &= temp == 1;
result &=
GetLastError() == 0u32
}
result
}
pub fn get_sector_size(path: &str) -> u64 {
let path_encoded = Path::new(path);
let parent_path_encoded = CString::new(path_encoded.to_str().unwrap()).unwrap();
let mut sectors_per_cluster = 0u32;
let mut bytes_per_sector = 0u32;
let mut number_of_free_cluster = 0u32;
let mut total_number_of_cluster = 0u32;
if unsafe {
GetDiskFreeSpaceA(
parent_path_encoded.as_ptr(),
&mut sectors_per_cluster,
&mut bytes_per_sector,
&mut number_of_free_cluster,
&mut total_number_of_cluster
)
} == 0 {
panic!("get sector size, filename={}",path);
};
u64::from(bytes_per_sector)
}
pub fn set_thread_ideal_processor(id: usize){
// Set core affinity for current thread.
unsafe {
SetThreadIdealProcessor(
GetCurrentThread(),
id as u32
);
}
}
pub fn set_low_prio() {
unsafe{
SetPriorityClass(GetCurrentProcess(),BELOW_NORMAL_PRIORITY_CLASS);
}
}
}
}
pub fn | (path: &str) -> u64 {
fs2::free_space(Path::new(&path)).unwrap()
}
| free_disk_space |
test_base.py | import abc
import datetime
from collections import OrderedDict
from typing import Any, Dict
import dateparser
from django.contrib.gis.geos import Point
from pytz import unicode
from rest_framework import status
from rest_framework.test import APITestCase
from care.facility.models import (
CATEGORY_CHOICES,
DISEASE_CHOICES_MAP,
SYMPTOM_CHOICES,
Disease,
DiseaseStatusEnum,
Facility,
LocalBody,
PatientConsultation,
PatientRegistration,
User,
)
from care.users.models import District, State
from config.tests.helper import EverythingEquals, mock_equal
class TestBase(APITestCase):
"""
Base class for tests, handles most of the test setup and tools for setting up data
"""
maxDiff = None
@classmethod
def create_user(cls, district: District, username: str = "user", **kwargs):
data = {
"email": f"{username}@somedomain.com",
"phone_number": "5554446667",
"age": 30,
"gender": 2,
"verified": True,
"username": username,
"password": "bar",
"district": district,
"user_type": User.TYPE_VALUE_MAP["Staff"],
}
data.update(kwargs)
return User.objects.create_user(**data)
@classmethod
def create_super_user(cls, district: District, username: str = "superuser"):
user = cls.create_user(district=district, username=username, user_type=User.TYPE_VALUE_MAP["DistrictAdmin"],)
user.is_superuser = True
user.save()
return user
@classmethod
def create_district(cls, state: State):
|
@classmethod
def create_state(cls):
return State.objects.create(name=f"State{datetime.datetime.now().timestamp()}")
@classmethod
def create_facility(cls, district: District, user: User = None, **kwargs):
user = user or cls.user
data = {
"name": "Foo",
"district": district,
"facility_type": 1,
"address": "8/88, 1st Cross, 1st Main, Boo Layout",
"location": Point(24.452545, 49.878248),
"oxygen_capacity": 10,
"phone_number": "9998887776",
"created_by": user,
}
data.update(kwargs)
f = Facility(**data)
f.save()
return f
@classmethod
def create_patient(cls, **kwargs):
patient_data = cls.get_patient_data().copy()
patient_data.update(kwargs)
medical_history = patient_data.pop("medical_history", [])
district_id = patient_data.pop("district", None)
state_id = patient_data.pop("state", None)
patient_data.update(
{
"district_id": district_id,
"state_id": state_id,
"disease_status": getattr(DiseaseStatusEnum, patient_data["disease_status"]).value,
}
)
patient = PatientRegistration.objects.create(**patient_data)
diseases = [
Disease.objects.create(patient=patient, disease=DISEASE_CHOICES_MAP[mh["disease"]], details=mh["details"])
for mh in medical_history
]
patient.medical_history.set(diseases)
return patient
@classmethod
def get_user_data(cls, district: District = None, user_type: str = None):
"""
Returns the data to be used for API testing
Returns:
dict
Params:
district: District
user_type: str(A valid mapping for the integer types mentioned inside the models)
"""
district = district or cls.district
user_type = user_type or User.TYPE_VALUE_MAP["Staff"]
return {
"user_type": user_type,
"district": district,
"state": district.state,
"phone_number": "8887776665",
"gender": 2,
"age": 30,
"email": "[email protected]",
"username": "user",
"password": "bar",
}
@classmethod
def get_facility_data(cls, district):
"""
Returns the data to be used for API testing
Returns:
dict
Params:
district: int
An id for the instance of District object created
user_type: str
A valid mapping for the integer types mentioned inside the models
"""
return {
"name": "Foo",
"district": (district or cls.district).id,
"facility_type": 1,
"address": f"Address {datetime.datetime.now().timestamp}",
"location": {"latitude": 49.878248, "longitude": 24.452545},
"oxygen_capacity": 10,
"phone_number": "9998887776",
"capacity": [],
}
@classmethod
def get_patient_data(cls, district=None, state=None):
return {
"name": "Foo",
"age": 32,
"date_of_birth": datetime.date(1992, 4, 1),
"gender": 2,
"is_medical_worker": True,
"blood_group": "O+",
"ongoing_medication": "",
"date_of_return": datetime.datetime(2020, 4, 1, 15, 30, 00),
"disease_status": "SUSPECTED",
"phone_number": "+918888888888",
"address": "Global citizen",
"contact_with_confirmed_carrier": True,
"contact_with_suspected_carrier": True,
"estimated_contact_date": None,
"past_travel": False,
"countries_travelled": "",
"present_health": "Fine",
"has_SARI": False,
"is_active": True,
"state": (state or cls.state).id,
"district": (district or cls.district).id,
"local_body": None,
"number_of_aged_dependents": 2,
"number_of_chronic_diseased_dependents": 1,
"medical_history": [{"disease": "Diabetes", "details": "150 count"}],
"date_of_receipt_of_information": datetime.datetime(2020, 4, 1, 15, 30, 00),
}
@classmethod
def setUpClass(cls) -> None:
super(TestBase, cls).setUpClass()
cls.state = cls.create_state()
cls.district = cls.create_district(cls.state)
cls.user_type = User.TYPE_VALUE_MAP["Staff"]
cls.user = cls.create_user(cls.district)
cls.super_user = cls.create_super_user(district=cls.district)
cls.facility = cls.create_facility(cls.district)
cls.patient = cls.create_patient()
cls.user_data = cls.get_user_data(cls.district, cls.user_type)
cls.facility_data = cls.get_facility_data(cls.district)
cls.patient_data = cls.get_patient_data(cls.district)
def setUp(self) -> None:
self.client.force_login(self.user)
@abc.abstractmethod
def get_base_url(self):
"""
Should return the base url of the testing viewset
WITHOUT trailing slash
eg: return "api/v1/facility"
:return: str
"""
raise NotImplementedError()
def get_url(self, entry_id=None, action=None, *args, **kwargs):
url = self.get_base_url(*args, **kwargs)
if entry_id is not None:
url = f"{url}/{entry_id}"
if action is not None:
url = f"{url}/{action}"
return f"{url}/"
@classmethod
def clone_object(cls, obj, save=True):
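# Re-fetch the object by pk, then clear pk/id so that save() performs an
# INSERT, yielding an independent copy of the row.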
new_obj = obj._meta.model.objects.get(pk=obj.id)
new_obj.pk = None
new_obj.id = None
if save:
new_obj.save()
return new_obj
@abc.abstractmethod
def get_list_representation(self, obj) -> dict:
"""
Returns the dict representation of the obj in list API
:param obj: Object to be represented
:return: dict
"""
raise NotImplementedError()
@abc.abstractmethod
def get_detail_representation(self, obj=None) -> dict:
"""
Returns the dict representation of the obj in detail/retrieve API
:param obj: Object to be represented
:param data: data
:return: dict
"""
raise NotImplementedError()
def get_local_body_district_state_representation(self, obj):
"""
Returns the local body, district and state representation for the obj.
The obj is expected to have `local_body`, `district` and `state` in it's attributes
Eg: Facility, Patient, User
:param obj: Any object which has `local_body`, `district` and `state` in attrs
:return:
"""
response = {}
response.update(self.get_local_body_representation(getattr(obj, "local_body", None)))
response.update(self.get_district_representation(getattr(obj, "district", None)))
response.update(self.get_state_representation(getattr(obj, "state", None)))
return response
def get_local_body_representation(self, local_body: LocalBody):
if local_body is None:
return {"local_body": None, "local_body_object": None}
else:
return {
"local_body": local_body.id,
"local_body_object": {
"id": local_body.id,
"name": local_body.name,
"district": local_body.district.id,
},
}
def get_district_representation(self, district: District):
if district is None:
return {"district": None, "district_object": None}
return {
"district": district.id,
"district_object": {"id": district.id, "name": district.name, "state": district.state.id,},
}
def get_state_representation(self, state: State):
if state is None:
return {"state": None, "state_object": None}
return {"state": state.id, "state_object": {"id": state.id, "name": state.name}}
def assertDictEqual(self, first: Dict[Any, Any], second: Dict[Any, Any], msg: Any = ...) -> None:
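# Convert both dicts to comparable types (plain dicts, tz-aware datetimes)
# before delegating to the stock assertDictEqual.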
first_dict = self._convert_to_matchable_types(first.copy())
second_dict = self._convert_to_matchable_types(second.copy())
return super(TestBase, self).assertDictEqual(first_dict, second_dict, msg)
def _convert_to_matchable_types(self, d):
def dict_to_matching_type(d: dict):
return {k: to_matching_type(k, v) for k, v in d.items()}
def to_matching_type(name: str, value):
if isinstance(value, (OrderedDict, dict)):
return dict_to_matching_type(dict(value))
elif isinstance(value, list):
return [to_matching_type("", v) for v in value]
elif "date" in name and not isinstance(value, (type(None), EverythingEquals)):
return_value = value
if isinstance(value, (str, unicode,)):
return_value = dateparser.parse(value)
return (
return_value.astimezone(tz=datetime.timezone.utc)
if isinstance(return_value, datetime.datetime)
else return_value
)
return value
return dict_to_matching_type(d)
def execute_list(self, user=None):
user = user or self.user
self.client.force_authenticate(user)
response = self.client.get(self.get_url(), format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
return response
def get_facility_representation(self, facility):
if facility is None:
return facility
else:
return {
"id": facility.id,
"name": facility.name,
"facility_type": {"id": facility.facility_type, "name": facility.get_facility_type_display()},
**self.get_local_body_district_state_representation(facility),
}
@classmethod
def get_consultation_data(cls):
return {
"patient": cls.patient,
"facility": cls.facility,
"symptoms": [SYMPTOM_CHOICES[0][0], SYMPTOM_CHOICES[1][0]],
"other_symptoms": "No other symptoms",
"symptoms_onset_date": datetime.datetime(2020, 4, 7, 15, 30),
"category": CATEGORY_CHOICES[0][0],
"examination_details": "examination_details",
"existing_medication": "existing_medication",
"prescribed_medication": "prescribed_medication",
"suggestion": PatientConsultation.SUGGESTION_CHOICES[0][0],
"referred_to": None,
"admitted": False,
"admitted_to": None,
"admission_date": None,
"discharge_date": None,
"created_date": mock_equal,
"modified_date": mock_equal,
}
@classmethod
def create_consultation(cls, patient=None, facility=None, **kwargs) -> PatientConsultation:
data = cls.get_consultation_data()
kwargs.update({"patient": patient or cls.patient, "facility": facility or cls.facility})
data.update(kwargs)
return PatientConsultation.objects.create(**data)
| return District.objects.create(state=state, name=f"District{datetime.datetime.now().timestamp()}") |
top.go | package csvutil
import (
"io"
"github.com/pkg/errors"
)
// TopOption is the option holder for Top.
type TopOption struct {
// Source file does not have header line. (default false)
NoHeader bool
// Encoding of source file. (default utf8)
Encoding string
// Encoding for output.
OutputEncoding string
// Headers is the list of headers to append.
Headers []string
// Count is the number of lines to read.
Count int
}
func (o TopOption) validate() error {
if o.Count <= 0 {
return errors.New("negative or zero count")
}
return nil
}
func (o TopOption) outputEncoding() string {
if o.OutputEncoding != "" {
return o.OutputEncoding
}
return o.Encoding
}
// Top copies up to Count records from the top of the input, preserving the
// header line unless NoHeader is set.
func Top(r io.Reader, w io.Writer, o TopOption) error | {
if err := o.validate(); err != nil {
return errors.Wrap(err, "invalid option")
}
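// The BOM detected on the input is handed to the writer so the encoding
// marker can be preserved on output.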
cr, bom := reader(r, o.Encoding)
cw := writer(w, bom, o.outputEncoding())
defer cw.Flush()
if !o.NoHeader {
hdr, err := cr.Read()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
cw.Write(hdr)
}
for i := 0; i < o.Count; i++ {
rec, err := cr.Read()
if err != nil {
if err == io.EOF {
break
}
return err
}
cw.Write(rec)
}
return nil
} |
|
list.rs |
use std::fmt;
use ::{ PResult, ParseResult, Parsed, Failed, Input };
/// Error for accumulating parsers.
#[derive( Debug, Copy, Clone, PartialEq, Eq )]
pub enum AccumulatorError<IE, SE, AE> {
/// The item parser returned an error at the given index.
ItemError {
/// The error returned from the item sub-parser.
error: IE,
/// The index at which the sub-parser returned the error.
index: usize,
},
/// The separator parser returned an error at the given index.
SeparatorError {
/// The error returned from the separator sub-parser.
error: SE,
/// The index at which the sub-parser returned the error.
index: usize,
},
/// The accumulating callback indicated an error by returning an `Err(_)` result
/// with the given error at the given index.
CallbackError {
/// The error returned from the accumulator function.
error: AE,
/// The index at which the accumulator function returned the error.
index: usize,
},
}
impl<IE, SE, AE> fmt::Display for AccumulatorError<IE, SE, AE>
where IE: fmt::Display, SE: fmt::Display, AE: fmt::Display {
fn fmt(&self, out: &mut fmt::Formatter) -> fmt::Result {
match *self {
AccumulatorError::ItemError { ref error, index } =>
write!(out, "Item Error (index {}): {}", index, error),
AccumulatorError::SeparatorError { ref error, index } =>
write!(out, "Separator Error (index {}): {}", index, error),
AccumulatorError::CallbackError { ref error, index } =>
write!(out, "Callback Error (index {}): {}", index, error),
}
}
}
/// Error for the list parser.
#[derive( Debug, Copy, Clone, PartialEq, Eq )]
pub enum ListError<IE, SE> {
/// The item parser returned an error at the given index.
ItemError {
/// The error returned from the item sub-parser.
error: IE,
/// The index at which the sub-parser returned the error.
index: usize,
},
/// The separator parser returned an error at the given index.
SeparatorError {
/// The error returned from the separator sub-parser.
error: SE,
/// The index at which the sub-parser returned the error.
index: usize,
},
/// An explicit or implicit item count limit has been reached.
LimitExceeded {
/// The limit that was exceeded.
limit: usize,
},
}
impl<IE, SE> fmt::Display for ListError<IE, SE>
where IE: fmt::Display, SE: fmt::Display {
fn fmt(&self, out: &mut fmt::Formatter) -> fmt::Result {
match *self {
ListError::ItemError { ref error, index } =>
write!(out, "Item Error (index {}): {}", index, error),
ListError::SeparatorError { ref error, index } =>
write!(out, "Separator Error (index {}): {}", index, error),
ListError::LimitExceeded { limit } =>
write!(out, "List limit ({}) was exceeded", limit),
}
}
}
/// Size limit for list accumulation.
#[derive( Debug, Copy, Clone, PartialEq, Eq )]
pub enum ListLimit {
/// No limit.
Unlimited,
/// Limited to a specific number.
Limited(usize),
}
/// Produces a general accumulation parser without per-item errors.
///
/// Parsers produced by this function behave the same way as the ones produced by `accumulate`.
/// The only difference is that this version doesn't include per-item error handling.
///
/// # Examples
///
/// ```rust
/// use absorb as ab;
///
/// let parser = ab::accumulate_flat(
/// ab::char_exact(','),
/// ab::char_digit(10).unwrap(),
/// |_location| 0,
/// |sum, value, _location| sum + value.to_digit(10).unwrap(),
/// );
///
/// assert_eq!(
/// ab::get("2,3,4,5,6", &parser),
/// Ok((
/// 20,
/// ab::State::Incomplete,
/// "",
/// ))
/// );
/// ```
#[inline(never)]
pub fn accumulate_flat<'a, V, VF, AF, SF, IF, SY, SN, IY, IN, SE, IE>(
separator: SF,
item: IF,
init: VF,
accumulator: AF,
)
-> impl Fn(Input<'a>)
-> PResult<V, IN, AccumulatorError<IE, SE, !>>
where
SF: Fn(Input<'a>) -> PResult<SY, SN, SE>,
IF: Fn(Input<'a>) -> PResult<IY, IN, IE>,
VF: Fn(::Location) -> V,
AF: Fn(V, IY, ::Location) -> V {
accumulate(separator, item, init, move |v, y, l| Ok(accumulator(v, y, l)))
}
/// Produces a general accumulation parser.
///
/// A parser accumulating one or more values parsed by the `item` sub-parser, optionally
/// separated by the `separator` sub-parser.
///
/// The `init` function is used to construct the initial accumulator value. It receives
/// the current location as argument.
///
/// The `accumulator` function is used to accumulate the parsed values. It receives the
/// last (or initial) accumulator value, the current parsed result value, and the location
/// where the item was parsed as arguments. It has to return a new accumulator value as
/// `Ok(_)` result on success, and `Err(_)` if any error occurred. Errors from the accumulator
/// function will be returned inside the `AccumulatorError::CallbackError` variant.
///
/// The kept index will wrap around on overflow.
///
/// # Examples
///
/// ```rust
/// use absorb as ab;
///
/// let parser = ab::accumulate(
/// ab::char_exact(','),
/// ab::char_digit(10).unwrap(),
/// |_location| 0,
/// |sum, value, _location| match value.to_digit(10) {
/// Some(value) => Ok(sum + value),
/// None => Err("unexpected invalid digit"),
/// },
/// );
///
/// assert_eq!(
/// ab::get("2,3,4,5,6", &parser),
/// Ok((
/// 20,
/// ab::State::Incomplete,
/// "",
/// ))
/// );
/// ```
#[inline(never)]
pub fn accumulate<'a, V, VF, AF, AE, SF, IF, SY, SN, IY, IN, SE, IE>(
separator: SF,
item: IF,
init: VF,
accumulator: AF,
)
-> impl Fn(Input<'a>)
-> PResult<V, IN, AccumulatorError<IE, SE, AE>>
where
SF: Fn(Input<'a>) -> PResult<SY, SN, SE>,
IF: Fn(Input<'a>) -> PResult<IY, IN, IE>,
VF: Fn(::Location) -> V,
AF: Fn(V, IY, ::Location) -> Result<V, AE> {
move |input| {
let (mut rest, mut accumulated, mut all_state) = {
let Parsed { value, rest, state } = match item(input) {
Err(error) => return Err(AccumulatorError::ItemError { error: error, index: 0 }),
Ok(ParseResult::Failed(failed)) => return Ok(ParseResult::Failed(failed)),
Ok(ParseResult::Parsed(parsed)) => parsed,
};
let prepared = match accumulator(init(input.location()), value, input.location()) {
Ok(value) => value,
Err(error) =>
return Err(AccumulatorError::CallbackError { error: error, index: 0 }),
};
(rest, prepared, state)
};
let mut index = 1;
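// Each iteration consumes one separator followed by one item; a soft
// failure from either sub-parser ends the list, a hard error aborts.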
loop {
let sep_rest = match separator(rest) {
Err(error) => return Err(AccumulatorError::SeparatorError {
error: error,
index: index - 1,
}),
Ok(ParseResult::Parsed(Parsed { rest: sep_rest, state: sep_state, .. })) => {
all_state = all_state.merge(sep_state);
sep_rest
},
Ok(ParseResult::Failed(Failed { state: sep_state, .. })) => | ,
};
rest = match item(sep_rest) {
Err(error) =>
return Err(AccumulatorError::ItemError { error: error, index: index }),
Ok(ParseResult::Parsed(Parsed { rest: it_rest, state: it_state, value })) => {
accumulated = match accumulator(accumulated, value, sep_rest.location()) {
Ok(new) => new,
Err(error) => return Err(AccumulatorError::CallbackError {
error: error,
index: index,
}),
};
all_state = all_state.merge(it_state);
it_rest
},
Ok(ParseResult::Failed(Failed { state: item_state, .. })) => {
all_state = all_state.merge(item_state);
break;
},
};
index = index.wrapping_add(1);
}
Ok(ParseResult::new_parsed(accumulated, all_state, rest))
}
}
/// Produces an accumulating parser specialized for `Vec<_>` results.
///
/// A parser collecting one or more values parsed by the `item` sub-parser, separated
/// by the `separator` sub-parser.
///
/// The `limit` parameter specifies the maximum number of items to collect before a
/// `ListError::LimitExceeded` value will be returned.
///
/// # Examples
///
/// ```rust
/// use absorb as ab;
/// let parser = ab::list(
/// ab::char_exact(','),
/// ab::str_identifier(),
/// ab::ListLimit::Limited(2048),
/// );
///
/// assert_eq!(
/// ab::get("foo,bar,baz", &parser),
/// Ok((
/// vec!["foo", "bar", "baz"],
/// ab::State::Incomplete,
/// "",
/// ))
/// );
/// ```
pub fn list<'a, SF, IF, SY, SN, IY, IN, SE, IE>(separator: SF, item: IF, limit: ListLimit)
-> impl Fn(Input<'a>)
-> PResult<'a, Vec<IY>, IN, ListError<IE, SE>>
where
SF: Fn(Input<'a>) -> PResult<'a, SY, SN, SE>,
IF: Fn(Input<'a>) -> PResult<'a, IY, IN, IE> {
struct HitLimit(usize);
::error_map(
accumulate(separator, item, |_| Vec::new(), move |mut items, item, _| {
if let ListLimit::Limited(limit) = limit {
if limit == items.len() {
return Err(HitLimit(limit));
}
}
items.push(item);
Ok(items)
}),
|error, _| match error {
AccumulatorError::CallbackError { error: HitLimit(limit), .. } =>
ListError::LimitExceeded { limit: limit },
AccumulatorError::ItemError { error, index } =>
ListError::ItemError { error: error, index: index },
AccumulatorError::SeparatorError { error, index } =>
ListError::SeparatorError { error: error, index: index },
},
)
}
#[cfg(test)]
mod tests {
#[test]
fn accumulate_flat() {
test!(
super::accumulate_flat(
::str_whitespace(),
::char_any(),
|loc| (vec![loc], 0),
|(mut locs, sum), item, loc| {
locs.push(loc);
(locs, sum + item.len_utf8())
},
),
"a b c" => (ok?: (vec![
::Location { position: 0, line: 1, column: 1 },
::Location { position: 0, line: 1, column: 1 },
::Location { position: 2, line: 1, column: 3 },
::Location { position: 4, line: 1, column: 5 },
], 3), ""),
"ab" => (ok: (vec![
::Location { position: 0, line: 1, column: 1 },
::Location { position: 0, line: 1, column: 1 },
], 1), "b"),
);
}
#[test]
fn accumulate() {
let cb_err: super::AccumulatorError<!, !, char> =
super::AccumulatorError::CallbackError { error: 'x', index: 2 };
test!(
super::accumulate(
::str_whitespace(),
::char_any(),
|loc| (vec![loc], 0),
|(mut locs, sum), item, loc|
if item == 'x' { Err(item) } else {
locs.push(loc);
Ok((locs, sum + item.len_utf8()))
},
),
"a b c" => (ok?: (vec![
::Location { position: 0, line: 1, column: 1 },
::Location { position: 0, line: 1, column: 1 },
::Location { position: 2, line: 1, column: 3 },
::Location { position: 4, line: 1, column: 5 },
], 3), ""),
"a b x" => (fatal: cb_err),
"ab" => (ok: (vec![
::Location { position: 0, line: 1, column: 1 },
::Location { position: 0, line: 1, column: 1 },
], 1), "b"),
);
}
#[test]
fn list() {
test!(super::list(::char_exact('.'), ::char_any(), super::ListLimit::Unlimited),
"" => (err?: ::NoChar, (0, 1, 1)),
"a" => (ok?: vec!['a'], ""),
"a." => (ok?: vec!['a'], "."),
"ab" => (ok: vec!['a'], "b"),
"a.b" => (ok?: vec!['a', 'b'], ""),
"a.b." => (ok?: vec!['a', 'b'], "."),
"a.b.cd" => (ok: vec!['a', 'b', 'c'], "d"),
);
{
let limited =
super::list(::char_exact('.'), ::char_any(), super::ListLimit::Limited(2));
let err_limited = super::ListError::LimitExceeded { limit: 2 };
test!(limited, "a.b.c" => (fatal: err_limited));
}
{
let limit = super::ListLimit::Limited(2);
let fail = super::list(
super::list(::nothing(), ::char_exact('.'), limit),
super::list(::nothing(), ::char_exact('x'), limit),
super::ListLimit::Unlimited,
);
let err_sep = super::ListError::SeparatorError {
error: super::ListError::LimitExceeded { limit: 2 },
index: 2,
};
let err_item = super::ListError::ItemError {
error: super::ListError::LimitExceeded { limit: 2 },
index: 2,
};
test!(&fail,
"x.x.x " => (ok: vec![vec!['x'], vec!['x'], vec!['x']], " "),
"x.x.x...x " => (fatal: err_sep),
"x.x.xxx.x " => (fatal: err_item),
);
}
}
}
| {
all_state = all_state.merge(sep_state);
break;
} |
middleware_auth.go | package server
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"github.com/gorilla/mux"
"github.com/libopenstorage/openstorage/api"
"github.com/libopenstorage/openstorage/pkg/auth"
osecrets "github.com/libopenstorage/openstorage/pkg/auth/secrets"
"github.com/libopenstorage/openstorage/volume"
volumedrivers "github.com/libopenstorage/openstorage/volume/drivers"
lsecrets "github.com/libopenstorage/secrets"
"github.com/portworx/sched-ops/k8s/core"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
const (
// PVCNameLabelKey is used by the kubernetes auth provider, indicating the name of the PVC
PVCNameLabelKey = "pvc"
// PVCNamespaceLabelKey is used by the kubernetes auth provider, indicating the namespace of the PVC
PVCNamespaceLabelKey = "namespace"
)
// NewAuthMiddleware returns a negroni implementation of an http middleware
// which will intercept the management APIs
func NewAuthMiddleware() *authMiddleware {
return &authMiddleware{}
}
type authMiddleware struct {
}
// newSecurityMiddleware, based on the auth configuration, returns a SecurityHandler wrapper or just the unwrapped handler
func newSecurityMiddleware(authenticators map[string]auth.Authenticator) func(next http.HandlerFunc) http.HandlerFunc {
if auth.Enabled() {
return func(next http.HandlerFunc) http.HandlerFunc {
return SecurityHandler(authenticators, next)
}
}
return func(next http.HandlerFunc) http.HandlerFunc {
return next
}
}
// SecurityHandler implements the authentication and authorization checks at the same time;
// this functionality was not moved into separate functions for simplicity
func SecurityHandler(authenticators map[string]auth.Authenticator, next http.HandlerFunc) http.HandlerFunc {
if authenticators == nil {
return next
}
return func(w http.ResponseWriter, r *http.Request) {
tokenHeader := r.Header.Get("Authorization")
tokens := strings.Split(tokenHeader, " ")
if len(tokens) < 2 {
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(&api.ClusterResponse{
Error: fmt.Sprintf("Access denied, token is malformed"),
})
return
}
token := tokens[1]
// Determine issuer
issuer, err := auth.TokenIssuer(token)
if err != nil {
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(&api.ClusterResponse{
Error: fmt.Sprintf("Access denied, %v", err),
})
return
}
// Use http.Request context for cancellation propagation
ctx := r.Context()
// Authenticate user
var claims *auth.Claims
if authenticator, exists := authenticators[issuer]; exists {
claims, err = authenticator.AuthenticateToken(ctx, token)
if err != nil {
w.WriteHeader(http.StatusForbidden)
json.NewEncoder(w).Encode(&api.ClusterResponse{
Error: fmt.Sprintf("Access denied, %s", err.Error()),
})
return
}
if claims == nil {
w.WriteHeader(http.StatusForbidden)
json.NewEncoder(w).Encode(&api.ClusterResponse{
Error: fmt.Sprintf("Access denied, wrong claims provided"),
})
// Without this return, a nil claims value would be dereferenced below.
return
}
} else {
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(&api.ClusterResponse{
Error: fmt.Sprintf("Access denied, no authenticator for issuer %s", issuer),
})
return
}
// Check if user has admin role to access that endpoint
isSystemAdmin := false
for _, role := range claims.Roles {
if role == "system.admin" {
isSystemAdmin = true
break
}
}
if !isSystemAdmin {
w.WriteHeader(http.StatusForbidden)
json.NewEncoder(w).Encode(&api.ClusterResponse{
Error: fmt.Sprintf("Access denied, user must have admin access"),
})
return
}
next.ServeHTTP(w, r)
}
}
func (a *authMiddleware) createWithAuth(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
fn := "create"
_, authRequired := a.isTokenProcessingRequired(r)
if !authRequired {
next(w, r)
return
}
requestBody := a.getBody(r)
var dcReq api.VolumeCreateRequest
var dcRes api.VolumeCreateResponse
if err := json.NewDecoder(requestBody).Decode(&dcReq); err != nil {
next(w, r)
return
}
spec := dcReq.GetSpec()
locator := dcReq.GetLocator()
tokenSecretContext, err := a.parseSecret(spec.VolumeLabels, locator.VolumeLabels, true)
if err != nil {
a.log(locator.Name, fn).WithError(err).Error("failed to parse secret")
dcRes.VolumeResponse = &api.VolumeResponse{Error: "failed to parse secret: " + err.Error()}
json.NewEncoder(w).Encode(&dcRes)
return
}
if tokenSecretContext.SecretName == "" {
errorMessage := "Access denied, no secret found in the annotations of the persistent volume claim" +
" or storage class parameters"
a.log(locator.Name, fn).Error(errorMessage)
dcRes.VolumeResponse = &api.VolumeResponse{Error: errorMessage}
// WriteHeader must be called before the body is written, otherwise the
// status code is silently dropped.
w.WriteHeader(http.StatusUnauthorized)
json.NewEncoder(w).Encode(&dcRes)
return
}
token, err := osecrets.GetToken(tokenSecretContext)
if err != nil {
a.log(locator.Name, fn).WithError(err).Error("failed to get token")
dcRes.VolumeResponse = &api.VolumeResponse{Error: "failed to get token: " + err.Error()}
json.NewEncoder(w).Encode(&dcRes)
return
} else {
a.insertToken(r, token)
}
next(w, r)
}
func (a *authMiddleware) setWithAuth(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
fn := "set"
d, authRequired := a.isTokenProcessingRequired(r)
if !authRequired {
next(w, r)
return
}
volumeID, err := a.parseID(r)
if err != nil {
a.log(volumeID, fn).WithError(err).Error("Failed to parse volumeID")
next(w, r)
return
}
requestBody := a.getBody(r)
var (
req api.VolumeSetRequest
resp api.VolumeSetResponse
isOpDone bool
)
err = json.NewDecoder(requestBody).Decode(&req)
if err != nil {
a.log(volumeID, fn).WithError(err).Error("Failed to parse the request")
next(w, r)
return
}
// Not checking tokens for the following APIs
// - Resize
// - Attach/Detach
// - Mount/Unmount
if req.Spec != nil && req.Spec.Size > 0 {
isOpDone = true
err = d.Set(volumeID, req.Locator, req.Spec)
}
for err == nil && req.Action != nil {
if req.Action.Attach != api.VolumeActionParam_VOLUME_ACTION_PARAM_NONE {
isOpDone = true
if req.Action.Attach == api.VolumeActionParam_VOLUME_ACTION_PARAM_ON {
_, err = d.Attach(volumeID, req.Options)
} else {
err = d.Detach(volumeID, req.Options)
}
if err != nil {
break
}
}
if req.Action.Mount != api.VolumeActionParam_VOLUME_ACTION_PARAM_NONE {
isOpDone = true
if req.Action.Mount == api.VolumeActionParam_VOLUME_ACTION_PARAM_ON {
if req.Action.MountPath == "" {
err = fmt.Errorf("Invalid mount path")
break
}
err = d.Mount(volumeID, req.Action.MountPath, req.Options)
} else {
err = d.Unmount(volumeID, req.Action.MountPath, req.Options)
}
if err != nil {
break
}
}
break
}
if isOpDone {
if err != nil {
processErrorForVolSetResponse(req.Action, err, &resp)
} else {
v, err := d.Inspect([]string{volumeID})
if err != nil {
processErrorForVolSetResponse(req.Action, err, &resp)
} else if v == nil || len(v) != 1 {
processErrorForVolSetResponse(
req.Action,
status.Errorf(codes.NotFound, "Volume with ID: %s is not found", volumeID),
&resp)
} else {
v0 := v[0]
resp.Volume = v0
}
}
json.NewEncoder(w).Encode(resp)
// Not calling the next handler
return
}
next(w, r)
}
func (a *authMiddleware) deleteWithAuth(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
fn := "delete"
d, authRequired := a.isTokenProcessingRequired(r)
if !authRequired {
next(w, r)
return
}
volumeID, err := a.parseID(r)
if err != nil {
a.log(volumeID, fn).WithError(err).Error("Failed to parse volumeID")
next(w, r)
return
}
vols, err := d.Inspect([]string{volumeID})
if err != nil || len(vols) == 0 || vols[0] == nil {
a.log(volumeID, fn).WithError(err).Error("Failed to get volume object")
next(w, r)
return
}
volumeResponse := &api.VolumeResponse{}
tokenSecretContext, err := a.parseSecret(vols[0].Spec.VolumeLabels, vols[0].Locator.VolumeLabels, false)
if err != nil {
a.log(volumeID, fn).WithError(err).Error("failed to parse secret")
volumeResponse.Error = "failed to parse secret: " + err.Error()
json.NewEncoder(w).Encode(volumeResponse)
return
}
if tokenSecretContext.SecretName == "" {
errorMessage := fmt.Sprintf("Error, unable to get secret information from the volume."+
" You may need to re-add the following keys as volume labels to point to the secret: %s and %s",
osecrets.SecretNameKey, osecrets.SecretNamespaceKey)
a.log(volumeID, fn).Error(errorMessage)
volumeResponse = &api.VolumeResponse{Error: errorMessage}
w.WriteHeader(http.StatusInternalServerError)
json.NewEncoder(w).Encode(volumeResponse)
return
}
token, err := osecrets.GetToken(tokenSecretContext)
if err != nil {
a.log(volumeID, fn).WithError(err).Error("failed to get token")
volumeResponse.Error = "failed to get token: " + err.Error()
json.NewEncoder(w).Encode(volumeResponse)
return
} else {
a.insertToken(r, token)
}
next(w, r)
}
func (a *authMiddleware) inspectWithAuth(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
fn := "inspect"
d, authRequired := a.isTokenProcessingRequired(r)
if !authRequired {
next(w, r)
return
}
volumeID, err := a.parseID(r)
if err != nil {
a.log(volumeID, fn).WithError(err).Error("Failed to parse volumeID")
next(w, r)
return
}
dk, err := d.Inspect([]string{volumeID})
if err != nil {
a.log(volumeID, fn).WithError(err).Error("Failed to inspect volume")
http.Error(w, err.Error(), http.StatusNotFound)
return
}
json.NewEncoder(w).Encode(dk)
}
func (a *authMiddleware) enumerateWithAuth(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
fn := "enumerate"
d, authRequired := a.isTokenProcessingRequired(r)
if !authRequired {
next(w, r)
return
}
volIDs, ok := r.URL.Query()[api.OptVolumeID]
if !ok || len(volIDs) == 0 || len(volIDs[0]) < 1 {
a.log("", fn).Error("Failed to parse VolumeID")
return
}
volumeID := volIDs[0]
vols, err := d.Inspect([]string{volumeID})
if err != nil || len(vols) == 0 || vols[0] == nil {
a.log(volumeID, fn).WithError(err).Error("Failed to get volume object")
next(w, r)
return
}
volumeResponse := &api.VolumeResponse{}
tokenSecretContext, err := a.parseSecret(vols[0].Spec.VolumeLabels, vols[0].Locator.VolumeLabels, false)
if err != nil {
a.log(volumeID, fn).WithError(err).Error("failed to parse secret")
volumeResponse.Error = "failed to parse secret: " + err.Error()
json.NewEncoder(w).Encode(volumeResponse)
return
}
if tokenSecretContext.SecretName == "" {
errorMessage := fmt.Sprintf("Error, unable to get secret information from the volume."+
" You may need to re-add the following keys as volume labels to point to the secret: %s and %s",
osecrets.SecretNameKey, osecrets.SecretNamespaceKey)
a.log(volumeID, fn).Error(errorMessage)
volumeResponse = &api.VolumeResponse{Error: errorMessage}
w.WriteHeader(http.StatusInternalServerError)
json.NewEncoder(w).Encode(volumeResponse)
return
}
token, err := osecrets.GetToken(tokenSecretContext)
if err != nil {
a.log(volumeID, fn).WithError(err).Error("failed to get token")
volumeResponse.Error = "failed to get token: " + err.Error()
json.NewEncoder(w).Encode(volumeResponse)
return
} else {
a.insertToken(r, token)
}
next(w, r)
}
func (a *authMiddleware) isTokenProcessingRequired(r *http.Request) (volume.VolumeDriver, bool) {
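// Tokens are only injected for requests coming from a scheduler driver,
// detected via the User-Agent client-name suffix; all other requests pass
// through untouched.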
userAgent := r.Header.Get("User-Agent")
if len(userAgent) > 0 {
// Check if the request is coming from a container orchestrator
clientName := strings.Split(userAgent, "/")
if len(clientName) > 0 {
if strings.HasSuffix(clientName[0], schedDriverPostFix) {
d, err := volumedrivers.Get(clientName[0])
if err != nil {
return nil, false
}
return d, true
}
}
}
return nil, false
}
func (a *authMiddleware) insertToken(r *http.Request, token string) {
// Set the token in header
if auth.IsJwtToken(token) {
r.Header.Set("Authorization", "bearer "+token)
} else {
r.Header.Set("Authorization", "Basic "+token)
}
}
func (a *authMiddleware) parseID(r *http.Request) (string, error) {
if id, err := a.parseParam(r, "id"); err == nil {
return id, nil
}
return "", fmt.Errorf("could not parse snap ID")
}
func (a *authMiddleware) parseParam(r *http.Request, param string) (string, error) {
vars := mux.Vars(r)
if id, ok := vars[param]; ok {
return id, nil
}
return "", fmt.Errorf("could not parse %s", param)
}
func (a *authMiddleware) parseSecret(
specLabels, locatorLabels map[string]string,
fetchCOLabels bool,
) (*api.TokenSecretContext, error) {
if lsecrets.Instance().String() == lsecrets.TypeK8s && fetchCOLabels {
// For k8s fetch the actual annotations
pvcName, ok := locatorLabels[PVCNameLabelKey]
if !ok |
pvcNamespace, ok := locatorLabels[PVCNamespaceLabelKey]
if !ok {
// best effort to fetch the secret
return parseSecretFromLabels(specLabels, locatorLabels)
}
pvc, err := core.Instance().GetPersistentVolumeClaim(pvcName, pvcNamespace)
if err != nil {
return nil, err
}
secretName := pvc.ObjectMeta.Annotations[osecrets.SecretNameKey]
if len(secretName) == 0 {
return parseSecretFromLabels(specLabels, locatorLabels)
}
secretNamespace := pvc.ObjectMeta.Annotations[osecrets.SecretNamespaceKey]
return &api.TokenSecretContext{
SecretName: secretName,
SecretNamespace: secretNamespace,
}, nil
}
return parseSecretFromLabels(specLabels, locatorLabels)
}
func parseSecretFromLabels(specLabels, locatorLabels map[string]string) (*api.TokenSecretContext, error) {
// Locator labels take precedence
secretName := locatorLabels[osecrets.SecretNameKey]
secretNamespace := locatorLabels[osecrets.SecretNamespaceKey]
if secretName == "" {
secretName = specLabels[osecrets.SecretNameKey]
}
if secretName == "" {
return nil, fmt.Errorf("secret name is empty")
}
if secretNamespace == "" {
secretNamespace = specLabels[osecrets.SecretNamespaceKey]
}
return &api.TokenSecretContext{
SecretName: secretName,
SecretNamespace: secretNamespace,
}, nil
}
func (a *authMiddleware) log(id, fn string) *logrus.Entry {
return logrus.WithFields(map[string]interface{}{
"ID": id,
"Component": "auth-middleware",
"Function": fn,
})
}
func (a *authMiddleware) getBody(r *http.Request) io.ReadCloser {
// Make a copy of the reader so that the next handler
// has access to the body
buf, _ := ioutil.ReadAll(r.Body)
rdr1 := ioutil.NopCloser(bytes.NewBuffer(buf))
rdr2 := ioutil.NopCloser(bytes.NewBuffer(buf))
r.Body = rdr2
return rdr1
}
| {
// best effort to fetch the secret
return parseSecretFromLabels(specLabels, locatorLabels)
} |
instance.rs | use std::ffi::{CStr, CString};
use ash::prelude::VkResult;
pub use ash::version::{DeviceV1_0, EntryV1_0, InstanceV1_0};
use ash::vk;
use super::debug_reporter;
use super::VkDebugReporter;
use super::VkEntry;
use super::Window;
use ash::extensions::ext::DebugReport;
/// Create one of these at startup. It never gets lost/destroyed.
pub struct VkInstance {
pub entry: VkEntry,
pub instance: ash::Instance,
pub debug_reporter: Option<VkDebugReporter>,
}
#[derive(Debug)]
pub enum VkCreateInstanceError {
InstanceError(ash::InstanceError), | }
impl std::error::Error for VkCreateInstanceError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match *self {
VkCreateInstanceError::InstanceError(ref e) => Some(e),
VkCreateInstanceError::VkError(ref e) => Some(e),
}
}
}
impl core::fmt::Display for VkCreateInstanceError {
fn fmt(
&self,
fmt: &mut core::fmt::Formatter,
) -> core::fmt::Result {
match *self {
VkCreateInstanceError::InstanceError(ref e) => e.fmt(fmt),
VkCreateInstanceError::VkError(ref e) => e.fmt(fmt),
}
}
}
impl From<ash::InstanceError> for VkCreateInstanceError {
fn from(result: ash::InstanceError) -> Self {
VkCreateInstanceError::InstanceError(result)
}
}
impl From<vk::Result> for VkCreateInstanceError {
fn from(result: vk::Result) -> Self {
VkCreateInstanceError::VkError(result)
}
}
impl VkInstance {
/// Creates a vulkan instance.
pub fn new(
entry: VkEntry,
window: &dyn Window,
app_name: &CString,
validation_layer_debug_report_flags: vk::DebugReportFlagsEXT,
) -> Result<VkInstance, VkCreateInstanceError> {
// Determine the supported version of vulkan that's available
let vulkan_version = match entry.try_enumerate_instance_version()? {
// Vulkan 1.1+
Some(version) => {
let major = vk::version_major(version);
let minor = vk::version_minor(version);
let patch = vk::version_patch(version);
(major, minor, patch)
}
// Vulkan 1.0
None => (1, 0, 0),
};
info!("Found Vulkan version: {:?}", vulkan_version);
// Get the available layers/extensions
let layers = entry.enumerate_instance_layer_properties()?;
debug!("Available Layers: {:#?}", layers);
let extensions = entry.enumerate_instance_extension_properties()?;
debug!("Available Extensions: {:#?}", extensions);
// Expected to be 1.1.0 or 1.0.0 depending on what we found in try_enumerate_instance_version
// https://vulkan.lunarg.com/doc/view/1.1.70.1/windows/tutorial/html/16-vulkan_1_1_changes.html
let api_version = vk::make_version(vulkan_version.0, vulkan_version.1, 0);
// Info that's exposed to the driver. In a real shipped product, this data might be used by
// the driver to make specific adjustments to improve performance
// https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/VkApplicationInfo.html
let appinfo = vk::ApplicationInfo::builder()
.application_name(app_name)
.application_version(0)
.engine_name(app_name)
.engine_version(0)
.api_version(api_version);
let mut layer_names = vec![];
let mut extension_names = window.extension_names()?;
if !validation_layer_debug_report_flags.is_empty() {
// Find the best validation layer that's available
let best_validation_layer = VkInstance::find_best_validation_layer(&layers);
if best_validation_layer.is_none() {
log::error!("Could not find an appropriate validation layer. Check that the vulkan SDK has been installed or disable validation.");
return Err(vk::Result::ERROR_LAYER_NOT_PRESENT.into());
}
let debug_extension = DebugReport::name();
let has_debug_extension = extensions.iter().any(|extension| unsafe {
debug_extension == CStr::from_ptr(extension.extension_name.as_ptr())
});
if !has_debug_extension {
log::error!("Could not find the debug extension. Check that the vulkan SDK has been installed or disable validation.");
return Err(vk::Result::ERROR_EXTENSION_NOT_PRESENT.into());
}
if let Some(best_validation_layer) = best_validation_layer {
if has_debug_extension {
layer_names.push(best_validation_layer);
extension_names.push(DebugReport::name());
}
}
}
if log_enabled!(log::Level::Debug) {
log::debug!("Using layers: {:?}", layer_names);
log::debug!("Using extensions: {:?}", extension_names);
}
let layer_names: Vec<_> = layer_names.iter().map(|x| x.as_ptr()).collect();
let extension_names: Vec<_> = extension_names.iter().map(|x| x.as_ptr()).collect();
// Create the instance
let create_info = vk::InstanceCreateInfo::builder()
.application_info(&appinfo)
.enabled_layer_names(&layer_names)
.enabled_extension_names(&extension_names);
info!("Creating vulkan instance");
let instance: ash::Instance = unsafe { entry.create_instance(&create_info, None)? };
// Setup the debug callback for the validation layer
let debug_reporter = if !validation_layer_debug_report_flags.is_empty() {
Some(Self::setup_vulkan_debug_callback(
&entry,
&instance,
validation_layer_debug_report_flags,
)?)
} else {
None
};
Ok(VkInstance {
entry,
instance,
debug_reporter,
})
}
fn find_best_validation_layer(layers: &[ash::vk::LayerProperties]) -> Option<&'static CStr> {
fn khronos_validation_layer_name() -> &'static CStr {
CStr::from_bytes_with_nul(b"VK_LAYER_KHRONOS_validation\0")
.expect("Wrong extension string")
}
fn lunarg_validation_layer_name() -> &'static CStr {
CStr::from_bytes_with_nul(b"VK_LAYER_LUNARG_standard_validation\0")
.expect("Wrong extension string")
}
let khronos_validation_layer_name = khronos_validation_layer_name();
let lunarg_validation_layer_name = lunarg_validation_layer_name();
// Find the best validation layer that's available
let mut best_available_layer = None;
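// Prefer the newer VK_LAYER_KHRONOS_validation layer; keep
// VK_LAYER_LUNARG_standard_validation only as a fallback when it is the
// sole validation layer available.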
for layer in layers {
let layer_name = unsafe { CStr::from_ptr(layer.layer_name.as_ptr()) };
if layer_name == khronos_validation_layer_name {
best_available_layer = Some(khronos_validation_layer_name);
break;
}
if layer_name == lunarg_validation_layer_name {
best_available_layer = Some(lunarg_validation_layer_name);
}
}
best_available_layer
}
/// This is used to setup a debug callback for logging validation errors
fn setup_vulkan_debug_callback<E: EntryV1_0, I: InstanceV1_0>(
entry: &E,
instance: &I,
debug_report_flags: vk::DebugReportFlagsEXT,
) -> VkResult<VkDebugReporter> {
info!("Seting up vulkan debug callback");
let debug_info = vk::DebugReportCallbackCreateInfoEXT::builder()
.flags(debug_report_flags)
.pfn_callback(Some(debug_reporter::vulkan_debug_callback));
let debug_report_loader = ash::extensions::ext::DebugReport::new(entry, instance);
let debug_callback =
unsafe { debug_report_loader.create_debug_report_callback(&debug_info, None)? };
Ok(VkDebugReporter {
debug_report_loader,
debug_callback,
})
}
}
impl Drop for VkInstance {
fn drop(&mut self) {
trace!("destroying VkInstance");
std::mem::drop(self.debug_reporter.take());
unsafe {
self.instance.destroy_instance(None);
}
trace!("destroyed VkInstance");
}
} | VkError(vk::Result), |
FBetaScore.py | from typing import Union
import numpy as np
import pandas as pd
from oolearning.converters.TwoClassConverterBase import TwoClassConverterBase
from oolearning.enums.Metric import Metric
from oolearning.evaluators.TwoClassConfusionMatrix import TwoClassConfusionMatrix
from oolearning.evaluators.ScoreActualPredictedBase import ScoreActualPredictedBase
from oolearning.evaluators.UtilityFunctionMixin import UtilityFunctionMixin
class FBetaScore(UtilityFunctionMixin, ScoreActualPredictedBase):
def __init__(self,
converter: TwoClassConverterBase,
beta: float):
super().__init__()
self._converter = converter
self._beta = beta
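# beta > 1 weights recall more heavily than precision; beta < 1 favors
# precision (beta == 1 reduces to the ordinary F1 score).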
@property
def name(self) -> str:
return Metric.FBETA_SCORE.value
def | (self,
actual_values: np.ndarray,
predicted_values: Union[np.ndarray, pd.DataFrame]) -> float:
predicted_classes = self._converter.convert(values=predicted_values)
return TwoClassConfusionMatrix(actual_classes=actual_values,
predicted_classes=predicted_classes,
positive_class=self._converter.positive_class).fbeta_score(beta=self._beta) # noqa
| _calculate |
demo_ip.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import asyncio
import pyppeteer
import time
import os
import random
from exe_js import js1, js3, js4, js5
# http://www.mamicode.com/info-detail-2302923.html
# https://segmentfault.com/a/1190000011627343
"""
{
proxy: "127.0.0.1:1234",
proxy-auth: "userx:passx",
proxy-type: "meh"
}
"""
def input_time_random(): | print("in main ")
print(os.environ.get('PYPPETEER_CHROMIUM_REVISION'))
browser = await pyppeteer.launch(
executablePath=r"D:\A\Desktop\项目+更新\node_project\chrome-win\chrome-win\chrome.exe",
headless=False,
args=[
'--proxy-server=118.24.156.214:8118'
],
timeout=30000)
page = await browser.newPage()
await page.setViewport({"width": 1000, "height": 780})
await page.setUserAgent("Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36")
await page.goto('http://httpbin.net/ip')
# await page.waitForNavigation({'waitUntil': 'load'}) # 有时候不需要
content = await page.content()
cookies = await page.cookies()
await page.screenshot({'path': 'example.png'})
dimensions = await page.evaluate('''() => {
return {
width: document.documentElement.clientWidth,
height: document.documentElement.clientHeight,
deviceScaleFactor: window.devicePixelRatio,
}
}''')
print(dimensions)
await browser.close()
return {'content': content, 'cookies': cookies}
asyncio.get_event_loop().run_until_complete(main()) | return random.randint(300, 500)
async def main(): |
Presentation.tsx | import React, { MouseEventHandler } from "react";
export class Presentation extends React.Component<
PresentationProps,
PresentationState
> {
state: PresentationState = {};
prevSlideProps?: SlideProps[];
hidden = [false, false, false];
render() {
const slideProps = this.getProps();
this.prevSlideProps = slideProps;
return (
<div className="presentation">
<Slide
src={slideProps[0].src}
alt={slideProps[0].alt}
offset={slideProps[0].offset}
hidden={slideProps[0].hidden}
slideDuration={slideProps[0].slideDuration}
onMouseDown={this.props.onMouseDown}
onMouseUp={this.props.onMouseUp}
/>
<Slide
src={slideProps[1].src}
alt={slideProps[1].alt}
offset={slideProps[1].offset}
hidden={slideProps[1].hidden}
slideDuration={slideProps[1].slideDuration}
onMouseDown={this.props.onMouseDown}
onMouseUp={this.props.onMouseUp}
/>
<Slide
src={slideProps[2].src}
alt={slideProps[2].alt}
offset={slideProps[2].offset}
hidden={slideProps[2].hidden}
slideDuration={slideProps[2].slideDuration}
onMouseDown={this.props.onMouseDown}
onMouseUp={this.props.onMouseUp}
/>
</div>
);
}
getProps = () => {
const slides = this.props.slides;
let info: SlideProps[] = [];
for (let index = 0; index < slides.length; index++) {
const slideInfo = slides[index];
let offset = index - 1;
this.calculateHidden(offset, slideInfo.id);
info[slideInfo.id] = {
src: slideInfo.src,
alt: slideInfo.alt,
slideDuration: this.props.slideDuration,
offset: offset,
hidden: this.hidden[slideInfo.id],
};
}
return info;
};
calculateHidden = (currentOffset: number, id: number) => {
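// A slide whose offset jumps by two or more positions in a single render
// is wrapping around the carousel; mark it hidden so the jump is not
// animated (see the transition override in Slide).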
if (this.prevSlideProps == undefined) return false;
let previousOffset = this.prevSlideProps[id].offset;
let dif = Math.abs(currentOffset - previousOffset);
if (dif == 0) return this.hidden[id];
this.hidden[id] = dif >= 2;
};
}
export function Slide(props: SlideProps) {
let style = {
"--offset": 100 * props.offset + "%",
"--slideDuration": props.slideDuration,
} as React.CSSProperties;
return (
<div
onMouseDown={props.onMouseDown}
onMouseUp={props.onMouseUp}
className="slide"
style={{
...style,
zIndex: props.hidden ? -100 : 0,
transition: props.hidden
? "all 0ms none"
: `transform ${props.slideDuration} ease-in-out`,
}}
>
<div className="slide-wrapper" style={{ backgroundColor: "white" }}>
<img className="bg" src={props.src} alt={props.alt} />
</div>
<div className="slide-wrapper">
<img className="main" draggable={false} src={props.src} alt={props.alt} />
</div>
</div>
);
}
export interface SlideProps {
src: string;
alt: string;
offset: number;
hidden: boolean;
slideDuration: string;
onMouseDown?: MouseEventHandler
onMouseUp?: MouseEventHandler
}
export interface SlideInfo { | }
export interface PresentationProps {
slides: Slides;
slideDuration: string;
onMouseDown?: MouseEventHandler
onMouseUp?: MouseEventHandler
}
export type Slides = [SlideInfo, SlideInfo, SlideInfo];
interface PresentationState {
prevSlideProps?: SlideProps[];
slideProps?: SlideProps[];
} | src: string;
alt: string;
id: 0 | 1 | 2; |
ArticleCard.tsx | import * as React from 'react'
import { styled } from '../../Theme'
import { Box } from '../../Components/Box'
import { Flex } from '../../Components/Flex'
import { Avatar } from '../../Components/Avatar'
import { AuthorProps } from '../../Components/AuthorBlock/AuthorBlock'
import { formatDistanceStrict } from 'date-fns'
const StyledHeadingBox = styled(Box)`
padding: 18px;
height: 125px;
h1 {
display: -webkit-box;
-webkit-line-clamp: 2;
-webkit-box-orient: vertical;
font-size: 24px;
font-weight: 700;
line-height: 28px;
letter-spacing: -1px;
margin: 0;
overflow: hidden;
color: ${props => props.theme.colors.primary};
}
`
const StyledHeroImageBox = styled(Box)<{ imageUrl: string | undefined }>`
display: block;
position: relative;
height: 170px;
border-top-left-radius: 5px;
border-top-right-radius: 5px;
background-image: url(${props => props.imageUrl});
background-position: center;
background-origin: border-box;
background-size: cover;
background-color: #eee;
${props => props.theme.breakpoints.up('md')} {
height: 240px;
}
${props => props.theme.breakpoints.up('lg')} {
height: 170px;
}
` as React.FunctionComponent<{ imageUrl: string | undefined }>
const StyledLink = styled.a<{ to: string }>`
display: block;
border-radius: 5px;
text-decoration: none;
transition: all 0.2s ease-in-out 0s;
box-shadow: 0px 2px 5px rgba(51, 51, 51, 0.1);
&:focus {
outline: none;
}
&:hover {
${props => props.theme.breakpoints.up('md')} {
box-shadow: 0px 4px 10px rgba(51, 51, 51, 0.2);
}
text-decoration: none;
}
`
const StyledPublishedAtBox = styled(Box)`
span {
font-size: 12px;
font-weight: 700;
line-height: 15px;
text-transform: uppercase;
color: #666666;
}
span + span {
font-weight: 400;
text-transform: none;
}
`
const StyledAvatarWrapper = styled.div`
position: absolute;
bottom: -22.5px;
left: 18px;
`
export const ArticleCard = ({
title,
link,
linkAs,
heroImageUrl,
dateTime,
author,
}: {
title: string
link: string
linkAs?: any
heroImageUrl?: string
dateTime: string
author: AuthorProps
}) => ( | <Box>
<StyledLink href={link} to={link} as={linkAs}>
<StyledHeroImageBox imageUrl={heroImageUrl}>
<StyledAvatarWrapper>
<Avatar
firstName={author.firstName}
lastName={author.lastName}
mini={true}
withShadow
/>
</StyledAvatarWrapper>
</StyledHeroImageBox>
<StyledHeadingBox>
<Flex alignItems={`center`} justifyContent={`flex-start`} mt={5}>
<StyledPublishedAtBox>
<span>{`${author.firstName} ${author.lastName}`}</span>{' '}
<span>
{`wrote this ${formatDistanceStrict(
new Date(dateTime),
new Date(),
{
addSuffix: true,
},
)}`}
</span>
</StyledPublishedAtBox>
</Flex>
<h1>{title}</h1>
</StyledHeadingBox>
</StyledLink>
</Box>
) | |
iter.rs | //! Matrix iterators.
use std::iter::FusedIterator;
use std::marker::PhantomData;
use std::mem;
use crate::base::dimension::{Dim, U1};
use crate::base::storage::{Storage, StorageMut};
use crate::base::{Matrix, MatrixSlice, MatrixSliceMut, Scalar};
macro_rules! iterator {
(struct $Name:ident for $Storage:ident.$ptr: ident -> $Ptr:ty, $Ref:ty, $SRef: ty) => {
/// An iterator through a dense matrix with arbitrary strides.
pub struct $Name<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> {
ptr: $Ptr,
inner_ptr: $Ptr,
inner_end: $Ptr,
size: usize, // We can't use an end pointer here because a stride might be zero.
strides: (S::RStride, S::CStride),
_phantoms: PhantomData<($Ref, R, C, S)>,
}
// TODO: we need to specialize for the case where the matrix storage is owned (in which
// case the iterator is trivial because it does not have any stride).
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> $Name<'a, N, R, C, S> {
/// Creates a new iterator for the given matrix storage.
pub fn new(storage: $SRef) -> $Name<'a, N, R, C, S> {
let shape = storage.shape();
let strides = storage.strides();
let inner_offset = shape.0.value() * strides.0.value();
let size = shape.0.value() * shape.1.value();
let ptr = storage.$ptr();
// If we have a size of 0, 'ptr' must be
// dangling. However, 'inner_offset' might
// not be zero if only one dimension is zero, so
// we don't want to call 'offset'.
// This pointer will never actually get used
// if our size is '0', so it's fine to use
// 'ptr' for both the start and end.
let inner_end = if size == 0 {
ptr
} else {
// Safety:
// If 'size' is non-zero, we know that 'ptr'
// is not dangling, and 'inner_offset' must lie
// within the allocation
unsafe { ptr.add(inner_offset) }
};
$Name {
ptr,
inner_ptr: ptr,
inner_end,
size: shape.0.value() * shape.1.value(),
strides,
_phantoms: PhantomData,
}
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> Iterator
for $Name<'a, N, R, C, S>
{
type Item = $Ref;
#[inline]
fn next(&mut self) -> Option<$Ref> {
unsafe {
if self.size == 0 {
None
} else {
self.size -= 1;
// Jump to the next outer dimension if needed.
if self.ptr == self.inner_end {
let stride = self.strides.1.value() as isize;
// This might go past the end of the allocation,
// depending on the value of 'size'. We use
// `wrapping_offset` to avoid UB
self.inner_end = self.ptr.wrapping_offset(stride);
// This will always be in bounds, since
// we're going to dereference it
self.ptr = self.inner_ptr.offset(stride);
self.inner_ptr = self.ptr;
}
// Go to the next element.
let old = self.ptr;
// Don't offset `self.ptr` for the last element,
// as this will be out of bounds. Iteration is done
// at this point (the next call to `next` will return `None`)
// so this is not observable.
if self.size != 0 {
let stride = self.strides.0.value();
self.ptr = self.ptr.add(stride);
}
Some(mem::transmute(old))
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.size, Some(self.size))
}
#[inline]
fn count(self) -> usize {
self.size_hint().0
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> DoubleEndedIterator
for $Name<'a, N, R, C, S>
{
#[inline]
fn next_back(&mut self) -> Option<$Ref> {
unsafe {
if self.size == 0 {
None
} else {
// Pre-decrement `size` such that it now counts to the
// element we want to return.
self.size -= 1;
// Fetch strides
let inner_stride = self.strides.0.value();
let outer_stride = self.strides.1.value();
// Compute number of rows
// Division should be exact
let inner_raw_size = self.inner_end.offset_from(self.inner_ptr) as usize;
let inner_size = inner_raw_size / inner_stride;
// Compute rows and cols remaining
let outer_remaining = self.size / inner_size;
let inner_remaining = self.size % inner_size;
// Compute pointer to last element
let last = self.ptr.offset(
(outer_remaining * outer_stride + inner_remaining * inner_stride)
as isize,
);
| // on the mutability of `$Ref`.
Some(mem::transmute(last))
}
}
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> ExactSizeIterator
for $Name<'a, N, R, C, S>
{
#[inline]
fn len(&self) -> usize {
self.size
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + $Storage<N, R, C>> FusedIterator
for $Name<'a, N, R, C, S>
{
}
};
}
iterator!(struct MatrixIter for Storage.ptr -> *const N, &'a N, &'a S);
iterator!(struct MatrixIterMut for StorageMut.ptr_mut -> *mut N, &'a mut N, &'a mut S);
/*
*
* Row iterators.
*
*/
#[derive(Clone)]
/// An iterator through the rows of a matrix.
pub struct RowIter<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> {
mat: &'a Matrix<N, R, C, S>,
curr: usize,
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> RowIter<'a, N, R, C, S> {
pub(crate) fn new(mat: &'a Matrix<N, R, C, S>) -> Self {
RowIter { mat, curr: 0 }
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator for RowIter<'a, N, R, C, S> {
type Item = MatrixSlice<'a, N, U1, C, S::RStride, S::CStride>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.curr < self.mat.nrows() {
let res = self.mat.row(self.curr);
self.curr += 1;
Some(res)
} else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(
self.mat.nrows() - self.curr,
Some(self.mat.nrows() - self.curr),
)
}
#[inline]
fn count(self) -> usize {
self.mat.nrows() - self.curr
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator
for RowIter<'a, N, R, C, S>
{
#[inline]
fn len(&self) -> usize {
self.mat.nrows() - self.curr
}
}
/// An iterator through the mutable rows of a matrix.
pub struct RowIterMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> {
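// A raw pointer (plus PhantomData for the lifetime) lets `next` hand out
// disjoint mutable row slices without re-borrowing the whole matrix.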
mat: *mut Matrix<N, R, C, S>,
curr: usize,
phantom: PhantomData<&'a mut Matrix<N, R, C, S>>,
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> RowIterMut<'a, N, R, C, S> {
pub(crate) fn new(mat: &'a mut Matrix<N, R, C, S>) -> Self {
RowIterMut {
mat,
curr: 0,
phantom: PhantomData,
}
}
fn nrows(&self) -> usize {
unsafe { (*self.mat).nrows() }
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator
for RowIterMut<'a, N, R, C, S>
{
type Item = MatrixSliceMut<'a, N, U1, C, S::RStride, S::CStride>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.curr < self.nrows() {
let res = unsafe { (*self.mat).row_mut(self.curr) };
self.curr += 1;
Some(res)
} else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.nrows() - self.curr, Some(self.nrows() - self.curr))
}
#[inline]
fn count(self) -> usize {
self.nrows() - self.curr
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterator
for RowIterMut<'a, N, R, C, S>
{
#[inline]
fn len(&self) -> usize {
self.nrows() - self.curr
}
}
/*
*
* Column iterators.
*
*/
#[derive(Clone)]
/// An iterator through the columns of a matrix.
pub struct ColumnIter<'a, N: Scalar, R: Dim, C: Dim, S: Storage<N, R, C>> {
mat: &'a Matrix<N, R, C, S>,
curr: usize,
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ColumnIter<'a, N, R, C, S> {
pub(crate) fn new(mat: &'a Matrix<N, R, C, S>) -> Self {
ColumnIter { mat, curr: 0 }
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> Iterator
for ColumnIter<'a, N, R, C, S>
{
type Item = MatrixSlice<'a, N, R, U1, S::RStride, S::CStride>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.curr < self.mat.ncols() {
let res = self.mat.column(self.curr);
self.curr += 1;
Some(res)
} else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(
self.mat.ncols() - self.curr,
Some(self.mat.ncols() - self.curr),
)
}
#[inline]
fn count(self) -> usize {
self.mat.ncols() - self.curr
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + Storage<N, R, C>> ExactSizeIterator
for ColumnIter<'a, N, R, C, S>
{
#[inline]
fn len(&self) -> usize {
self.mat.ncols() - self.curr
}
}
/// An iterator through the mutable columns of a matrix.
pub struct ColumnIterMut<'a, N: Scalar, R: Dim, C: Dim, S: StorageMut<N, R, C>> {
mat: *mut Matrix<N, R, C, S>,
curr: usize,
phantom: PhantomData<&'a mut Matrix<N, R, C, S>>,
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ColumnIterMut<'a, N, R, C, S> {
pub(crate) fn new(mat: &'a mut Matrix<N, R, C, S>) -> Self {
ColumnIterMut {
mat,
curr: 0,
phantom: PhantomData,
}
}
fn ncols(&self) -> usize {
unsafe { (*self.mat).ncols() }
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> Iterator
for ColumnIterMut<'a, N, R, C, S>
{
type Item = MatrixSliceMut<'a, N, R, U1, S::RStride, S::CStride>;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.curr < self.ncols() {
let res = unsafe { (*self.mat).column_mut(self.curr) };
self.curr += 1;
Some(res)
} else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.ncols() - self.curr, Some(self.ncols() - self.curr))
}
#[inline]
fn count(self) -> usize {
self.ncols() - self.curr
}
}
impl<'a, N: Scalar, R: Dim, C: Dim, S: 'a + StorageMut<N, R, C>> ExactSizeIterator
for ColumnIterMut<'a, N, R, C, S>
{
#[inline]
fn len(&self) -> usize {
self.ncols() - self.curr
}
} | // We want either `& *last` or `&mut *last` here, depending |
serialization_utils.rs | //! Utility functions for assisting with conversion of headers from and to the HTTP text form.
use std::io::IoResult;
use rfc2616::is_token;
/// Normalise an HTTP header name.
///
/// Rules:
///
/// - The first character is capitalised
/// - Any character immediately following `-` (HYPHEN-MINUS) is capitalised
/// - All other characters are made lowercase
///
/// This will fail if passed a non-ASCII name.
///
/// # Examples
///
/// ~~~ .{rust}
/// # use http::headers::serialization_utils::normalise_header_name;
/// assert_eq!(normalise_header_name(&String::from_str("foo-bar")), String::from_str("Foo-Bar"));
/// assert_eq!(normalise_header_name(&String::from_str("FOO-BAR")), String::from_str("Foo-Bar"));
/// ~~~
pub fn normalise_header_name(name: &String) -> String {
let mut result: String = String::with_capacity(name.len());
let mut capitalise = true;
for c in name[].chars() {
let c = match capitalise {
true => c.to_ascii().to_uppercase(),
false => c.to_ascii().to_lowercase(),
};
result.push(c.as_char());
// ASCII 45 is '-': in that case, capitalise the next char
capitalise = c.as_byte() == 45;
}
result
}
/// Split a value on commas, as is common for HTTP headers.
///
/// This does not handle quoted-strings intelligently.
///
/// # Examples
///
/// ~~~ .{rust}
/// # use http::headers::serialization_utils::comma_split;
/// assert_eq!(
/// comma_split(" en;q=0.8, en_AU, text/html"),
/// vec![String::from_str("en;q=0.8"), String::from_str("en_AU"), String::from_str("text/html")]
/// )
/// ~~~
pub fn comma_split(value: &str) -> Vec<String> {
value.split(',').map(|w| String::from_str(w.trim_left())).collect()
}
pub fn comma_split_iter<'a>(value: &'a str)
-> ::std::iter::Map<'a, &'a str, &'a str, ::std::str::CharSplits<'a, char>> {
value.split(',').map(|w| w.trim_left())
}
pub trait WriterUtil: Writer {
fn write_maybe_quoted_string(&mut self, s: &String) -> IoResult<()> {
if is_token(s) {
self.write(s.as_bytes())
} else {
self.write_quoted_string(s)
}
}
fn write_quoted_string(&mut self, s: &String) -> IoResult<()> {
try!(self.write(b"\""));
for b in s.as_bytes().iter() {
if *b == b'\\' || *b == b'"' {
try!(self.write(b"\\"));
}
// XXX This doesn't seem right.
try!(self.write(&[*b]));
}
self.write(b"\"")
}
fn write_parameter(&mut self, k: &str, v: &String) -> IoResult<()> {
try!(self.write(k.as_bytes()));
try!(self.write(b"="));
self.write_maybe_quoted_string(v)
}
fn write_parameters(&mut self, parameters: &[(String, String)]) -> IoResult<()> {
for &(ref k, ref v) in parameters.iter() {
try!(self.write(b";"));
try!(self.write_parameter(k[], v));
}
Ok(())
}
fn write_quality(&mut self, quality: Option<f64>) -> IoResult<()> {
// TODO: remove second and third decimal places if zero, and use a better quality type anyway
match quality {
Some(qvalue) => write!(&mut *self, ";q={:0.3}", qvalue),
None => Ok(()),
}
}
#[inline]
fn write_token(&mut self, token: &String) -> IoResult<()> {
assert!(is_token(token));
self.write(token.as_bytes())
}
}
impl<W: Writer> WriterUtil for W { }
/// Join a vector of values with commas, as is common for HTTP headers.
///
/// # Examples
///
/// ~~~ .{rust}
/// # use http::headers::serialization_utils::comma_join;
/// assert_eq!(
/// comma_join(&[String::from_str("en;q=0.8"), String::from_str("en_AU"), String::from_str("text/html")]),
/// String::from_str("en;q=0.8, en_AU, text/html")
/// )
/// ~~~
#[inline]
pub fn comma_join(values: &[String]) -> String {
let mut out = String::new();
let mut iter = values.iter();
match iter.next() {
Some(s) => out.push_str(s[]),
None => return out
}
for value in iter {
out.push_str(", ");
out.push_str(value[]);
}
out
}
/// Push a ( token | quoted-string ) onto a string and return it again
pub fn push_maybe_quoted_string(mut s: String, t: &String) -> String {
if is_token(t) {
s.push_str(t[]);
s
} else {
push_quoted_string(s, t)
}
}
/// Make a string into a ( token | quoted-string ), preferring a token
pub fn maybe_quoted_string(s: &String) -> String {
if is_token(s) {
s.clone()
} else {
quoted_string(s)
}
}
/// Quote a string, to turn it into an RFC 2616 quoted-string
pub fn push_quoted_string(mut s: String, t: &String) -> String {
let i = s.len();
s.reserve(t.len() + i + 2);
s.push('"');
for c in t[].chars() {
if c == '\\' || c == '"' {
s.push('\\');
}
s.push(c);
}
s.push('"');
s
}
/// Quote a string, to turn it into an RFC 2616 quoted-string
pub fn quoted_string(s: &String) -> String {
push_quoted_string(String::new(), s)
}
/// Parse a quoted-string. Returns ``None`` if the string is not a valid quoted-string.
pub fn unquote_string(s: &String) -> Option<String> {
enum State { Start, Normal, Escaping, End }
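    // Start: before the opening quote; Normal: inside the quoted text;
    // Escaping: immediately after a backslash; End: after the closing quote.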
let mut state = State::Start;
let mut output = String::new();
// Strings with escapes cause overallocation, but it's not worth a second pass to avoid this!
output.reserve(s.len() - 2);
let mut iter = s[].chars();
loop {
state = match (state, iter.next()) {
(State::Start, Some(c)) if c == '"' => State::Normal,
(State::Start, Some(_)) => return None,
(State::Normal, Some(c)) if c == '\\' => State::Escaping,
(State::Normal, Some(c)) if c == '"' => State::End,
(State::Normal, Some(c)) | (State::Escaping, Some(c)) => {
output.push(c);
State::Normal
},
(State::End, Some(_)) => return None,
(State::End, None) => return Some(output),
(_, None) => return None,
}
}
}
/// Parse a ( token | quoted-string ). Returns ``None`` if it is not valid.
pub fn maybe_unquote_string(s: &String) -> Option<String> {
if is_token(s) {
Some(s.clone())
} else {
unquote_string(s)
}
}
// Takes and emits the String instead of the &mut str for a simpler, fluid interface
pub fn push_parameter(mut s: String, k: &String, v: &String) -> String |
// pub fn push_parameters<K: Str, V: Str>(mut s: String, parameters: &[(K, V)]) -> String {
pub fn push_parameters(mut s: String, parameters: &[(String, String)]) -> String {
for &(ref k, ref v) in parameters.iter() {
s.push(';');
s = push_parameter(s, k, v);
}
s
}
#[cfg(test)]
mod test {
use super::{normalise_header_name, comma_split, comma_split_iter, comma_join,
push_parameter, push_parameters, push_maybe_quoted_string, push_quoted_string,
maybe_quoted_string, quoted_string, unquote_string, maybe_unquote_string};
#[test]
#[should_fail]
fn test_normalise_header_name_fail() {
normalise_header_name(&String::from_str("foö-bar"));
}
#[test]
fn test_normalise_header_name() {
assert_eq!(normalise_header_name(&String::from_str("foo-bar")), String::from_str("Foo-Bar"));
assert_eq!(normalise_header_name(&String::from_str("FOO-BAR")), String::from_str("Foo-Bar"));
}
#[test]
fn test_comma_split() {
// Simple 0-element case
assert_eq!(comma_split(""), vec!(String::new()));
// Simple 1-element case
assert_eq!(comma_split("foo"), vec!(String::from_str("foo")));
// Simple 2-element case
assert_eq!(comma_split("foo,bar"), vec!(String::from_str("foo"), String::from_str("bar")));
// Simple >2-element case
assert_eq!(comma_split("foo,bar,baz,quux"), vec!(String::from_str("foo"), String::from_str("bar"), String::from_str("baz"), String::from_str("quux")));
// Doesn't handle quoted-string intelligently
assert_eq!(comma_split("\"foo,bar\",baz"), vec!(String::from_str("\"foo"), String::from_str("bar\""), String::from_str("baz")));
// Doesn't do right trimming, but does left
assert_eq!(comma_split(" foo;q=0.8 , bar/* "), vec!(String::from_str("foo;q=0.8 "), String::from_str("bar/* ")));
}
#[test]
fn test_comma_split_iter() {
// These are the same cases as in test_comma_split above.
let s = "";
assert_eq!(comma_split_iter(s).collect::< Vec<&'static str> >(), vec![""]);
let s = "foo";
assert_eq!(comma_split_iter(s).collect::< Vec<&'static str> >(), vec!["foo"]);
let s = "foo,bar";
assert_eq!(comma_split_iter(s).collect::< Vec<&'static str> >(), vec!["foo", "bar"]);
let s = "foo,bar,baz,quux";
assert_eq!(comma_split_iter(s).collect::< Vec<&'static str> >(), vec!["foo", "bar", "baz", "quux"]);
let s = "\"foo,bar\",baz";
assert_eq!(comma_split_iter(s).collect::< Vec<&'static str> >(), vec!["\"foo", "bar\"", "baz"]);
let s = " foo;q=0.8 , bar/* ";
assert_eq!(comma_split_iter(s).collect::< Vec<&'static str> >(), vec!["foo;q=0.8 ", "bar/* "]);
}
#[test]
fn test_comma_join() {
assert_eq!(comma_join(&[String::new()]), String::new());
assert_eq!(comma_join(&[String::from_str("foo")]), String::from_str("foo"));
assert_eq!(comma_join(&[String::from_str("foo"), String::from_str("bar")]), String::from_str("foo, bar"));
assert_eq!(comma_join(&[String::from_str("foo"), String::from_str("bar"), String::from_str("baz"), String::from_str("quux")]), String::from_str("foo, bar, baz, quux"));
assert_eq!(comma_join(&[String::from_str("\"foo,bar\""), String::from_str("baz")]), String::from_str("\"foo,bar\", baz"));
assert_eq!(comma_join(&[String::from_str(" foo;q=0.8 "), String::from_str("bar/* ")]), String::from_str(" foo;q=0.8 , bar/* "));
}
#[test]
fn test_push_maybe_quoted_string() {
assert_eq!(push_maybe_quoted_string(String::from_str("foo,"), &String::from_str("bar")), String::from_str("foo,bar"));
assert_eq!(push_maybe_quoted_string(String::from_str("foo,"), &String::from_str("bar/baz")), String::from_str("foo,\"bar/baz\""));
}
#[test]
fn test_maybe_quoted_string() {
assert_eq!(maybe_quoted_string(&String::from_str("bar")), String::from_str("bar"));
assert_eq!(maybe_quoted_string(&String::from_str("bar/baz \"yay\"")), String::from_str("\"bar/baz \\\"yay\\\"\""));
}
#[test]
fn test_push_quoted_string() {
assert_eq!(push_quoted_string(String::from_str("foo,"), &String::from_str("bar")), String::from_str("foo,\"bar\""));
assert_eq!(push_quoted_string(String::from_str("foo,"), &String::from_str("bar/baz \"yay\\\"")),
String::from_str("foo,\"bar/baz \\\"yay\\\\\\\"\""));
}
#[test]
fn test_quoted_string() {
assert_eq!(quoted_string(&String::from_str("bar")), String::from_str("\"bar\""));
assert_eq!(quoted_string(&String::from_str("bar/baz \"yay\\\"")), String::from_str("\"bar/baz \\\"yay\\\\\\\"\""));
}
#[test]
fn test_unquote_string() {
assert_eq!(unquote_string(&String::from_str("bar")), None);
assert_eq!(unquote_string(&String::from_str("\"bar\"")), Some(String::from_str("bar")));
assert_eq!(unquote_string(&String::from_str("\"bar/baz \\\"yay\\\\\\\"\"")), Some(String::from_str("bar/baz \"yay\\\"")));
assert_eq!(unquote_string(&String::from_str("\"bar")), None);
assert_eq!(unquote_string(&String::from_str(" \"bar\"")), None);
assert_eq!(unquote_string(&String::from_str("\"bar\" ")), None);
assert_eq!(unquote_string(&String::from_str("\"bar\" \"baz\"")), None);
assert_eq!(unquote_string(&String::from_str("\"bar/baz \\\"yay\\\\\"\"")), None);
}
#[test]
fn test_maybe_unquote_string() {
assert_eq!(maybe_unquote_string(&String::from_str("bar")), Some(String::from_str("bar")));
assert_eq!(maybe_unquote_string(&String::from_str("\"bar\"")), Some(String::from_str("bar")));
assert_eq!(maybe_unquote_string(&String::from_str("\"bar/baz \\\"yay\\\\\\\"\"")), Some(String::from_str("bar/baz \"yay\\\"")));
assert_eq!(maybe_unquote_string(&String::from_str("\"bar")), None);
assert_eq!(maybe_unquote_string(&String::from_str(" \"bar\"")), None);
assert_eq!(maybe_unquote_string(&String::from_str("\"bar\" ")), None);
assert_eq!(maybe_unquote_string(&String::from_str("\"bar\" \"baz\"")), None);
assert_eq!(maybe_unquote_string(&String::from_str("\"bar/baz \\\"yay\\\\\"\"")), None);
}
#[test]
fn test_push_parameter() {
assert_eq!(push_parameter(String::from_str("foo"), &String::from_str("bar"), &String::from_str("baz")), String::from_str("foobar=baz"));
assert_eq!(push_parameter(String::from_str("foo"), &String::from_str("bar"), &String::from_str("baz/quux")), String::from_str("foobar=\"baz/quux\""));
}
#[test]
fn test_push_parameters() {
assert_eq!(push_parameters(String::from_str("foo"), [][]), String::from_str("foo"));
assert_eq!(push_parameters(String::from_str("foo"), [(String::from_str("bar"), String::from_str("baz"))][]), String::from_str("foo;bar=baz"));
assert_eq!(push_parameters(String::from_str("foo"), [(String::from_str("bar"), String::from_str("baz/quux"))][]), String::from_str("foo;bar=\"baz/quux\""));
assert_eq!(push_parameters(String::from_str("foo"), [(String::from_str("bar"), String::from_str("baz")), (String::from_str("quux"), String::from_str("fuzz"))][]),
String::from_str("foo;bar=baz;quux=fuzz"));
assert_eq!(push_parameters(String::from_str("foo"), [(String::from_str("bar"), String::from_str("baz")), (String::from_str("quux"), String::from_str("fuzz zee"))][]),
String::from_str("foo;bar=baz;quux=\"fuzz zee\""));
assert_eq!(push_parameters(String::from_str("foo"), [(String::from_str("bar"), String::from_str("baz/quux")), (String::from_str("fuzz"), String::from_str("zee"))][]),
String::from_str("foo;bar=\"baz/quux\";fuzz=zee"));
}
}
| {
s.push_str(k[]);
s.push('=');
push_maybe_quoted_string(s, v)
} |
linkRow.tsx | import React from 'react';
import { Alignment, Box, Button, Direction, Image, KibaIcon, Stack, Text } from '@kibalabs/ui-react';
import { Link } from '../model'; |
interface ILinkRowProps {
link: Link;
onViewLinkClicked?: (link: Link) => void;
}
export const LinkRow = (props: ILinkRowProps): React.ReactElement => {
const destinationUrl = new URL(props.link.destination);
const onViewClicked = (): void => {
if (props.onViewLinkClicked) {
props.onViewLinkClicked(props.link);
}
};
return (
<Stack direction={Direction.Horizontal} shouldAddGutters={true} childAlignment={Alignment.Center} contentAlignment={Alignment.Start} isFullWidth={false}>
<Box maxHeight='15px' maxWidth='15px' isFullWidth={false} isFullHeight={false}>
<Image variant='small' fitType='crop' source={`https://icons.duckduckgo.com/ip3/${destinationUrl.hostname}.ico`} alternativeText={destinationUrl.hostname} />
</Box>
<Text>{`/${props.link.sourcePath}`}</Text>
<KibaIcon iconId='mui-arrow-right-alt' />
<Text>{props.link.destination}</Text>
{props.onViewLinkClicked && (
<Button variant='small' text='View' onClicked={onViewClicked} />
)}
</Stack>
);
}; | |
UsersController.go | package controllers
import (
"net/http"
"../lib"
"../models"
)
// GetAllUsersHandler responds with the full list of users.
func GetAllUsersHandler(w http.ResponseWriter, req *http.Request) {
res := lib.Response{ResponseWriter: w}
user := new(models.User)
users := user.FetchAll()
res.SendOK(users)
}
// CreateUserHandler creates a user from the JSON request body.
func CreateUserHandler(w http.ResponseWriter, r *http.Request) {
req := lib.Request{ResponseWriter: w, Request: r}
res := lib.Response{ResponseWriter: w}
user := new(models.User)
req.GetJSONBody(user)
if err := user.Save(); err != nil {
res.SendBadRequest(err.Error())
return
}
res.SendCreated(user)
}
// GetUserByIDHandler fetches a single user by the ID path variable.
func GetUserByIDHandler(w http.ResponseWriter, r *http.Request) {
req := lib.Request{ResponseWriter: w, Request: r}
res := lib.Response{ResponseWriter: w}
id, _ := req.GetVarID()
user := models.User{
ID: id,
}
if err := user.FetchByID(); err != nil {
res.SendNotFound()
return
}
res.SendOK(user)
}
// UpdateUserHandler updates the user identified by the ID path variable.
func UpdateUserHandler(w http.ResponseWriter, r *http.Request) |
// DeleteUserHandler deletes the user identified by the ID path variable.
func DeleteUserHandler(w http.ResponseWriter, r *http.Request) {
req := lib.Request{ResponseWriter: w, Request: r}
res := lib.Response{ResponseWriter: w}
id, _ := req.GetVarID()
user := models.User{
ID: id,
}
if err := user.Delete(); err != nil {
res.SendNotFound()
return
}
res.SendNoContent()
}
| {
req := lib.Request{ResponseWriter: w, Request: r}
res := lib.Response{ResponseWriter: w}
id, _ := req.GetVarID()
user := new(models.User)
req.GetJSONBody(user)
user.ID = id
if err := user.Save(); err != nil {
res.SendBadRequest(err.Error())
return
}
res.SendOK(user)
} |
custom.js | //Timer for messages and tasks
var i = 3, j=5, k=9;
function incrementI() {
i++;
document.getElementById('quickMessages').innerHTML = i;
}
setInterval('incrementI()', 5000);
function incrementJ() {
j++;
document.getElementById('quickAlerts').innerHTML = j; | k++;
// display the shop counter (k), not the alerts counter (j)
document.getElementById('quickShop').innerHTML = k;
}
setInterval('incrementK()', 9000); | }
setInterval('incrementJ()', 12000);
function incrementK() { |
tree2str.go | /* https://leetcode.com/problems/construct-string-from-binary-tree/#/description
You need to construct a string consists of parenthesis and integers from a binary tree with the preorder traversing way.
The null node needs to be represented by empty parenthesis pair "()".
And you need to omit all the empty parenthesis pairs that don't affect the one-to-one mapping relationship between the string and the original binary tree.
Example 1:
Input: Binary tree: [1,2,3,4]
1
/ \
2 3
/
4
Output: "1(2(4))(3)"
Explanation: Originally it needs to be "1(2(4)())(3()())",
but you need to omit all the unnecessary empty parenthesis pairs.
And it will be "1(2(4))(3)".
Example 2:
Input: Binary tree: [1,2,3,null,4]
1
/ \
2 3
\
4
Output: "1(2()(4))(3)"
Explanation: Almost the same as the first example,
except we can't omit the first parenthesis pair, since doing so would break the one-to-one mapping relationship between the input and the output.
*/
package ltree
import (
"bytes"
"strconv"
)
func | (t *TreeNode) string {
buffer := new(bytes.Buffer)
var helper func(node *TreeNode)
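	// Preorder: emit the node value, then its children. The left pair of
	// parentheses is always written once the node has any child (it may be
	// empty, yielding "()"), while the right pair is written only when a
	// right child actually exists.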
helper = func(node *TreeNode) {
if node == nil {
return
}
buffer.WriteString(strconv.Itoa(node.Val))
if node.Left == nil && node.Right == nil {
return
}
buffer.WriteByte('(')
helper(node.Left)
buffer.WriteByte(')')
if node.Right != nil {
buffer.WriteByte('(')
helper(node.Right)
buffer.WriteByte(')')
}
}
helper(t)
return buffer.String()
}
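// The commented-out variant below builds the same string with fmt.Sprintf;
// the buffer-based version above avoids those intermediate allocations.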
/*
import "fmt"
func tree2str(t *TreeNode) string {
if t == nil {
return ""
}
if t.Left == nil && t.Right == nil {
return fmt.Sprintf("%d", t.Val)
}
left, right := "", ""
left = fmt.Sprintf("(%s)", tree2str(t.Left))
if t.Right != nil {
right = fmt.Sprintf("(%s)", tree2str(t.Right))
}
return fmt.Sprintf("%d%s%s", t.Val, left, right)
}
*/
| tree2str |
create-91e1c215f38e0074.js | (self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[3654],{60931:function(n,e,r){(window.__NEXT_P=window.__NEXT_P||[]).push(["/hosts/create",function(){return r(13794)}])},13794:function(n,e,r){"use strict";r.r(e),r.d(e,{default:function(){return c}});var t=r(85893),u=r(18545);function | (){return(0,t.jsx)(t.Fragment,{children:(0,t.jsx)("div",{className:"col-12 p-0 m-0",children:(0,t.jsx)(u.h,{mode:"create"})})})}}},function(n){n.O(0,[3978,699,9861,8545,9774,2888,179],(function(){return e=60931,n(n.s=e);var e}));var e=n.O();_N_E=e}]); | c |
setting.rs | use std::collections::HashMap;
use http::StatusCode;
use serde::{Deserialize, Serialize, Deserializer};
use tide::response::IntoResponse;
use tide::{Context, Response};
use crate::error::{ResponseError, SResult};
use crate::helpers::tide::ContextExt;
use crate::models::token::ACL::*;
use crate::routes::document::IndexUpdateResponse;
use crate::Data;
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct Setting {
pub ranking_order: Option<RankingOrder>,
pub distinct_field: Option<DistinctField>,
pub ranking_rules: Option<RankingRules>,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum RankingOrdering {
Asc,
Dsc,
}
pub type RankingOrder = Vec<String>;
pub type DistinctField = String;
pub type RankingRules = HashMap<String, RankingOrdering>;
pub async fn get(ctx: Context<Data>) -> SResult<Response> {
ctx.is_allowed(SettingsRead)?;
let index = ctx.index()?;
let db = &ctx.state().db;
let reader = db.main_read_txn().map_err(ResponseError::internal)?;
let settings = match index.main.customs(&reader).unwrap() {
Some(bytes) => bincode::deserialize(bytes).unwrap(),
None => Setting::default(),
};
Ok(tide::response::json(settings))
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase", deny_unknown_fields)]
pub struct | {
#[serde(default, deserialize_with = "deserialize_some")]
pub ranking_order: Option<Option<RankingOrder>>,
#[serde(default, deserialize_with = "deserialize_some")]
pub distinct_field: Option<Option<DistinctField>>,
#[serde(default, deserialize_with = "deserialize_some")]
pub ranking_rules: Option<Option<RankingRules>>,
}
// Any value that is present is deserialized as Some, including an explicit null.
fn deserialize_some<'de, T, D>(deserializer: D) -> Result<Option<T>, D::Error>
where T: Deserialize<'de>,
D: Deserializer<'de>
{
Deserialize::deserialize(deserializer).map(Some)
}
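// Combined with `#[serde(default)]` this distinguishes three payload shapes,
// e.g. for `distinctField` (hypothetical values):
//   field absent              => None            (leave the setting as-is)
//   "distinctField": null     => Some(None)      (clear the setting)
//   "distinctField": "id"     => Some(Some(..))  (replace the setting)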
pub async fn update(mut ctx: Context<Data>) -> SResult<Response> {
ctx.is_allowed(SettingsWrite)?;
let settings: SettingBody = ctx.body_json().await.map_err(ResponseError::bad_request)?;
let index = ctx.index()?;
let db = &ctx.state().db;
let reader = db.main_read_txn().map_err(ResponseError::internal)?;
let mut writer = db.update_write_txn().map_err(ResponseError::internal)?;
let mut current_settings = match index.main.customs(&reader).unwrap() {
Some(bytes) => bincode::deserialize(bytes).unwrap(),
None => Setting::default(),
};
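    // Each body field is a doubly-wrapped Option: `Some(inner)` means the field
    // was present (an explicit JSON null arrives as `Some(None)` and clears the
    // setting), while `None` means it was absent and the current value is kept.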
if let Some(ranking_order) = settings.ranking_order {
current_settings.ranking_order = ranking_order;
}
if let Some(distinct_field) = settings.distinct_field {
current_settings.distinct_field = distinct_field;
}
if let Some(ranking_rules) = settings.ranking_rules {
current_settings.ranking_rules = ranking_rules;
}
let bytes = bincode::serialize(¤t_settings).unwrap();
let update_id = index
.customs_update(&mut writer, bytes)
.map_err(ResponseError::internal)?;
writer.commit().map_err(ResponseError::internal)?;
let response_body = IndexUpdateResponse { update_id };
Ok(tide::response::json(response_body)
.with_status(StatusCode::ACCEPTED)
.into_response())
}
| SettingBody |
service.rs | use std::task::{Context, Poll};
use std::time::Duration;
use std::{fmt, future::Future, marker::PhantomData, pin::Pin, rc::Rc};
use futures::future::{select, Either, FutureExt};
use futures::ready;
use ntex::rt::time::Delay;
use ntex::service::{IntoServiceFactory, Service, ServiceFactory};
use ntex::util::time::LowResTimeService;
use ntex_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed};
use super::framed::{Dispatcher, DispatcherItem};
use super::handshake::{Handshake, HandshakeResult};
type ResponseItem<U> = Option<<U as Encoder>::Item>;
/// Service builder - structure that follows the builder pattern
/// for building framed service instances.
pub(crate) struct FactoryBuilder<St, C, Io, Codec> {
connect: C,
disconnect_timeout: usize,
_t: PhantomData<(St, Io, Codec)>,
}
impl<St, C, Io, Codec> FactoryBuilder<St, C, Io, Codec>
where
Io: AsyncRead + AsyncWrite + Unpin,
C: ServiceFactory<
Config = (),
Request = Handshake<Io, Codec>,
Response = HandshakeResult<Io, St, Codec>,
>,
C::Error: fmt::Debug,
Codec: Decoder + Encoder,
{
/// Construct framed handler service factory with specified connect service
pub(crate) fn new<F>(connect: F) -> FactoryBuilder<St, C, Io, Codec>
where
F: IntoServiceFactory<C>,
{
FactoryBuilder {
connect: connect.into_factory(),
disconnect_timeout: 3000,
_t: PhantomData,
}
}
/// Set connection disconnect timeout in milliseconds.
///
/// Defines a timeout for disconnecting a connection. If the disconnect procedure does not
/// complete within this time, the connection gets dropped.
///
/// To disable the timeout, set the value to 0.
///
/// By default the disconnect timeout is set to 3 seconds.
pub(crate) fn disconnect_timeout(mut self, val: usize) -> Self {
self.disconnect_timeout = val;
self
}
pub(crate) fn build<F, T, Cfg>(self, service: F) -> FramedService<St, C, T, Io, Codec, Cfg>
where
F: IntoServiceFactory<T>,
T: ServiceFactory<
Config = St,
Request = DispatcherItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
{
FramedService {
connect: self.connect,
handler: Rc::new(service.into_factory()),
disconnect_timeout: self.disconnect_timeout,
time: LowResTimeService::with(Duration::from_secs(1)),
_t: PhantomData,
}
}
}
pub(crate) struct FramedService<St, C, T, Io, Codec, Cfg> {
connect: C,
handler: Rc<T>,
disconnect_timeout: usize,
time: LowResTimeService,
_t: PhantomData<(St, Io, Codec, Cfg)>,
}
impl<St, C, T, Io, Codec, Cfg> ServiceFactory for FramedService<St, C, T, Io, Codec, Cfg>
where
Io: AsyncRead + AsyncWrite + Unpin,
C: ServiceFactory<
Config = (),
Request = Handshake<Io, Codec>,
Response = HandshakeResult<Io, St, Codec>,
>,
C::Error: fmt::Debug,
<C::Service as Service>::Future: 'static,
T: ServiceFactory<
Config = St,
Request = DispatcherItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
> + 'static,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
{
type Config = Cfg;
type Request = Io;
type Response = ();
type Error = C::Error;
type InitError = C::InitError;
type Service = FramedServiceImpl<St, C::Service, T, Io, Codec>;
type Future = FramedServiceResponse<St, C, T, Io, Codec>;
fn new_service(&self, _: Cfg) -> Self::Future {
// create connect service and then create service impl
FramedServiceResponse {
fut: self.connect.new_service(()),
handler: self.handler.clone(),
disconnect_timeout: self.disconnect_timeout,
time: self.time.clone(),
}
}
}
#[pin_project::pin_project]
pub(crate) struct FramedServiceResponse<St, C, T, Io, Codec>
where
Io: AsyncRead + AsyncWrite + Unpin,
C: ServiceFactory<
Config = (),
Request = Handshake<Io, Codec>,
Response = HandshakeResult<Io, St, Codec>,
>,
C::Error: fmt::Debug,
T: ServiceFactory<
Config = St,
Request = DispatcherItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
{
#[pin]
fut: C::Future,
handler: Rc<T>,
disconnect_timeout: usize,
time: LowResTimeService,
}
impl<St, C, T, Io, Codec> Future for FramedServiceResponse<St, C, T, Io, Codec>
where
Io: AsyncRead + AsyncWrite + Unpin,
C: ServiceFactory<
Config = (),
Request = Handshake<Io, Codec>,
Response = HandshakeResult<Io, St, Codec>,
>,
C::Error: fmt::Debug,
T: ServiceFactory<
Config = St,
Request = DispatcherItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
{
type Output = Result<FramedServiceImpl<St, C::Service, T, Io, Codec>, C::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
let connect = ready!(this.fut.poll(cx))?;
Poll::Ready(Ok(FramedServiceImpl {
connect,
handler: this.handler.clone(),
disconnect_timeout: *this.disconnect_timeout,
time: this.time.clone(),
_t: PhantomData,
}))
}
}
pub(crate) struct FramedServiceImpl<St, C, T, Io, Codec> {
connect: C,
handler: Rc<T>,
disconnect_timeout: usize,
time: LowResTimeService,
_t: PhantomData<(St, Io, Codec)>,
}
impl<St, C, T, Io, Codec> Service for FramedServiceImpl<St, C, T, Io, Codec>
where
Io: AsyncRead + AsyncWrite + Unpin,
C: Service<Request = Handshake<Io, Codec>, Response = HandshakeResult<Io, St, Codec>>,
C::Error: fmt::Debug,
C::Future: 'static,
T: ServiceFactory<
Config = St,
Request = DispatcherItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
> + 'static,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
{
type Request = Io;
type Response = ();
type Error = C::Error;
type Future = Pin<Box<dyn Future<Output = Result<(), Self::Error>>>>;
#[inline]
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.connect.poll_ready(cx)
}
#[inline]
fn poll_shutdown(&self, cx: &mut Context<'_>, is_error: bool) -> Poll<()> {
self.connect.poll_shutdown(cx, is_error)
}
#[inline]
fn call(&self, req: Io) -> Self::Future {
log::trace!("Start connection handshake");
let handler = self.handler.clone();
let timeout = self.disconnect_timeout;
let handshake = self.connect.call(Handshake::new(req));
let time = self.time.clone();
Box::pin(async move {
let result = handshake.await.map_err(|e| {
log::trace!("Connection handshake failed: {:?}", e);
e
})?;
log::trace!("Connection handshake succeeded");
let handler = handler.new_service(result.state).await?;
log::trace!("Connection handler is created, starting dispatcher");
Dispatcher::with(result.framed, result.out, handler, time)
.keepalive_timeout(result.keepalive)
.disconnect_timeout(timeout as u64)
.await
})
}
}
/// Service builder - structure that follows the builder pattern
/// for building framed service instances.
pub(crate) struct FactoryBuilder2<St, C, Io, Codec> {
connect: C,
disconnect_timeout: usize,
_t: PhantomData<(St, Io, Codec)>,
}
impl<St, C, Io, Codec> FactoryBuilder2<St, C, Io, Codec>
where
Io: AsyncRead + AsyncWrite + Unpin,
C: ServiceFactory<
Config = (),
Request = HandshakeResult<Io, (), Codec>,
Response = HandshakeResult<Io, St, Codec>,
>,
C::Error: fmt::Debug,
Codec: Decoder + Encoder,
{
/// Construct framed handler service factory with specified connect service
pub(crate) fn new<F>(connect: F) -> FactoryBuilder2<St, C, Io, Codec>
where
F: IntoServiceFactory<C>,
{
FactoryBuilder2 {
connect: connect.into_factory(),
disconnect_timeout: 3000,
_t: PhantomData,
}
}
/// Set connection disconnect timeout in milliseconds.
pub(crate) fn disconnect_timeout(mut self, val: usize) -> Self {
self.disconnect_timeout = val;
self
}
pub(crate) fn build<F, T, Cfg>(self, service: F) -> FramedService2<St, C, T, Io, Codec, Cfg>
where
F: IntoServiceFactory<T>,
T: ServiceFactory<
Config = St,
Request = DispatcherItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
{
FramedService2 {
connect: self.connect,
handler: Rc::new(service.into_factory()),
disconnect_timeout: self.disconnect_timeout,
time: LowResTimeService::with(Duration::from_secs(1)),
_t: PhantomData,
}
}
}
pub(crate) struct FramedService2<St, C, T, Io, Codec, Cfg> {
connect: C,
handler: Rc<T>,
disconnect_timeout: usize,
time: LowResTimeService,
_t: PhantomData<(St, Io, Codec, Cfg)>,
}
impl<St, C, T, Io, Codec, Cfg> ServiceFactory for FramedService2<St, C, T, Io, Codec, Cfg>
where
Io: AsyncRead + AsyncWrite + Unpin,
C: ServiceFactory<
Config = (),
Request = HandshakeResult<Io, (), Codec>,
Response = HandshakeResult<Io, St, Codec>,
>,
C::Error: fmt::Debug,
<C::Service as Service>::Future: 'static,
T: ServiceFactory<
Config = St,
Request = DispatcherItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
> + 'static,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
{
type Config = Cfg;
type Request = (Framed<Io, Codec>, Option<Delay>);
type Response = ();
type Error = C::Error;
type InitError = C::InitError;
type Service = FramedServiceImpl2<St, C::Service, T, Io, Codec>;
type Future = FramedServiceResponse2<St, C, T, Io, Codec>;
fn new_service(&self, _: Cfg) -> Self::Future {
// create connect service and then create service impl
FramedServiceResponse2 {
fut: self.connect.new_service(()),
handler: self.handler.clone(),
disconnect_timeout: self.disconnect_timeout,
time: self.time.clone(),
}
}
}
#[pin_project::pin_project]
pub(crate) struct FramedServiceResponse2<St, C, T, Io, Codec>
where
Io: AsyncRead + AsyncWrite + Unpin,
C: ServiceFactory<
Config = (),
Request = HandshakeResult<Io, (), Codec>,
Response = HandshakeResult<Io, St, Codec>,
>,
C::Error: fmt::Debug,
T: ServiceFactory<
Config = St,
Request = DispatcherItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
{
#[pin]
fut: C::Future,
handler: Rc<T>,
disconnect_timeout: usize,
time: LowResTimeService,
}
impl<St, C, T, Io, Codec> Future for FramedServiceResponse2<St, C, T, Io, Codec>
where
Io: AsyncRead + AsyncWrite + Unpin,
C: ServiceFactory<
Config = (),
Request = HandshakeResult<Io, (), Codec>,
Response = HandshakeResult<Io, St, Codec>,
>,
C::Error: fmt::Debug,
T: ServiceFactory<
Config = St,
Request = DispatcherItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
{
type Output = Result<FramedServiceImpl2<St, C::Service, T, Io, Codec>, C::InitError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
let connect = ready!(this.fut.poll(cx))?;
Poll::Ready(Ok(FramedServiceImpl2 {
connect,
handler: this.handler.clone(),
disconnect_timeout: *this.disconnect_timeout,
time: this.time.clone(),
_t: PhantomData,
}))
}
}
pub(crate) struct FramedServiceImpl2<St, C, T, Io, Codec> {
connect: C,
handler: Rc<T>,
disconnect_timeout: usize,
time: LowResTimeService,
_t: PhantomData<(St, Io, Codec)>,
}
impl<St, C, T, Io, Codec> Service for FramedServiceImpl2<St, C, T, Io, Codec>
where
Io: AsyncRead + AsyncWrite + Unpin,
C: Service<
Request = HandshakeResult<Io, (), Codec>,
Response = HandshakeResult<Io, St, Codec>,
>,
C::Error: fmt::Debug,
C::Future: 'static,
T: ServiceFactory<
Config = St,
Request = DispatcherItem<Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
> + 'static,
<T::Service as Service>::Error: 'static,
<T::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
{
type Request = (Framed<Io, Codec>, Option<Delay>);
type Response = ();
type Error = C::Error;
type Future = Pin<Box<dyn Future<Output = Result<(), Self::Error>>>>;
#[inline]
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.connect.poll_ready(cx)
}
#[inline]
fn poll_shutdown(&self, cx: &mut Context<'_>, is_error: bool) -> Poll<()> {
self.connect.poll_shutdown(cx, is_error)
}
#[inline]
fn call(&self, (req, delay): (Framed<Io, Codec>, Option<Delay>)) -> Self::Future {
log::trace!("Start connection handshake");
let handler = self.handler.clone();
let timeout = self.disconnect_timeout;
let handshake = self.connect.call(Handshake::with_codec(req));
let time = self.time.clone();
Box::pin(async move {
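            // Race the handshake (and handler construction) against the optional
            // delay: if the delay wins, the handshake has timed out and no
            // dispatcher is started.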
let (framed, out, ka, handler) = if let Some(delay) = delay {
let res = select(
delay,
async {
let result = handshake.await.map_err(|e| {
log::trace!("Connection handshake failed: {:?}", e);
e
})?;
log::trace!("Connection handshake succeeded");
let handler = handler.new_service(result.state).await?;
log::trace!("Connection handler is created, starting dispatcher");
Ok::<_, C::Error>((
result.framed,
result.out,
result.keepalive,
handler,
))
}
.boxed_local(),
)
.await;
match res {
Either::Left(_) => {
log::warn!("Handshake timed out"); | }
Either::Right(item) => item.0?,
}
} else {
let result = handshake.await.map_err(|e| {
log::trace!("Connection handshake failed: {:?}", e);
e
})?;
log::trace!("Connection handshake succeeded");
let handler = handler.new_service(result.state).await?;
log::trace!("Connection handler is created, starting dispatcher");
(result.framed, result.out, result.keepalive, handler)
};
Dispatcher::with(framed, out, handler, time)
.keepalive_timeout(ka)
.disconnect_timeout(timeout as u64)
.await
})
}
} | return Ok(()); |
connection-error.js | var test = require('tape')
var mockAdapter = require('any-db-fake')
var ConnectionPool = require('../')
test('Connection error forwarding', function (t) {
// A stub adapter errors on connect
var pool = ConnectionPool(mockAdapter({
createConnection: function (_, callback) {
process.nextTick(function () { callback(new Error("Blammo")) })
}
}))
t.plan(2)
t.on('end', pool.close.bind(pool))
t.test('Connection errors in pool.query', function (t) {
t.plan(6);
pool.query('This is not valid SQL', function(err) {
t.assert(err, "Error should be passed to callback when there are no params")
t.equal('Blammo', err.message, "Got expected error") | t.equal('Blammo', err.message, "Got expected error")
});
pool.query('Still invalid SQL').on('error', function (err) {
t.assert(err, "Error should be emitted when there is no callback")
t.equal('Blammo', err.message, "Got expected error")
})
});
t.test('Connection errors in pool.acquire', function (t) {
t.plan(2)
pool.acquire(function (err, conn) {
t.assert(err, "Error is forwarded to callback")
t.equal('Blammo', err.message, "Got expected error")
})
})
}) | });
pool.query('This is not valid SQL', [], function(err) {
t.assert(err, "Error should be passed to callback when there are params") |
order_dependencies.py | import csv
import sys
def orderEdges(fileName):
    list_of_edges = []
    # close the file deterministically instead of leaking the handle
    with open(fileName) as dynamic_dependencies_file:
        csv_reader = csv.reader(dynamic_dependencies_file)
        for row in csv_reader:
            list_of_edges.append(row[0].split())
    sortedList = insertionSort(list_of_edges)
    return sortedList
def writeCSV(sortedList, fileName):
    # newline="" stops the csv module inserting blank rows on Windows
    with open(fileName, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerows(sortedList)
def | (list_of_values):
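    # NOTE: despite the name, this is a selection sort -- each pass finds the
    # minimum remaining edge (by its numeric third column) and swaps it into place.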
for i in range(len(list_of_values)):
j = findMin(i, list_of_values)
list_of_values[i], list_of_values[j] = list_of_values[j], list_of_values[i]
return list_of_values
def findMin(i, list_of_values):
smallest_value = int(list_of_values[i][2])
index = i
for j in range(i, len(list_of_values)):
if int(list_of_values[j][2]) < smallest_value:
index = j
smallest_value = int(list_of_values[j][2])
return index
if __name__ == "__main__":
fileName = sys.argv[1]
sortedList = orderEdges(fileName)
writeCSV(sortedList, 'sorted_edges.csv')
| insertionSort |
test_map.py | from openmdao.api import Group, Problem, MetaModelUnStructuredComp, NearestNeighbor
from openmdao.utils.assert_utils import assert_near_equal
import numpy as np
import unittest
class CompressorMap(MetaModelUnStructuredComp):
def | (self):
super(CompressorMap, self).__init__()
self.add_input('Nc', val=1.0)
self.add_input('Rline', val=2.0)
self.add_input('alpha', val=0.0)
self.add_output('PR', val=1.0, surrogate=NearestNeighbor(interpolant_type='linear'))
self.add_output('eff', val=1.0, surrogate=NearestNeighbor(interpolant_type='linear'))
self.add_output('Wc', val=1.0, surrogate=NearestNeighbor(interpolant_type='linear'))
class TestMap(unittest.TestCase):
def test_comp_map(self):
# create compressor map and save reference to options (for training data)
c = CompressorMap()
m = c.options
# add compressor map to problem
p = Problem()
p.model.add_subsystem('compmap', c)
p.setup()
# train metamodel
Nc = np.array([0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1])
Rline = np.array([1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0])
alpha = np.array([0.0, 1.0])
Nc_mat, Rline_mat, alpha_mat = np.meshgrid(Nc, Rline, alpha, sparse=False)
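        # flatten the 3-D grids so every training sample is one (Nc, Rline, alpha) point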
m['train:Nc'] = Nc_mat.flatten()
m['train:Rline'] = Rline_mat.flatten()
m['train:alpha'] = alpha_mat.flatten()
m['train:PR'] = m['train:Nc']*m['train:Rline']+m['train:alpha']
m['train:eff'] = m['train:Nc']*m['train:Rline']**2+m['train:alpha']
m['train:Wc'] = m['train:Nc']**2*m['train:Rline']**2+m['train:alpha']
# check predicted values
p['compmap.Nc'] = 0.9
p['compmap.Rline'] = 2.0
p['compmap.alpha'] = 0.0
p.run_model()
tol = 1e-1
assert_near_equal(p['compmap.PR'], p['compmap.Nc']*p['compmap.Rline']+p['compmap.alpha'], tol)
assert_near_equal(p['compmap.eff'], p['compmap.Nc']*p['compmap.Rline']**2+p['compmap.alpha'], tol)
assert_near_equal(p['compmap.Wc'], p['compmap.Nc']**2*p['compmap.Rline']**2+p['compmap.alpha'], tol)
p['compmap.Nc'] = 0.95
p['compmap.Rline'] = 2.1
p['compmap.alpha'] = 0.0
p.run_model()
assert_near_equal(p['compmap.PR'], p['compmap.Nc']*p['compmap.Rline']+p['compmap.alpha'], tol)
assert_near_equal(p['compmap.eff'], p['compmap.Nc']*p['compmap.Rline']**2+p['compmap.alpha'], tol)
assert_near_equal(p['compmap.Wc'], p['compmap.Nc']**2*p['compmap.Rline']**2+p['compmap.alpha'], tol)
if __name__ == "__main__":
unittest.main()
| __init__ |
main.go | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by cloud.google.com/go/internal/gapicgen/gensnippets. DO NOT EDIT.
// [START aiplatform_v1beta1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync]
package main
import (
"context"
aiplatform "cloud.google.com/go/aiplatform/apiv1beta1"
aiplatformpb "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1"
)
func main() |
// [END aiplatform_v1beta1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync]
| {
ctx := context.Background()
c, err := aiplatform.NewJobClient(ctx)
if err != nil {
// TODO: Handle error.
}
defer c.Close()
req := &aiplatformpb.DeleteModelDeploymentMonitoringJobRequest{
// TODO: Fill request struct fields.
// See https://pkg.go.dev/google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1#DeleteModelDeploymentMonitoringJobRequest.
}
op, err := c.DeleteModelDeploymentMonitoringJob(ctx, req)
if err != nil {
// TODO: Handle error.
}
err = op.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
} |
color.go | package color
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/mattn/go-isatty"
"github.com/shiena/ansicolor"
)
// NoColor defines if the output is colorized or not. It's dynamically set to
// false or true based on the stdout's file descriptor referring to a terminal
// or not. This is a global option and affects all colors. For more control
// over individual colors, use each Color's DisableColor() method.
var NoColor = !isatty.IsTerminal(os.Stdout.Fd())
// Color defines a custom color object which is defined by SGR parameters.
type Color struct {
params []Attribute
noColor *bool
}
// Attribute defines a single SGR Code
type Attribute int
const escape = "\x1b"
// Base attributes
const (
Reset Attribute = iota
Bold
Faint
Italic
Underline
BlinkSlow
BlinkRapid
ReverseVideo
Concealed
CrossedOut
)
// Foreground text colors
const (
FgBlack Attribute = iota + 30
FgRed
FgGreen
FgYellow
FgBlue
FgMagenta
FgCyan
FgWhite
)
// Background text colors
const (
BgBlack Attribute = iota + 40
BgRed
BgGreen
BgYellow
BgBlue
BgMagenta
BgCyan
BgWhite
)
// New returns a newly created color object.
func New(value ...Attribute) *Color {
c := &Color{params: make([]Attribute, 0)}
c.Add(value...)
return c
}
// Set sets the given parameters immediately. It will change the color of
// output with the given SGR parameters until color.Unset() is called.
func Set(p ...Attribute) *Color {
c := New(p...)
c.Set()
return c
}
// Unset resets all escape attributes and clears the output. Usually should
// be called after Set().
func Unset() {
if NoColor {
return
}
fmt.Fprintf(Output, "%s[%dm", escape, Reset)
}
// Set sets the SGR sequence.
func (c *Color) Set() *Color {
if c.isNoColorSet() {
return c
}
fmt.Fprintf(Output, c.format())
return c
}
func (c *Color) unset() {
if c.isNoColorSet() {
return
}
Unset()
}
// Add is used to chain SGR parameters. Use as many parameters as needed to
// combine and create custom color objects. Example: Add(color.FgRed, color.Underline).
func (c *Color) Add(value ...Attribute) *Color {
c.params = append(c.params, value...)
return c
}
func (c *Color) prepend(value Attribute) {
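	// grow the slice by one, shift the existing params right, then put value first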
c.params = append(c.params, 0)
copy(c.params[1:], c.params[0:])
c.params[0] = value
}
// Output defines the standard output of the print functions. By default
// os.Stdout is used.
var Output = ansicolor.NewAnsiColorWriter(os.Stdout)
// Print formats using the default formats for its operands and writes to
// standard output. Spaces are added between operands when neither is a
// string. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Print(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprint(Output, a...)
}
// Printf formats according to a format specifier and writes to standard output.
// It returns the number of bytes written and any write error encountered.
// This is the standard fmt.Printf() method wrapped with the given color.
func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintf(Output, format, a...)
}
// Println formats using the default formats for its operands and writes to
// standard output. Spaces are always added between operands and a newline is
// appended. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Println(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintln(Output, a...)
}
// PrintFunc returns a new function that prints the passed arguments as
// colorized with color.Print().
func (c *Color) PrintFunc() func(a ...interface{}) {
return func(a ...interface{}) { c.Print(a...) }
}
// PrintfFunc returns a new function that prints the passed arguments as
// colorized with color.Printf().
func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
return func(format string, a ...interface{}) { c.Printf(format, a...) }
}
// PrintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Println().
func (c *Color) PrintlnFunc() func(a ...interface{}) {
return func(a ...interface{}) { c.Println(a...) }
}
// SprintFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprint(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output, example:
//
// put := New(FgYellow).SprintFunc()
// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
func (c *Color) SprintFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
}
// SprintfFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintf(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
return func(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
}
// SprintlnFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintln(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
}
// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
// an example output might be: "1;36" -> bold cyan
func (c *Color) sequence() string {
format := make([]string, len(c.params))
for i, v := range c.params {
format[i] = strconv.Itoa(int(v))
}
return strings.Join(format, ";")
}
// wrap wraps the s string with the colors attributes. The string is ready to
// be printed.
func (c *Color) wrap(s string) string {
if c.isNoColorSet() {
return s
}
return c.format() + s + c.unformat()
}
func (c *Color) format() string {
return fmt.Sprintf("%s[%sm", escape, c.sequence())
}
func (c *Color) unformat() string {
return fmt.Sprintf("%s[%dm", escape, Reset)
}
// DisableColor disables the color output. Useful to not change any existing
// code and still being able to output. Can be used for flags like
// "--no-color". To enable back use EnableColor() method.
func (c *Color) DisableColor() {
c.noColor = boolPtr(true)
}
// EnableColor enables the color output. Use it in conjunction with
// DisableColor(). Otherwise this method has no side effects.
func (c *Color) EnableColor() {
c.noColor = boolPtr(false)
}
func (c *Color) isNoColorSet() bool {
	// check first whether the user explicitly set an option on this color
if c.noColor != nil {
return *c.noColor
}
// if not return the global option, which is disabled by default
return NoColor
}
// Equals returns a boolean value indicating whether two colors are equal.
func (c *Color) Equals(c2 *Color) bool {
if len(c.params) != len(c2.params) {
return false
}
for _, attr := range c.params {
if !c2.attrExists(attr) {
return false
}
}
return true
}
func (c *Color) attrExists(a Attribute) bool {
for _, attr := range c.params {
if attr == a {
return true
}
}
return false
}
func boolPtr(v bool) *bool {
return &v
}
// Black is a convenient helper function to print with black foreground. A
// newline is appended to format by default.
func Black(format string, a ...interface{}) { printColor(format, FgBlack, a...) }
// Red is a convenient helper function to print with red foreground. A
// newline is appended to format by default.
func Red(format string, a ...interface{}) |
// Green is a convenient helper function to print with green foreground. A
// newline is appended to format by default.
func Green(format string, a ...interface{}) { printColor(format, FgGreen, a...) }
// Yellow is a convenient helper function to print with yellow foreground.
// A newline is appended to format by default.
func Yellow(format string, a ...interface{}) { printColor(format, FgYellow, a...) }
// Blue is a convenient helper function to print with blue foreground. A
// newline is appended to format by default.
func Blue(format string, a ...interface{}) { printColor(format, FgBlue, a...) }
// Magenta is a convenient helper function to print with magenta foreground.
// A newline is appended to format by default.
func Magenta(format string, a ...interface{}) { printColor(format, FgMagenta, a...) }
// Cyan is a convenient helper function to print with cyan foreground. A
// newline is appended to format by default.
func Cyan(format string, a ...interface{}) { printColor(format, FgCyan, a...) }
// White is a convenient helper function to print with white foreground. A
// newline is appended to format by default.
func White(format string, a ...interface{}) { printColor(format, FgWhite, a...) }
func printColor(format string, p Attribute, a ...interface{}) {
if !strings.HasSuffix(format, "\n") {
format += "\n"
}
c := &Color{params: []Attribute{p}}
c.Printf(format, a...)
}
// BlackString is a convenient helper function to return a string with black
// foreground.
func BlackString(format string, a ...interface{}) string {
return New(FgBlack).SprintfFunc()(format, a...)
}
// RedString is a convenient helper function to return a string with red
// foreground.
func RedString(format string, a ...interface{}) string {
return New(FgRed).SprintfFunc()(format, a...)
}
// GreenString is a convenient helper function to return a string with green
// foreground.
func GreenString(format string, a ...interface{}) string {
return New(FgGreen).SprintfFunc()(format, a...)
}
// YellowString is a convenient helper function to return a string with yellow
// foreground.
func YellowString(format string, a ...interface{}) string {
return New(FgYellow).SprintfFunc()(format, a...)
}
// BlueString is a convenient helper function to return a string with blue
// foreground.
func BlueString(format string, a ...interface{}) string {
return New(FgBlue).SprintfFunc()(format, a...)
}
// MagentaString is a convenient helper function to return a string with magenta
// foreground.
func MagentaString(format string, a ...interface{}) string {
return New(FgMagenta).SprintfFunc()(format, a...)
}
// CyanString is a convenient helper function to return a string with cyan
// foreground.
func CyanString(format string, a ...interface{}) string {
return New(FgCyan).SprintfFunc()(format, a...)
}
// WhiteString is a convenient helper function to return a string with white
// foreground.
func WhiteString(format string, a ...interface{}) string {
return New(FgWhite).SprintfFunc()(format, a...)
}
| { printColor(format, FgRed, a...) } |
GetFaceDetectionPaginator.ts | import { Rekognition } from "../Rekognition";
import { RekognitionClient } from "../RekognitionClient";
import {
GetFaceDetectionCommand,
GetFaceDetectionCommandInput,
GetFaceDetectionCommandOutput,
} from "../commands/GetFaceDetectionCommand";
import { RekognitionPaginationConfiguration } from "./Interfaces";
import { Paginator } from "@aws-sdk/types";
const makePagedClientRequest = async (
client: RekognitionClient,
input: GetFaceDetectionCommandInput,
...args: any
): Promise<GetFaceDetectionCommandOutput> => {
// @ts-ignore
return await client.send(new GetFaceDetectionCommand(input, ...args));
};
const makePagedRequest = async (
client: Rekognition,
input: GetFaceDetectionCommandInput,
...args: any
): Promise<GetFaceDetectionCommandOutput> => {
// @ts-ignore
return await client.getFaceDetection(input, ...args);
};
export async function* getFaceDetectionPaginate(
config: RekognitionPaginationConfiguration,
input: GetFaceDetectionCommandInput,
...additionalArguments: any
): Paginator<GetFaceDetectionCommandOutput> {
let token: string | undefined = config.startingToken || undefined;
let hasNext = true;
let page: GetFaceDetectionCommandOutput; | while (hasNext) {
input.NextToken = token;
input["MaxResults"] = config.pageSize;
if (config.client instanceof Rekognition) {
page = await makePagedRequest(config.client, input, ...additionalArguments);
} else if (config.client instanceof RekognitionClient) {
page = await makePagedClientRequest(config.client, input, ...additionalArguments);
} else {
throw new Error("Invalid client, expected Rekognition | RekognitionClient");
}
yield page;
token = page.NextToken;
hasNext = !!token;
}
// @ts-ignore
return undefined;
} | |
Icls.js | import xmljs from 'xml-js';
import Store from '../Model/Store';
import debugRenderer from 'debug';
import {autoCast, castString} from '../lib/utils';
let debug = debugRenderer('icls:parser:icls');
export default class IclsParser {
constructor () {
this._store = new Store();
}
_convertToUnderstandable (xml) {
return xmljs.xml2js(xml);
}
_buildTree (iclsXmlObj) {
let rootNode = iclsXmlObj.elements[0];
this._readTree(rootNode);
}
_readTree (el, ancestors = []) {
if (Array.isArray(el)) {
for (let element of el) {
this._readTree(element, ancestors);
}
return;
}
| this._parseChildNodes(el, ancestors);
}
_prepareNodeValues (values) {
return Object.keys(values).reduce((accumulator, curr) => {
accumulator[castString(curr)] = autoCast(values[curr]);
return accumulator;
}, {});
}
_parseSchemeNode (el, ancestors) {
if (el.name !== 'scheme' || el.attributes === undefined) {
return;
}
debug('Parse Scheme node');
this._setInStore(el.attributes, ancestors);
}
_parseOptionNode (el, ancestors) {
if (el.name !== 'option' || el.attributes === undefined) {
return;
}
debug('Parse Option %s node', el.attributes.name);
if (el.attributes.value === undefined && el.elements !== undefined) {
this._readTree(el.elements, [
...ancestors,
el.attributes.name
]);
return;
}
let attrs = {};
attrs[el.attributes.name] = el.attributes.value;
this._setInStore(attrs, ancestors);
}
_parseColorNode (el, ancestors) {
if (el.name !== 'colors' || el.elements === undefined) {
return;
}
debug('Parse Colors node');
this._readTree(
el.elements,
[
...ancestors,
'colors'
]
);
}
_parseAttributeNode (el, ancestors) {
if (el.name !== 'attributes' || el.elements === undefined) {
return;
}
debug('Parse Attributes node');
this._readTree(el.elements, ancestors);
}
_parseChildNodes (el, ancestors) {
debug('Parsing childnodes of %s', el.name);
if (ancestors.length) {
debug('Ancestors: %o', ancestors);
}
if (el.elements === undefined) {
return;
}
this._readTree(el.elements, ancestors);
}
getStore () {
return this._store;
}
parse (iclsXmlString) {
debug('Parsing started');
const iclsJsObj = this._convertToUnderstandable(iclsXmlString);
this._buildTree(iclsJsObj);
debug('Parsing Finished');
return this.getStore();
}
_setInStore (attrs, ancestors) {
attrs = this._prepareNodeValues(attrs);
for (let key in attrs) {
let storeKey = this._getStoreKey(key, ancestors);
this._store.set(storeKey, attrs[key]);
}
}
_getStoreKey (key, ancestors) {
let path = [
...ancestors,
key
];
return castString(path.join('.'));
}
} | this._parseSchemeNode(el, ancestors);
this._parseOptionNode(el, ancestors);
this._parseColorNode(el, ancestors);
this._parseAttributeNode(el, ancestors); |
control-card.js | import React from 'react';
import PropTypes from 'prop-types';
import Heading from '../heading';
import Icon from '../icon';
import Tooltip from '../tooltip';
export default class | extends React.Component {
static propTypes = {
/**
* The type of update to the card. Options are "close" and "collapse".
* Must have `onButtonClick` prop to enable action and button.
*/
buttonType: PropTypes.oneOf(['close', 'collapse']),
/** The content of the card. */
children: PropTypes.node.isRequired,
/**
* Called on click of update button. Must have `buttonType` prop to enable
* this action and button.
*/
onButtonClick: PropTypes.func,
/** Card title heading. */
title: PropTypes.string,
/**
* The card title heading variant. Options are "primary", "secondary",
* "tertiary", and "minor".
*/
titleSize: PropTypes.oneOf(['primary', 'secondary', 'tertiary', 'minor'])
};
static defaultProps = {
buttonType: 'close',
titleSize: 'tertiary'
};
onButtonClick = () => {
const { onButtonClick } = this.props;
if (onButtonClick) {
onButtonClick();
}
};
renderTitle() {
const { title, titleSize } = this.props;
if (!title) return null;
return (
<div className="mb24">
<Heading variant={titleSize}>{title}</Heading>
</div>
);
}
renderUpdateButton() {
const { onButtonClick, buttonType } = this.props;
if (!onButtonClick || !buttonType) return null;
let toolTipMessage = 'Close';
let updateIcon = 'close';
if (buttonType === 'collapse') {
toolTipMessage = 'Collapse';
updateIcon = 'caret-up';
}
return (
<div className="absolute top right mt18 mr18">
<Tooltip content={toolTipMessage} block={true}>
<button
aria-label={toolTipMessage}
type="button"
className="bg-transparent color-gray-dark color-blue-on-hover px0 py0"
onClick={onButtonClick}
>
<Icon name={updateIcon} />
</button>
</Tooltip>
</div>
);
}
render() {
const { children } = this.props;
return (
<section className="bg-white round-bold shadow-darken10-bold relative py36 py60-ml px18 px36-mm px60-ml">
{this.renderTitle()}
{this.renderUpdateButton()}
{children}
</section>
);
}
}
| ControlCard |
package.rs | use package::Version;
use package::PackageType;
/// A struct for storing package informations, such as its name and version.
#[derive(Debug, Serialize, Deserialize)]
pub struct Package {
name: String,
hash: u32,
version: Version,
package_type: PackageType,
path: String, // relative path
dependencies: Vec<Package>,
}
impl PartialEq for Package {
fn eq(&self, other: &Self) -> bool {
self.name == other.name &&
self.version == other.version
}
}
impl Package {
/// Returns the name of the package.
pub fn | (&self) -> &str {
&self.name
}
/// Returns the version of the package.
pub fn version(&self) -> &Version {
&self.version
}
/// Returns the hash of the package.
pub fn hash(&self) -> u32 {
self.hash
}
/// Returns the type of the package.
pub fn package_type(&self) -> &PackageType {
&self.package_type
}
} | name |
http2.go | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package conv
import (
"context"
"errors"
"github.com/alipay/sofa-mosn/pkg/protocol"
"github.com/alipay/sofa-mosn/pkg/protocol/http2"
"github.com/alipay/sofa-mosn/pkg/types"
)
func init() {
protocol.RegisterCommonConv(protocol.HTTP2, &common2http{}, &http2common{})
}
// common -> http2 converter
type common2http struct{}
func (c *common2http) ConvHeader(ctx context.Context, headerMap types.HeaderMap) (types.HeaderMap, error) {
if header, ok := headerMap.(protocol.CommonHeader); ok {
cheader := make(map[string]string, len(header))
delete(header, protocol.MosnHeaderDirection)
// copy headers | for k, v := range header {
cheader[k] = v
}
return protocol.CommonHeader(cheader), nil
}
return nil, errors.New("header type not supported")
}
func (c *common2http) ConvData(ctx context.Context, buffer types.IoBuffer) (types.IoBuffer, error) {
return buffer, nil
}
func (c *common2http) ConvTrailer(ctx context.Context, headerMap types.HeaderMap) (types.HeaderMap, error) {
return headerMap, nil
}
// http2 -> common converter
type http2common struct{}
func (c *http2common) ConvHeader(ctx context.Context, headerMap types.HeaderMap) (types.HeaderMap, error) {
headers := http2.DecodeHeader(headerMap)
direction := ""
switch headerMap.(type) {
case *http2.ReqHeader:
direction = protocol.Request
case *http2.RspHeader:
direction = protocol.Response
default:
return nil, errors.New("header type not supported")
}
headers.Set(protocol.MosnHeaderDirection, direction)
return headers, nil
}
func (c *http2common) ConvData(ctx context.Context, buffer types.IoBuffer) (types.IoBuffer, error) {
return buffer, nil
}
func (c *http2common) ConvTrailer(ctx context.Context, headerMap types.HeaderMap) (types.HeaderMap, error) {
return headerMap, nil
} | |
login.js | let fs = require('fs');
const electron = window.require("electron");
const win = electron.remote.getCurrentWindow();
let username;
let password;
checkUserInfo();
$(document).ready(function(){
$( ".btn" ).click(function( ) {
username = $("#username").val();
password = $("#password").val();
checkAccount();
});
$(".btn2").click(function(){
win.loadFile("register.html");
});
});
function checkUserInfo(){
const app = electron.remote.app;
let base = app.getAppPath();
fs.readFile(base + "/user.json", "utf8", (err, data) => {
if (err) {
console.log("doesn't exsist");
}else{
let library = JSON.parse(data.toString());
pullLibrary(library.Library);
}
});
}
function | () {
setTimeout(function () {
$.get("http://162.208.8.88/log.php", {Username: username, Password: password})
.done(function (data) {
var response = JSON.parse(data.toString());
if (response.logged == true) {
Alert.info("Success", "Logged in successfully!");
setTimeout(function(){
pullLibrary(response.library);
}, 1000);
} else {
console.log(data.toString());
Alert.error("Failed", "invalid login credentials")
}
});
}, 500);
}
function pullLibrary(library){
console.log(library);
const app = electron.remote.app;
var basepath = app.getAppPath();
let info = {"Username": username, "Password": password, "Library": library};
$.post('http://162.208.8.88/pull.php', {library: library},
function (data) {
console.log(data);
fs.writeFile(basepath + "/movies.json", data.toString(), (err, result) => { // WRITE
if (err) {
return console.error(err);
} else {
console.log("success");
let info = {Username: username, Password: password, Library: library}
setTimeout(function(){
fs.writeFile(basepath + "/user.json", JSON.stringify(info, null, 2), (err, result) => {
if (err) {
return console.error(err);
} else {
win.loadFile("index.html");
console.log("success again");
}
});
}, 300)
}
});
});
}
var Alert = undefined;
(function(Alert) {
var alert, error, info, success, warning, _container;
info = function(message, title, options) {
return alert("info", message, title, "icon-info-sign", options);
};
warning = function(message, title, options) {
return alert("warning", message, title, "icon-warning-sign", options);
};
error = function(message, title, options) {
return alert("error", message, title, "icon-minus-sign", options);
};
success = function(message, title, options) {
return alert("success", message, title, "icon-ok-sign", options);
};
alert = function(type, message, title, icon, options) {
var alertElem, messageElem, titleElem, iconElem, innerElem, _container;
if (typeof options === "undefined") {
options = {};
}
options = $.extend({}, Alert.defaults, options);
if (!_container) {
_container = $("#alerts");
if (_container.length === 0) {
_container = $("<ul>").attr("id", "alerts").appendTo($("body"));
}
}
if (options.width) {
_container.css({
width: options.width
});
}
alertElem = $("<li>").addClass("alert").addClass("alert-" + type);
setTimeout(function() {
alertElem.addClass('open');
}, 1);
if (icon) {
iconElem = $("<i>").addClass(icon);
alertElem.append(iconElem);
}
innerElem = $("<div>").addClass("alert-block");
alertElem.append(innerElem);
if (title) {
titleElem = $("<div>").addClass("alert-title").append(title);
innerElem.append(titleElem);
}
if (message) {
messageElem = $("<div>").addClass("alert-message").append(message);
innerElem.append(messageElem);
}
if (options.displayDuration > 0) {
setTimeout((function() {
leave();
}), options.displayDuration);
} else {
innerElem.append("<em>Click to Dismiss</em>");
}
alertElem.on("click", function() {
leave();
});
function leave() {
alertElem.removeClass('open');
alertElem.one('webkitTransitionEnd otransitionend oTransitionEnd msTransitionEnd transitionend', function() { return alertElem.remove(); });
}
return _container.prepend(alertElem);
};
Alert.defaults = {
width: "",
icon: "",
displayDuration: 3000,
pos: ""
};
Alert.info = info;
Alert.warning = warning;
Alert.error = error;
Alert.success = success;
return _container = void 0;
})(Alert || (Alert = {}));
this.Alert = Alert;
$('#test').on('click', function() {
Alert.info('Message');
});
jQuery.fn.swap = function(b){
// method from: http://blog.pengoworks.com/index.cfm/2008/9/24/A-quick-and-dirty-swap-method-for-jQuery
b = jQuery(b)[0];
var a = this[0];
var t = a.parentNode.insertBefore(document.createTextNode(''), a);
b.parentNode.insertBefore(a, b);
t.parentNode.insertBefore(b, t);
t.parentNode.removeChild(t);
return this;
};
| checkAccount |
ecs_guide.rs | use bevy::{
app::{AppExit, ScheduleRunnerPlugin, ScheduleRunnerSettings},
ecs::schedule::ReportExecutionOrderAmbiguities,
log::LogPlugin,
prelude::*,
utils::Duration,
};
use rand::random;
/// This is a guided introduction to Bevy's "Entity Component System" (ECS)
/// All Bevy app logic is built using the ECS pattern, so definitely pay attention!
///
/// Why ECS?
/// * Data oriented: Functionality is driven by data
/// * Clean Architecture: Loose coupling of functionality / prevents deeply nested inheritance
/// * High Performance: Massively parallel and cache friendly
///
/// ECS Definitions:
///
/// Component: just a normal Rust data type. generally scoped to a single piece of functionality
/// Examples: position, velocity, health, color, name
///
/// Entity: a collection of components with a unique id
/// Examples: Entity1 { Name("Alice"), Position(0, 0) }, Entity2 { Name("Bill"), Position(10, 5)
/// }
/// Resource: a shared global piece of data
/// Examples: asset_storage, events, system state
///
/// System: runs logic on entities, components, and resources
/// Examples: move_system, damage_system
///
/// Now that you know a little bit about ECS, lets look at some Bevy code!
/// We will now make a simple "game" to illustrate what Bevy's ECS looks like in practice.
// COMPONENTS: Pieces of functionality we add to entities. These are just normal Rust data types
//
// Our game will have a number of "players". Each player has a name that identifies them
struct Player {
name: String,
}
// Each player also has a score. This component holds on to that score
struct Score {
value: usize,
}
// RESOURCES: "Global" state accessible by systems. These are also just normal Rust data types!
//
// This resource holds information about the game:
#[derive(Default)]
struct GameState {
current_round: usize,
total_players: usize,
winning_player: Option<String>,
}
// This resource provides rules for our "game".
struct GameRules {
winning_score: usize,
max_rounds: usize,
max_players: usize,
}
// SYSTEMS: Logic that runs on entities, components, and resources. These generally run once each
// time the app updates.
//
// This is the simplest type of system. It just prints "This game is fun!" on each run:
fn print_message_system() {
println!("This game is fun!");
}
// Systems can also read and modify resources. This system starts a new "round" on each update:
// NOTE: "mut" denotes that the resource is "mutable"
// Res<GameRules> is read-only. ResMut<GameState> can modify the resource
fn new_round_system(game_rules: Res<GameRules>, mut game_state: ResMut<GameState>) {
game_state.current_round += 1;
println!(
"Begin round {} of {}",
game_state.current_round, game_rules.max_rounds
);
}
// This system updates the score for each entity with the "Player" and "Score" component.
fn score_system(mut query: Query<(&Player, &mut Score)>) {
for (player, mut score) in query.iter_mut() {
let scored_a_point = random::<bool>();
if scored_a_point {
score.value += 1;
println!(
"{} scored a point! Their score is: {}",
player.name, score.value
);
} else {
println!(
"{} did not score a point! Their score is: {}",
player.name, score.value
);
}
}
// this game isn't very fun is it :)
}
// This system runs on all entities with the "Player" and "Score" components, but it also
// accesses the "GameRules" resource to determine if a player has won.
fn score_check_system(
game_rules: Res<GameRules>,
mut game_state: ResMut<GameState>,
query: Query<(&Player, &Score)>,
) {
for (player, score) in query.iter() {
if score.value == game_rules.winning_score {
game_state.winning_player = Some(player.name.clone());
}
}
}
// This system ends the game if we meet the right conditions. This fires an AppExit event, which
// tells our App to quit. Check out the "event.rs" example if you want to learn more about using
// events.
fn game_over_system(
game_rules: Res<GameRules>,
game_state: Res<GameState>,
mut app_exit_events: EventWriter<AppExit>,
) {
if let Some(ref player) = game_state.winning_player {
println!("{} won the game!", player);
app_exit_events.send(AppExit);
} else if game_state.current_round == game_rules.max_rounds {
println!("Ran out of rounds. Nobody wins!");
app_exit_events.send(AppExit);
}
println!();
}
// This is a "startup" system that runs exactly once when the app starts up. Startup systems are
// generally used to create the initial "state" of our game. The only thing that distinguishes a
// "startup" system from a "normal" system is how it is registered: Startup:
// app.add_startup_system(startup_system) Normal: app.add_system(normal_system)
fn startup_system(mut commands: Commands, mut game_state: ResMut<GameState>) {
// Create our game rules resource
commands.insert_resource(GameRules {
max_rounds: 10,
winning_score: 4,
max_players: 4,
});
// Add some players to our world. Players start with a score of 0 ... we want our game to be
// fair!
commands.spawn_batch(vec![
(
Player {
name: "Alice".to_string(),
},
Score { value: 0 },
),
(
Player {
name: "Bob".to_string(),
},
Score { value: 0 },
),
]);
// set the total players to "2"
game_state.total_players = 2;
}
// This system uses a command buffer to (potentially) add a new player to our game on each
// iteration. Normal systems cannot safely access the World instance directly because they run in
// parallel. Our World contains all of our components, so mutating arbitrary parts of it in parallel
// is not thread safe. Command buffers give us the ability to queue up changes to our World without
// directly accessing it
fn new_player_system(
mut commands: Commands,
game_rules: Res<GameRules>,
mut game_state: ResMut<GameState>,
) {
// Randomly add a new player
let add_new_player = random::<bool>();
if add_new_player && game_state.total_players < game_rules.max_players {
game_state.total_players += 1;
commands.spawn_bundle((
Player {
name: format!("Player {}", game_state.total_players),
},
Score { value: 0 },
));
println!("Player {} joined the game!", game_state.total_players);
}
}
// If you really need full, immediate read/write access to the world or resources, you can use a
// "thread local system". These run on the main app thread (hence the name "thread local")
// WARNING: These will block all parallel execution of other systems until they finish, so they
// should generally be avoided if you care about performance
#[allow(dead_code)]
fn thread_local_system(world: &mut World) {
// this does the same thing as "new_player_system"
let total_players = world.get_resource_mut::<GameState>().unwrap().total_players;
let should_add_player = {
let game_rules = world.get_resource::<GameRules>().unwrap();
let add_new_player = random::<bool>();
add_new_player && total_players < game_rules.max_players
};
// Randomly add a new player
if should_add_player {
world.spawn().insert_bundle((
Player {
name: format!("Player {}", total_players),
},
Score { value: 0 },
));
let mut game_state = world.get_resource_mut::<GameState>().unwrap();
game_state.total_players += 1;
}
}
// Sometimes systems need their own unique "local" state. Bevy's ECS provides Local<T> resources for
// this case. Local<T> resources are unique to their system and are automatically initialized on
// your behalf (if they don't already exist). If you have a system's id, you can also access local
// resources directly in the Resources collection using `Resources::get_local()`. In general you
// should only need this feature in the following cases:
// 1. You have multiple instances of the same system and they each need their own unique state
// 2. You already have a global version of a resource that you don't want to overwrite for your
//    current system
// 3. You are too lazy to register the system's resource as a global resource
#[derive(Default)]
struct State {
counter: usize,
}
// NOTE: this doesn't do anything relevant to our game, it is just here for illustrative purposes
#[allow(dead_code)]
fn local_state_system(mut state: Local<State>, query: Query<(&Player, &Score)>) {
for (player, score) in query.iter() {
println!("processed: {} {}", player.name, score.value);
}
println!("this system ran {} times", state.counter);
state.counter += 1;
}
#[derive(Debug, Hash, PartialEq, Eq, Clone, StageLabel)]
enum MyStage {
BeforeRound,
AfterRound,
}
#[derive(Debug, Hash, PartialEq, Eq, Clone, SystemLabel)]
enum MyLabels {
ScoreCheck,
}
// Our Bevy app's entry point
fn main() {
// Bevy apps are created using the builder pattern. We use the builder to add systems,
// resources, and plugins to our app
App::new()
// Resources can be added to our app like this
.insert_resource(State { counter: 0 })
// Some systems are configured by adding their settings as a resource
.insert_resource(ScheduleRunnerSettings::run_loop(Duration::from_secs(5)))
// Plugins are just a grouped set of app builder calls (just like we're doing here).
// We could easily turn our game into a plugin, but you can check out the plugin example for
// that :) The plugin below runs our app's "system schedule" once every 5 seconds
// (configured above).
.add_plugin(ScheduleRunnerPlugin::default())
// Resources that implement the Default or FromResources trait can be added like this:
.init_resource::<GameState>()
// Startup systems run exactly once BEFORE all other systems. These are generally used for
// app initialization code (ex: adding entities and resources)
.add_startup_system(startup_system)
// The add_system call converts normal Rust functions into ECS systems:
.add_system(print_message_system)
// SYSTEM EXECUTION ORDER
//
// Each system belongs to a `Stage`, which controls the execution strategy and broad order
// of the systems within each tick. Startup stages (which startup systems are
// registered in) will always complete before ordinary stages begin,
// and every system in a stage must complete before the next stage advances.
// Once every stage has concluded, the main loop is complete and begins again.
//
// By default, all systems run in parallel, except when they require mutable access to a
// piece of data. This is efficient, but sometimes order matters.
// For example, we want our "game over" system to execute after all other systems to ensure
// we don't accidentally run the game for an extra round.
//
// Rather than splitting each of your systems into separate stages, you should force an
// explicit ordering between them by giving the relevant systems a label with
// `.label`, then using the `.before` or `.after` methods. Systems will not be
// scheduled until all of the systems that they have an "ordering dependency" on have
// completed.
//
// Doing that will, in just about all cases, lead to better performance compared to
// splitting systems between stages, because it gives the scheduling algorithm more
// opportunities to run systems in parallel.
// Stages are still necessary, however: end of a stage is a hard sync point
// (meaning, no systems are running) where `Commands` issued by systems are processed.
// This is required because commands can perform operations that are incompatible with
// having systems in flight, such as spawning or deleting entities,
// adding or removing resources, etc.
//
// add_system(system) adds systems to the UPDATE stage by default
// However we can manually specify the stage if we want to. The following is equivalent to
// add_system(score_system)
.add_system_to_stage(CoreStage::Update, score_system)
// We can also create new stages. Here is what our game's stage order will look like:
// "before_round": new_player_system, new_round_system
// "update": print_message_system, score_system
// "after_round": score_check_system, game_over_system
.add_stage_before(
CoreStage::Update,
MyStage::BeforeRound,
SystemStage::parallel(),
)
.add_stage_after(
CoreStage::Update,
MyStage::AfterRound,
SystemStage::parallel(),
)
.add_system_to_stage(MyStage::BeforeRound, new_round_system)
.add_system_to_stage(MyStage::BeforeRound, new_player_system) | MyStage::AfterRound,
score_check_system.label(MyLabels::ScoreCheck),
)
.add_system_to_stage(
MyStage::AfterRound,
game_over_system.after(MyLabels::ScoreCheck),
)
// We can check our systems for execution order ambiguities by examining the output produced
// in the console by using the `LogPlugin` and adding the following Resource to our App :)
// Be aware that not everything reported by this checker is a potential problem, you'll have
// to make that judgement yourself.
.add_plugin(LogPlugin::default())
.insert_resource(ReportExecutionOrderAmbiguities)
// This call to run() starts the app we just built!
.run();
} | // We can ensure that game_over system runs after score_check_system using explicit ordering
// constraints. First, we label the system we want to refer to using `.label`
// Then, we use either `.before` or `.after` to describe the ordering relationship we want.
.add_system_to_stage( |
exe018.py | from random import shuffle
n0 = input('1st student: ')
n1 = input('2nd student: ')
n2 = input('3rd student: ')
n3 = input('4th student: ')
l = [n0, n1, n2, n3]
shuffle(l) |
print('The order of the students is {}'.format(l)) |
|
lib.rs | #![no_std]
extern crate alloc;
use alloc::string::String;
use contract_ffi::{
contract_api::{account, runtime, system, Error},
key::Key,
unwrap_or_revert::UnwrapOrRevert,
value::{account::PurseId, U512},
};
enum | {
Amount = 0,
Name = 1,
}
#[no_mangle]
pub extern "C" fn call() {
let amount: U512 = runtime::get_arg(Arg::Amount as u32)
.unwrap_or_revert_with(Error::MissingArgument)
.unwrap_or_revert_with(Error::InvalidArgument);
let name: String = runtime::get_arg(Arg::Name as u32)
.unwrap_or_revert_with(Error::MissingArgument)
.unwrap_or_revert_with(Error::InvalidArgument);
let main_purse: PurseId = account::get_main_purse();
let new_purse: PurseId = system::create_purse();
system::transfer_from_purse_to_purse(main_purse, new_purse, amount).unwrap_or_revert();
let new_purse_key: Key = new_purse.value().into();
runtime::put_key(&name, &new_purse_key);
}
| Arg |
lib.rs | pub mod reverse_resolver;
pub mod service;
pub mod upsert_util; | pub mod upserter; | |
webrtc.js | var localVideo;
var localStream;
var remoteVideo;
var remoteStream;
var peerConnection;
var uuid;
var serverConnection;
var movement_result;
var peerConnectionConfig = {
'iceServers': [
{ 'urls': 'stun:stun.stunprotocol.org:3478' },
{ 'urls': 'stun:stun.l.google.com:19302' },
{ 'urls': 'stun:relay.backups.cz' },
{
url: 'turn:relay.backups.cz',
credential: 'webrtc',
username: 'webrtc'
},
{
url: 'turn:relay.backups.cz?transport=tcp',
credential: 'webrtc',
username: 'webrtc'
}
]
};
function pageReady() {
uuid = createUUID();
localVideo = document.getElementById('localVideo');
remoteVideo = document.getElementById('remoteVideo');
movement_result = document.getElementById('result');
var address = window.location.hostname;
//use this if you want to add some domain
// if (address.includes('localhost')) {
// serverConnection = new WebSocket('wss://' + address + ':8443');
// }
// else {
// serverConnection = new WebSocket('wss://' + window.location.hostname);
// }
//use this if you only use public IP
//also comment this if you use code above it
serverConnection = new WebSocket('wss://' + address + ':8443');
serverConnection.onmessage = gotMessageFromServer;
var constraints = {
video: { width: 1080, height: 633 },
audio: true,
};
if (navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices.getUserMedia(constraints).then(getUserMediaSuccess).catch(errorHandler);
} else {
alert('Your browser does not support getUserMedia API');
}
}
function button_click(element) {
serverConnection.send(JSON.stringify({ 'movement': element.id }));
movement_result.innerHTML = element.id;
}
function getUserMediaSuccess(stream) {
localStream = stream;
localVideo.srcObject = stream;
remoteStream = new MediaStream();
}
function start(isCaller) {
peerConnection = new RTCPeerConnection(peerConnectionConfig);
peerConnection.onicecandidate = gotIceCandidate;
peerConnection.ontrack = gotRemoteStream;
peerConnection.addStream(localStream);
if (isCaller) {
peerConnection.createOffer().then(createdDescription).catch(errorHandler);
}
}
function gotMessageFromServer(message) {
if (!peerConnection) start(false);
var signal = JSON.parse(message.data);
// Ignore messages from ourselves
if (signal.uuid == uuid) return;
if (signal.sdp) {
peerConnection.setRemoteDescription(new RTCSessionDescription(signal.sdp)).then(function () {
// Only create answers in response to offers
if (signal.sdp.type == 'offer') {
peerConnection.createAnswer().then(createdDescription).catch(errorHandler);
}
}).catch(errorHandler);
} else if (signal.ice) {
peerConnection.addIceCandidate(new RTCIceCandidate(signal.ice)).catch(errorHandler);
}
}
function gotIceCandidate(event) {
if (event.candidate != null) {
serverConnection.send(JSON.stringify({ 'ice': event.candidate, 'uuid': uuid }));
}
}
function createdDescription(description) {
console.log('got description');
peerConnection.setLocalDescription(description).then(function () {
serverConnection.send(JSON.stringify({ 'sdp': peerConnection.localDescription, 'uuid': uuid }));
}).catch(errorHandler);
}
function gotRemoteStream(event) {
console.log('got remote stream');
event.streams[0].getTracks().forEach((track) => {
remoteStream.addTrack(track);
});
remoteVideo.srcObject = remoteStream;
}
function errorHandler(error) {
console.log(error);
}
// Taken from http://stackoverflow.com/a/105074/515584
// Strictly speaking, it's not a real UUID, but it gets the job done here
function createUUID() { | return s4() + s4() + '-' + s4() + '-' + s4() + '-' + s4() + '-' + s4() + s4() + s4();
} | function s4() {
return Math.floor((1 + Math.random()) * 0x10000).toString(16).substring(1);
}
|
vpnsitelinkconnections.go | package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// VpnSiteLinkConnectionsClient is the network Client
type VpnSiteLinkConnectionsClient struct {
BaseClient
}
// NewVpnSiteLinkConnectionsClient creates an instance of the VpnSiteLinkConnectionsClient client.
func | (subscriptionID string) VpnSiteLinkConnectionsClient {
return NewVpnSiteLinkConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewVpnSiteLinkConnectionsClientWithBaseURI creates an instance of the VpnSiteLinkConnectionsClient client using a
// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds,
// Azure stack).
func NewVpnSiteLinkConnectionsClientWithBaseURI(baseURI string, subscriptionID string) VpnSiteLinkConnectionsClient {
return VpnSiteLinkConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// Get retrieves the details of a vpn site link connection.
// Parameters:
// resourceGroupName - the resource group name of the VpnGateway.
// gatewayName - the name of the gateway.
// connectionName - the name of the vpn connection.
// linkConnectionName - the name of the vpn site link connection.
func (client VpnSiteLinkConnectionsClient) Get(ctx context.Context, resourceGroupName string, gatewayName string, connectionName string, linkConnectionName string) (result VpnSiteLinkConnection, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/VpnSiteLinkConnectionsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, gatewayName, connectionName, linkConnectionName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSiteLinkConnectionsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.VpnSiteLinkConnectionsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VpnSiteLinkConnectionsClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client VpnSiteLinkConnectionsClient) GetPreparer(ctx context.Context, resourceGroupName string, gatewayName string, connectionName string, linkConnectionName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"connectionName": autorest.Encode("path", connectionName),
"gatewayName": autorest.Encode("path", gatewayName),
"linkConnectionName": autorest.Encode("path", linkConnectionName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-12-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}/vpnLinkConnections/{linkConnectionName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client VpnSiteLinkConnectionsClient) GetSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client VpnSiteLinkConnectionsClient) GetResponder(resp *http.Response) (result VpnSiteLinkConnection, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
| NewVpnSiteLinkConnectionsClient |
urls.py | from django.conf.urls import patterns, include, url
from django.contrib import admin | admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include('imagr_users.urls'))
) | |
JoshirakuBD_NCED2.py | from typing import Tuple, Union
import vapoursynth as vs
from lvsfunc.misc import source
from vardautomation import FileInfo, PresetAAC, PresetBD, VPath
from project_module import encoder as enc, flt
core = vs.core
core.num_threads = 4
# Sources
JP_NCED = FileInfo(r'BDMV/120926_JOSHIRAKU_VOL1/BDMV/STREAM/00002.m2ts', (24, -24),
idx=lambda x: source(x, cachedir=''))
JP_BD_13 = FileInfo(r'BDMV/130522_JOSHIRAKU_VOL6/BDMV/STREAM/00000.m2ts', (101493, -29),
idx=lambda x: source(x, cachedir=''),
preset=[PresetBD, PresetAAC])
JP_BD_13.name_file_final = VPath(fr"premux/{JP_NCED.name} (Premux).mkv")
def | () -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:
"""Main filterchain"""
import havsfunc as haf
import lvsfunc as lvf
import rekt
import vardefunc as vdf
from adptvgrnMod import adptvgrnMod
from awsmfunc import bbmod
from ccd import ccd
from vsutil import depth, get_y
from xvs import WarpFixChromaBlend
src = JP_NCED.clip_cut
src_13 = JP_BD_13.clip_cut
src = lvf.rfs(src, src_13, [(2073, None)])
# Edgefixing
rkt = rekt.rektlvls(
src,
[0, 1079], [17, 16],
[0, 1, 2, 3] + [1917, 1918, 1919], [16, 4, -2, 2] + [-2, 5, 14]
)
ef = bbmod(rkt, left=4, right=3, y=False)
ef = depth(ef, 32)
# Descaling + Rescaling
src_y = get_y(ef)
descaled = lvf.kernels.Bicubic().descale(src_y, 1280, 720)
rescaled = vdf.scale.nnedi3_upscale(descaled)
downscaled = lvf.kernels.Bicubic(-1/2, 1/4).scale(rescaled, 1920, 1080)
l_mask = vdf.mask.FDOG().get_mask(src_y, lthr=0.065, hthr=0.065).std.Maximum().std.Minimum()
l_mask = l_mask.std.Median().std.Convolution([1] * 9)
rescaled_masked = core.std.MaskedMerge(src_y, downscaled, l_mask)
scaled = depth(vdf.misc.merge_chroma(rescaled_masked, ef), 16)
unwarp = flt.line_darkening(scaled, 0.145).warp.AWarpSharp2(depth=2)
sharp = haf.LSFmod(unwarp, strength=65, Smode=3, Lmode=1, edgemode=1, edgemaskHQ=True)
mask_sharp = core.std.MaskedMerge(scaled, sharp, depth(l_mask, 16))
upscaled = lvf.kernels.Bicubic().scale(descaled, 1920, 1080)
descale_mask = lvf.scale.descale_detail_mask(src_y, upscaled)
details_merged = core.std.MaskedMerge(mask_sharp, depth(ef, 16), depth(descale_mask, 16))
# Denoising
denoise_y = core.knlm.KNLMeansCL(details_merged, d=1, a=3, s=4, h=0.15, channels='Y')
denoise_uv = ccd(denoise_y, threshold=6, matrix='709')
stab = haf.GSMC(denoise_uv, radius=2, adapt=1, planes=[0])
decs = vdf.noise.decsiz(stab, sigmaS=8, min_in=208 << 8, max_in=232 << 8)
# Fixing chroma
cshift = haf.FixChromaBleedingMod(decs, cx=-.25, cy=0, thr=100, strength=1, blur=True)
cwarp = WarpFixChromaBlend(cshift, thresh=88, blur=3, depth=6)
# Regular debanding + graining
detail_mask = flt.detail_mask(cwarp, brz=(1800, 3500))
deband = vdf.deband.dumb3kdb(cwarp, threshold=32, grain=16)
deband_masked = core.std.MaskedMerge(deband, cwarp, detail_mask)
grain: vs.VideoNode = adptvgrnMod(deband_masked, 0.2, luma_scaling=10, size=1.35, static=True, grain_chroma=False)
return grain
if __name__ == '__main__':
FILTERED = filterchain()
enc.Patcher(JP_BD_13, FILTERED).patch( # type: ignore
ranges=[(1794, 2157)],
external_file=f"premux/{JP_NCED.name[:-1]}1 (Premux).mkv",
clean_up=True)
elif __name__ == '__vapoursynth__':
FILTERED = filterchain()
if not isinstance(FILTERED, vs.VideoNode):
raise ImportError(
f"Input clip has multiple output nodes ({len(FILTERED)})! Please output just 1 clip"
)
else:
enc.dither_down(FILTERED).set_output(0)
else:
JP_NCED.clip_cut.std.SetFrameProp('node', intval=0).set_output(0)
FILTERED = filterchain()
if not isinstance(FILTERED, vs.VideoNode):
for i, clip_filtered in enumerate(FILTERED, start=1):
clip_filtered.std.SetFrameProp('node', intval=i).set_output(i)
else:
FILTERED.std.SetFrameProp('node', intval=1).set_output(1)
| filterchain |
kube_config_test.go | package kubernetes
import (
"fmt"
"os"
"path/filepath"
"reflect"
"testing"
)
func TestParseKubeConfig(t *testing.T) {
testCases := []struct {
sourceFile string
expected KubeConfig
checkFunc func(expected KubeConfig, config string) (bool, error)
}{
{
"user_with_token.yml",
KubeConfig{
KubeConfigBase: KubeConfigBase{
APIVersion: "v1",
Clusters: []clusterItem{
{
Name: "test-cluster",
Cluster: cluster{
Server: "https://testcluster.net:8080",
},
},
},
Kind: "Config",
},
Users: []userItem{
{
Name: "test-user",
User: user{
Token: "test-token",
},
},
},
},
isValidConfig,
},
{
"user_with_cert.yml",
KubeConfig{
KubeConfigBase: KubeConfigBase{
APIVersion: "v1",
Clusters: []clusterItem{
{
Name: "test-cluster",
Cluster: cluster{
ClusterAuthorityData: "test-cluster-authority-data",
Server: "https://testcluster.org:443",
},
},
},
Contexts: []contextItem{
{
Name: "test-cluster",
Context: context{
Cluster: "test-cluster",
User: "test-user",
Namespace: "test-namespace",
},
},
},
CurrentContext: "test-cluster",
Kind: "Config",
Preferences: nil,
},
Users: []userItem{
{
Name: "test-user",
User: user{
ClientCertificteData: "test-client-certificate-data",
ClientKeyData: "test-client-key-data",
},
},
},
},
isValidConfig,
},
{
"user_with_cert_token.yml",
KubeConfig{
KubeConfigBase: KubeConfigBase{
APIVersion: "v1",
Clusters: []clusterItem{
{
Name: "test-cluster",
Cluster: cluster{ | Server: "https://testcluster.org:443",
},
},
},
Contexts: []contextItem{
{
Name: "test-cluster",
Context: context{
Cluster: "test-cluster",
User: "test-user",
Namespace: "test-namespace",
},
},
},
CurrentContext: "test-cluster",
Kind: "Config",
Preferences: map[string]interface{}{
"colors": true,
},
},
Users: []userItem{
{
Name: "test-user",
User: user{
ClientCertificteData: "test-client-certificate-data",
ClientKeyData: "test-client-key-data",
Token: "test-token",
},
},
},
},
isValidConfig,
},
{
"user_with_no_auth.yml",
KubeConfig{},
isInvalidConfig,
},
{
"no_cluster.yml",
KubeConfig{},
isInvalidConfig,
},
{
"no_user.yml",
KubeConfig{},
isInvalidConfig,
},
{
"user_with_partial_auth.yml",
KubeConfig{},
isInvalidConfig,
},
{
"cluster_with_no_server.yml",
KubeConfig{},
isInvalidConfig,
},
}
for i, test := range testCases {
encodedConfig := LoadConfig(test.sourceFile)
if len(encodedConfig) == 0 {
t.Fatalf("Test case [%d]: Failed to read config from file '%+v' \n",
i, test.sourceFile)
}
if success, err := test.checkFunc(test.expected, encodedConfig); !success {
t.Fatalf("Test case [%d]: Failed, config '%+v' with error: '%+v'",
i, test.sourceFile, err)
}
}
}
func isValidConfig(expected KubeConfig, encodedConfig string) (bool, error) {
result, err := ParseKubeConfig(encodedConfig)
if err != nil {
return false, err
}
if !reflect.DeepEqual(expected, *result) {
return false, fmt.Errorf("expected '%+v but got '%+v' with encoded config '%+v'",
expected, *result, encodedConfig)
}
return true, nil
}
func isInvalidConfig(_ KubeConfig, encodedConfig string) (bool, error) {
if _, err := ParseKubeConfig(encodedConfig); err == nil {
return false, fmt.Errorf("expected test to throw error but didn't")
}
return true, nil
}
func LoadConfig(fileName string) string {
filePath := filepath.Join("testdata", fileName)
bytes, err := os.ReadFile(filePath)
if err != nil {
return ""
}
return string(bytes)
} | ClusterAuthorityData: "test-cluster-authority-data", |
target_apps_request_builder.go | package targetapps
import (
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go"
)
// TargetAppsRequestBuilder provides operations to call the targetApps method.
type TargetAppsRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string
// The request adapter to use to execute the requests.
requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter
// Url template to use to build the URL for the current request builder
urlTemplate string
}
// TargetAppsRequestBuilderPostRequestConfiguration configuration for the request such as headers, query parameters, and middleware options.
type TargetAppsRequestBuilderPostRequestConfiguration struct {
// Request headers
Headers map[string]string
// Request options
Options []i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestOption
}
// NewTargetAppsRequestBuilderInternal instantiates a new TargetAppsRequestBuilder and sets the default values.
func NewTargetAppsRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*TargetAppsRequestBuilder) {
m := &TargetAppsRequestBuilder{
}
m.urlTemplate = "{+baseurl}/deviceAppManagement/managedAppRegistrations/{managedAppRegistration%2Did}/intendedPolicies/{managedAppPolicy%2Did}/microsoft.graph.targetedManagedAppProtection/microsoft.graph.targetApps";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = urlTplParams;
m.requestAdapter = requestAdapter;
return m
}
// NewTargetAppsRequestBuilder instantiates a new TargetAppsRequestBuilder and sets the default values.
func NewTargetAppsRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*TargetAppsRequestBuilder) {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewTargetAppsRequestBuilderInternal(urlParams, requestAdapter)
}
// CreatePostRequestInformation invoke action targetApps
func (m *TargetAppsRequestBuilder) CreatePostRequestInformation(body TargetAppsRequestBodyable)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) { | // CreatePostRequestInformationWithRequestConfiguration invoke action targetApps
func (m *TargetAppsRequestBuilder) CreatePostRequestInformationWithRequestConfiguration(body TargetAppsRequestBodyable, requestConfiguration *TargetAppsRequestBuilderPostRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.POST
requestInfo.SetContentFromParsable(m.requestAdapter, "application/json", body)
if requestConfiguration != nil {
requestInfo.AddRequestHeaders(requestConfiguration.Headers)
requestInfo.AddRequestOptions(requestConfiguration.Options)
}
return requestInfo, nil
}
// Post invoke action targetApps
func (m *TargetAppsRequestBuilder) Post(body TargetAppsRequestBodyable)(error) {
return m.PostWithRequestConfigurationAndResponseHandler(body, nil, nil);
}
// PostWithRequestConfigurationAndResponseHandler invoke action targetApps
func (m *TargetAppsRequestBuilder) PostWithRequestConfigurationAndResponseHandler(body TargetAppsRequestBodyable, requestConfiguration *TargetAppsRequestBuilderPostRequestConfiguration, responseHandler i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ResponseHandler)(error) {
requestInfo, err := m.CreatePostRequestInformationWithRequestConfiguration(body, requestConfiguration);
if err != nil {
return err
}
err = m.requestAdapter.SendNoContentAsync(requestInfo, responseHandler, nil)
if err != nil {
return err
}
return nil
} | return m.CreatePostRequestInformationWithRequestConfiguration(body, nil);
} |
factory.go | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package googlecloudexporter
import (
"context"
"time"
"github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/collector"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config"
"go.opentelemetry.io/collector/exporter/exporterhelper"
)
const (
// The value of "type" key in configuration.
typeStr = "googlecloud"
defaultTimeout = 12 * time.Second // Consistent with Cloud Monitoring's timeout
)
// NewFactory creates a factory for the googlecloud exporter
func NewFactory() component.ExporterFactory {
return component.NewExporterFactory(
typeStr,
createDefaultConfig,
component.WithTracesExporter(createTracesExporter),
component.WithMetricsExporter(createMetricsExporter),
)
}
// createDefaultConfig creates the default configuration for exporter.
func createDefaultConfig() config.Exporter {
cfg := &Config{
ExporterSettings: config.NewExporterSettings(config.NewComponentID(typeStr)),
TimeoutSettings: exporterhelper.TimeoutSettings{Timeout: defaultTimeout},
RetrySettings: exporterhelper.NewDefaultRetrySettings(),
QueueSettings: exporterhelper.NewDefaultQueueSettings(),
Config: collector.DefaultConfig(),
}
return cfg
}
// createTracesExporter creates a trace exporter based on this config.
func createTracesExporter(
_ context.Context,
params component.ExporterCreateSettings,
cfg config.Exporter) (component.TracesExporter, error) {
eCfg := cfg.(*Config)
tExp, err := collector.NewGoogleCloudTracesExporter(eCfg.Config, params.BuildInfo.Version, eCfg.Timeout)
if err != nil {
return nil, err
}
return exporterhelper.NewTracesExporter(
cfg,
params,
tExp.PushTraces,
exporterhelper.WithShutdown(tExp.Shutdown),
// Disable exporterhelper Timeout, since we are using a custom mechanism
// within exporter itself
exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}),
exporterhelper.WithQueue(eCfg.QueueSettings),
exporterhelper.WithRetry(eCfg.RetrySettings))
}
// createMetricsExporter creates a metrics exporter based on this config.
func createMetricsExporter(
ctx context.Context,
params component.ExporterCreateSettings,
cfg config.Exporter) (component.MetricsExporter, error) {
eCfg := cfg.(*Config)
mExp, err := collector.NewGoogleCloudMetricsExporter(ctx, eCfg.Config, params.TelemetrySettings.Logger, params.BuildInfo.Version, eCfg.Timeout)
if err != nil |
return exporterhelper.NewMetricsExporter(
cfg,
params,
mExp.PushMetrics,
exporterhelper.WithShutdown(mExp.Shutdown),
// Disable exporterhelper Timeout, since we are using a custom mechanism
// within exporter itself
exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}),
exporterhelper.WithQueue(eCfg.QueueSettings),
exporterhelper.WithRetry(eCfg.RetrySettings))
}
| {
return nil, err
} |
__init__.py | from .anoflows import AnoFlows | ||
stsb.py | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2021-05-20 16:25
from typing import Union, List, Callable
from elit.common.dataset import TransformableDataset
from elit.utils.io_util import read_cells
STS_B_TRAIN = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-train.csv'
STS_B_DEV = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-dev.csv'
STS_B_TEST = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-test.csv'
class SemanticTextualSimilarityDataset(TransformableDataset):
def | (self,
data: Union[str, List],
sent_a_col,
sent_b_col,
similarity_col,
delimiter='auto',
transform: Union[Callable, List] = None,
cache=None,
generate_idx=None) -> None:
self.delimiter = delimiter
self.similarity_col = similarity_col
self.sent_b_col = sent_b_col
self.sent_a_col = sent_a_col
super().__init__(data, transform, cache, generate_idx)
def load_file(self, filepath: str):
for i, cells in enumerate(read_cells(filepath, strip=True, delimiter=self.delimiter)):
yield {
'sent_a': cells[self.sent_a_col],
'sent_b': cells[self.sent_b_col],
'similarity': float(cells[self.similarity_col])
}
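    # Usage sketch (illustrative; the column indices are an assumption based on the
    # tab-separated STS-B layout, where score/sent1/sent2 occupy columns 4/5/6):
    # dataset = SemanticTextualSimilarityDataset(STS_B_DEV, sent_a_col=5, sent_b_col=6, similarity_col=4)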
| __init__ |
celebrities_births.py | '''
This script contains a class for representing the date.
Additionally, the class Scraper gets the HTML code of a
Wikipedia page and extracts the names of celebrities that
were born on a certain date
'''
import re
import requests
from bs4 import BeautifulSoup
from datetime import datetime
class Date:
'''
This class is used to represent a date.
Attributes:
_day_of_month (tuple): The days in each month of the year
_month_str (tuple): The names of the months
year (int): The year of the date.
month (int): The month of the date.
day (int): The day of the date.
'''
_day_of_month = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
_month_str = ('January', 'February', 'March',
'April', 'May', 'June',
'July', 'August', 'September',
'October', 'November', 'December')
def __init__(self, day: int, month: int, year: int):
'''
See help(Date) for accurate signature
'''
if not self.is_date_valid(day, month, year):
raise ValueError('Date not valid')
self.year = year
self.month = month
self.day = day
def __str__(self):
'''
This function is used to return the string representation of the date.
Returns:
str: The string representation of the date.
'''
return "{0}-{1}-{2}".format(self.day, self.month, self.year)
def __repr__(self):
|
def __eq__(self, other):
'''
This function is used to compare the date with other date.
Args:
other (Date): The other date to be compared with.
Returns:
bool: True if the date is equal to the other date, False otherwise.
'''
return self.year == other.year and self.month == other.month and \
self.day == other.day
def __lt__(self, other):
'''
This function is used to compare the date with other date.
Args:
other (Date): The other date to be compared with.
Returns:
bool: True if the date is less than the other date,
False otherwise.
'''
if self.year < other.year:
return True
elif self.year == other.year:
if self.month < other.month:
return True
elif self.month == other.month:
if self.day < other.day:
return True
return False
@staticmethod
def is_leap_year(year: int) -> bool:
'''
This method checks if a year is a leap year
Args:
year (int): The year to check
Returns:
(bool): True if the year is a leap year, False otherwise
'''
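        # e.g. (with the full Gregorian rule below): 2000 and 2024 -> True,
        # while century years such as 1900 -> False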
        # Full Gregorian rule: century years are leap years only if divisible by 400
        return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def is_date_valid(self, day: int, month: int, year: int) -> bool:
'''
This method is used to check if the date is valid.
Args:
day (int): The day of the date.
month (int): The month of the date.
year (int): The year of the date.
Returns:
bool: True if the date is valid, False otherwise.
'''
        # Validate the month first so that indexing _day_of_month cannot raise
        if not (year >= 0 and 1 <= month <= 12):
            return False
        current_day = self._day_of_month[month - 1]
        if self.is_leap_year(year) and month == 2:
            current_day += 1
        return 1 <= day <= current_day
@classmethod
def from_string(cls, date_as_string):
'''
This function is used to create a date from a string.
Args:
date_as_string (str): The string representation of the date.
Returns:
Date: The date created from the string.
'''
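        # Example (illustrative): Date.from_string('27-3-1991') == Date(27, 3, 1991)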
day, month, year = map(int, date_as_string.split('-'))
return cls(day, month, year)
@classmethod
def today(cls):
'''
        This function is used to create a date representing the current day.
        Returns:
            Date: Today's date.
'''
cur_day = datetime.now()
day, month, year = cur_day.day, cur_day.month, cur_day.year
return cls(day, month, year)
def to_wiki_format(self):
'''
Returns the date into a format legible by the Wikipedia URL
Returns:
(str): String that can be appended to the Wikipedia URL
For example 'July_31'
'''
return f'{self._month_str[self.month - 1]}_{self.day}'
class Scraper:
'''
    This class scrapes a Wikipedia day-of-year page (e.g. 'July_31') and
    extracts the names of the celebrities born on that date.
    Attributes:
        ROOT (str): The base URL of the Wikipedia pages to scrape.
'''
def __init__(self):
self.ROOT = 'https://en.wikipedia.org/wiki/'
def _get_soup(self, date: str) -> BeautifulSoup:
# private method, you don't need a docstring
r = requests.get(self.ROOT + date)
soup = BeautifulSoup(r.text, 'html.parser')
return soup
def _get_birth_header(self, date: str) -> BeautifulSoup:
# Private
soup = self._get_soup(date)
span = soup.find(
'span', {'class': 'mw-headline'}, text=re.compile("Births"))
# If the list is empty because it didn't find anything
if not span:
raise ValueError('The given date has no birth data')
h2 = span.find_parent()
return h2
def _get_celebrity_list(self, date: str) -> list:
# Add <ul> tags until you find the next <h2> tag
next_node = self._get_birth_header(date)
celebrities_list = []
while True:
next_node = next_node.find_next_sibling()
if getattr(next_node, 'name') == 'ul':
celebrities_list.extend(next_node.find_all('li'))
elif getattr(next_node, 'name') == 'h2':
break
return celebrities_list
def _clean_li(self, li: BeautifulSoup) -> str:
# Private method
li_complete = li.text.split('–')
name_complete = li_complete[1].split(',')
name = name_complete[0].strip()
return name
def get_celebrities(self, date: str = None) -> list:
'''
        Returns the full list of celebrities whose birthday matches the
        given date (in Wikipedia URL format, e.g. 'January_1').
'''
if date is None:
date = 'January_1'
cel_list = self._get_celebrity_list(date)
celebrities = []
for li in cel_list:
celebrities.append(self._clean_li(li))
return celebrities
if __name__ == '__main__':
date_object = Date(27, 3, 1991)
scraper = Scraper()
celebrities = scraper.get_celebrities(date_object.to_wiki_format())
print(celebrities)
| '''
This function is used to return the string representation of the date.
Returns:
str: The string representation of the date.
'''
return "{0}-{1}-{2}".format(self.day, self.month, self.year) |
BoxPlot_error.py | #By Zhenghang(Klaus) Zhong
#Box Plot of error distribution
from pandas import DataFrame
from pandas import read_csv
import pandas as pd
import numpy as np
from matplotlib import pyplot
# load results into a dataframe
filenames_128 = ['dis_diff_128.csv']
filenames_256 = ['dis_diff_256.csv']
filenames_512 = ['dis_diff_512.csv']
results = DataFrame()
for name in filenames_128: | results_128 = read_csv(name, header=0,usecols = [1])
# describe all results, as 1 unit = 10cm, we want to transfer to meters, /10
results_128 = results_128.div(10, axis = 0)
for name in filenames_256:
results_256 = read_csv(name, header=0,usecols = [1])
# describe all results
results_256 = results_256.div(10, axis = 0)
for name in filenames_512:
results_512 = read_csv(name, header=0,usecols = [1])
# describe all results
results_512 = results_512.div(10, axis = 0)
print(results_128.describe())
print(results_256.describe())
print(results_512.describe())
# box and whisker plot
df = pd.DataFrame(np.concatenate((results_128,results_512),axis = 1),
columns=['128', '512'])
df.boxplot(sym='k',showmeans = True,showfliers = False,return_type='dict')
#results_256.boxplot(sym='k',showmeans = True,whis = [0,8],showfliers = False,return_type='dict')
pyplot.xlabel('Hidden node')
pyplot.ylabel('Error (m)')
pyplot.show() | |
receiver.rs | //! The default implementation of a WebSocket Receiver.
use std::io::Read;
use std::io::Result as IoResult;
use hyper::buffer::BufReader;
use crate::dataframe::{DataFrame, Opcode};
use crate::message::OwnedMessage;
use crate::result::{WebSocketError, WebSocketResult};
pub use crate::stream::sync::Shutdown;
use crate::stream::sync::{AsTcpStream, Stream};
use crate::ws;
use crate::ws::receiver::Receiver as ReceiverTrait;
use crate::ws::receiver::{DataFrameIterator, MessageIterator};
/// This reader bundles an existing stream with a parsing algorithm.
/// It is used by the client in its `.split()` function as the reading component.
pub struct Reader<R>
where
R: Read,
{
/// the stream to be read from
pub stream: BufReader<R>,
/// the parser to parse bytes into messages
pub receiver: Receiver,
}
impl<R> Reader<R>
where
R: Read,
{
/// Reads a single data frame from the remote endpoint.
pub fn recv_dataframe(&mut self) -> WebSocketResult<DataFrame> {
self.receiver.recv_dataframe(&mut self.stream)
}
/// Returns an iterator over incoming data frames.
pub fn incoming_dataframes(&mut self) -> DataFrameIterator<Receiver, BufReader<R>> {
self.receiver.incoming_dataframes(&mut self.stream)
}
/// Reads a single message from this receiver.
pub fn recv_message(&mut self) -> WebSocketResult<OwnedMessage> {
self.receiver.recv_message(&mut self.stream)
}
/// An iterator over incoming messages.
/// This iterator will block until new messages arrive and will never halt.
pub fn incoming_messages<'a>(&'a mut self) -> MessageIterator<'a, Receiver, BufReader<R>> {
self.receiver.incoming_messages(&mut self.stream)
}
}
impl<S> Reader<S>
where
S: AsTcpStream + Stream + Read,
{
/// Closes the receiver side of the connection; all pending and future IO will
/// return immediately with an appropriate value.
pub fn shutdown(&self) -> IoResult<()> {
self.stream.get_ref().as_tcp().shutdown(Shutdown::Read)
}
/// Shuts down both Sender and Receiver; all pending and future IO will
/// return immediately with an appropriate value.
pub fn | (&self) -> IoResult<()> {
self.stream.get_ref().as_tcp().shutdown(Shutdown::Both)
}
}
/// A Receiver that wraps a Reader and provides a default implementation using
/// DataFrames and Messages.
pub struct Receiver {
buffer: Vec<DataFrame>,
mask: bool,
}
impl Receiver {
/// Create a new Receiver using the specified Reader.
pub fn new(mask: bool) -> Receiver {
Receiver {
buffer: Vec::new(),
mask,
}
}
}
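// Illustrative construction (a sketch; WebSocket clients receive unmasked
// frames from the server, so a client-side reader would pass `false`):
//
//     let mut receiver = Receiver::new(false);
//     let message = receiver.recv_message(&mut stream)?;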
impl ws::Receiver for Receiver {
type F = DataFrame;
type M = OwnedMessage;
/// Reads a single data frame from the remote endpoint.
fn recv_dataframe<R>(&mut self, reader: &mut R) -> WebSocketResult<DataFrame>
where
R: Read,
{
DataFrame::read_dataframe(reader, self.mask)
}
/// Returns the data frames that constitute one message.
fn recv_message_dataframes<R>(&mut self, reader: &mut R) -> WebSocketResult<Vec<DataFrame>>
where
R: Read,
{
let mut finished = if self.buffer.is_empty() {
let first = self.recv_dataframe(reader)?;
if first.opcode == Opcode::Continuation {
return Err(WebSocketError::ProtocolError(
"Unexpected continuation data frame opcode",
));
}
let finished = first.finished;
self.buffer.push(first);
finished
} else {
false
};
while !finished {
let next = self.recv_dataframe(reader)?;
finished = next.finished;
match next.opcode as u8 {
// Continuation opcode
0 => self.buffer.push(next),
// Control frame
8..=15 => {
return Ok(vec![next]);
}
// Others
_ => {
return Err(WebSocketError::ProtocolError(
"Unexpected data frame opcode",
));
}
}
}
Ok(::std::mem::replace(&mut self.buffer, Vec::new()))
}
}
| shutdown_all |
pwm_input.rs | //! Testing PWM input
#![deny(unsafe_code)]
#![no_main]
#![no_std]
use panic_halt as _;
use stm32f1xx_hal::{
prelude::*,
pac,
pwm_input::*,
};
use cortex_m_rt::entry;
#[entry]
fn | () -> ! {
let p = pac::Peripherals::take().unwrap();
let mut flash = p.FLASH.constrain();
let mut rcc = p.RCC.constrain();
let clocks = rcc.cfgr.freeze(&mut flash.acr);
let mut afio = p.AFIO.constrain(&mut rcc.apb2);
let mut dbg = p.DBGMCU;
let mut gpiob = p.GPIOB.split(&mut rcc.apb2);
let pb4 = gpiob.pb4;
let pb5 = gpiob.pb5;
let pwm_input = p.TIM3.pwm_input(
(pb4, pb5),
&mut rcc.apb1,
&mut afio.mapr,
&mut dbg,
&clocks,
Configuration::Frequency(10.khz()),
);
loop {
let _freq = pwm_input
.read_frequency(ReadMode::Instant, &clocks)
.unwrap();
let _duty_cycle = pwm_input.read_duty(ReadMode::Instant).unwrap();
}
}
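// Note (a sketch of the assumed semantics): `ReadMode::Instant` samples the
// timer's capture registers at call time instead of blocking until a fresh
// capture completes, so each loop iteration returns the latest measured
// frequency and duty cycle.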
| main |
lib.rs | mod list;
mod map;
mod path;
mod reflect;
mod struct_trait;
mod tuple_struct;
mod type_registry;
mod type_uuid;
mod impls {
#[cfg(feature = "bevy_app")]
mod bevy_app;
#[cfg(feature = "bevy_ecs")]
mod bevy_ecs;
#[cfg(feature = "glam")]
mod glam;
#[cfg(feature = "smallvec")]
mod smallvec;
mod std;
#[cfg(feature = "bevy_app")]
pub use self::bevy_app::*;
#[cfg(feature = "bevy_ecs")]
pub use self::bevy_ecs::*;
#[cfg(feature = "glam")]
pub use self::glam::*;
#[cfg(feature = "smallvec")]
pub use self::smallvec::*;
pub use self::std::*;
}
pub mod serde;
pub mod prelude {
#[cfg(feature = "bevy_ecs")]
pub use crate::ReflectComponent;
#[cfg(feature = "bevy_app")]
pub use crate::RegisterTypeBuilder;
pub use crate::{
reflect_trait, GetField, GetTupleStructField, Reflect, ReflectDeserialize, Struct,
TupleStruct,
};
}
pub use impls::*;
pub use list::*;
pub use map::*;
pub use path::*;
pub use reflect::*;
pub use struct_trait::*;
pub use tuple_struct::*;
pub use type_registry::*;
pub use type_uuid::*;
pub use bevy_reflect_derive::*;
pub use erased_serde;
#[cfg(test)]
mod tests {
use ::serde::de::DeserializeSeed;
use bevy_utils::HashMap;
use ron::{
ser::{to_string_pretty, PrettyConfig},
Deserializer,
};
use crate::serde::{ReflectDeserializer, ReflectSerializer};
use super::*;
#[test]
fn reflect_struct() {
#[derive(Reflect)]
struct Foo {
a: u32,
b: f32,
c: Bar,
}
#[derive(Reflect)]
struct Bar {
x: u32,
}
let mut foo = Foo {
a: 42,
b: 3.14,
c: Bar { x: 1 },
};
let a = *foo.get_field::<u32>("a").unwrap();
assert_eq!(a, 42);
*foo.get_field_mut::<u32>("a").unwrap() += 1;
assert_eq!(foo.a, 43);
let bar = foo.get_field::<Bar>("c").unwrap();
assert_eq!(bar.x, 1);
// nested retrieval
let c = foo.field("c").unwrap();
if let ReflectRef::Struct(value) = c.reflect_ref() {
assert_eq!(*value.get_field::<u32>("x").unwrap(), 1);
} else {
panic!("expected a struct");
}
// patch Foo with a dynamic struct
let mut dynamic_struct = DynamicStruct::default();
dynamic_struct.insert("a", 123u32);
dynamic_struct.insert("should_be_ignored", 456);
foo.apply(&dynamic_struct);
assert_eq!(foo.a, 123);
}
#[test]
fn reflect_map() {
#[derive(Reflect, Hash)]
#[reflect(Hash)]
struct Foo {
a: u32,
b: String,
}
let key_a = Foo {
a: 1,
b: "k1".to_string(),
};
let key_b = Foo {
a: 1,
b: "k1".to_string(),
};
let key_c = Foo {
a: 3,
b: "k3".to_string(),
};
let mut map = DynamicMap::default();
map.insert(key_a, 10u32);
assert_eq!(10, *map.get(&key_b).unwrap().downcast_ref::<u32>().unwrap());
assert!(map.get(&key_c).is_none());
*map.get_mut(&key_b).unwrap().downcast_mut::<u32>().unwrap() = 20;
assert_eq!(20, *map.get(&key_b).unwrap().downcast_ref::<u32>().unwrap());
}
#[test]
fn reflect_tuple_struct() {
#[derive(Reflect)]
struct Foo(u32, u64);
let mut foo = Foo(1, 2);
assert_eq!(1, *foo.get_field::<u32>(0).unwrap());
assert_eq!(2, *foo.get_field::<u64>(1).unwrap());
let mut patch = DynamicTupleStruct::default();
patch.insert(3u32);
patch.insert(4u64);
assert_eq!(3, *patch.field(0).unwrap().downcast_ref::<u32>().unwrap());
assert_eq!(4, *patch.field(1).unwrap().downcast_ref::<u64>().unwrap());
foo.apply(&patch);
assert_eq!(3, foo.0);
assert_eq!(4, foo.1);
let mut iter = patch.iter_fields();
assert_eq!(3, *iter.next().unwrap().downcast_ref::<u32>().unwrap());
assert_eq!(4, *iter.next().unwrap().downcast_ref::<u64>().unwrap());
}
#[test]
#[should_panic(expected = "the given key does not support hashing")]
fn reflect_map_no_hash() {
#[derive(Reflect)]
struct Foo {
a: u32,
}
let foo = Foo { a: 1 };
let mut map = DynamicMap::default();
map.insert(foo, 10u32);
}
#[test]
fn reflect_ignore() {
#[derive(Reflect)]
struct Foo {
a: u32,
#[reflect(ignore)]
_b: u32,
}
let foo = Foo { a: 1, _b: 2 };
let values: Vec<u32> = foo
.iter_fields()
.map(|value| *value.downcast_ref::<u32>().unwrap())
.collect();
assert_eq!(values, vec![1]);
}
#[test]
fn reflect_complex_patch() {
#[derive(Reflect, Eq, PartialEq, Debug)]
struct Foo {
a: u32,
#[reflect(ignore)]
_b: u32,
c: Vec<isize>,
d: HashMap<usize, i8>,
e: Bar,
}
#[derive(Reflect, Eq, PartialEq, Debug)]
struct Bar {
x: u32,
}
let mut hash_map = HashMap::default();
hash_map.insert(1, 1);
hash_map.insert(2, 2);
let mut foo = Foo {
a: 1,
_b: 1,
c: vec![1, 2],
d: hash_map,
e: Bar { x: 1 },
};
let mut foo_patch = DynamicStruct::default();
foo_patch.insert("a", 2u32);
foo_patch.insert("b", 2u32); // this should be ignored
let mut list = DynamicList::default();
list.push(3isize);
list.push(4isize);
list.push(5isize);
foo_patch.insert("c", list);
let mut map = DynamicMap::default();
map.insert(2usize, 3i8);
foo_patch.insert("d", map);
let mut bar_patch = DynamicStruct::default();
bar_patch.insert("x", 2u32);
foo_patch.insert("e", bar_patch);
foo.apply(&foo_patch);
let mut hash_map = HashMap::default();
hash_map.insert(1, 1);
hash_map.insert(2, 3);
let expected_foo = Foo {
a: 2,
_b: 1,
c: vec![3, 4, 5],
d: hash_map,
e: Bar { x: 2 },
};
assert_eq!(foo, expected_foo);
}
#[test]
fn reflect_serialize() {
#[derive(Reflect)]
struct | {
a: u32,
#[reflect(ignore)]
_b: u32,
c: Vec<isize>,
d: HashMap<usize, i8>,
e: Bar,
f: String,
}
#[derive(Reflect)]
struct Bar {
x: u32,
}
let mut hash_map = HashMap::default();
hash_map.insert(1, 1);
hash_map.insert(2, 2);
let foo = Foo {
a: 1,
_b: 1,
c: vec![1, 2],
d: hash_map,
e: Bar { x: 1 },
f: "hi".to_string(),
};
let mut registry = TypeRegistry::default();
registry.register::<u32>();
registry.register::<isize>();
registry.register::<usize>();
registry.register::<Bar>();
registry.register::<String>();
registry.register::<i8>();
let serializer = ReflectSerializer::new(&foo, ®istry);
let serialized = to_string_pretty(&serializer, PrettyConfig::default()).unwrap();
let mut deserializer = Deserializer::from_str(&serialized).unwrap();
let reflect_deserializer = ReflectDeserializer::new(®istry);
let value = reflect_deserializer.deserialize(&mut deserializer).unwrap();
let dynamic_struct = value.take::<DynamicStruct>().unwrap();
assert!(foo.partial_eq(&dynamic_struct).unwrap());
}
#[test]
fn reflect_take() {
#[derive(Reflect, Debug, PartialEq)]
struct Bar {
x: u32,
}
let x: Box<dyn Reflect> = Box::new(Bar { x: 2 });
let y = x.take::<Bar>().unwrap();
assert_eq!(y, Bar { x: 2 });
}
}
| Foo |
models.py | from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
class List(models.Model):
owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)
shared_with = models.ManyToManyField(
settings.AUTH_USER_MODEL, related_name='shared_lists'
)
@property
def name(self):
return self.item_set.first().text
def get_absolute_url(self):
return reverse('view_list', args=[self.id])
@staticmethod
def create_new(first_item_text, owner=None):
list_ = List.objects.create(owner=owner)
Item.objects.create(text=first_item_text, list=list_)
return list_
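# Illustrative usage (a sketch):
#     list_ = List.create_new(first_item_text='buy milk', owner=some_user)
#     list_.name  # 'buy milk' (the text of the first item)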
class Item(models.Model):
text = models.TextField(default='')
list = models.ForeignKey(List, default=None)
class Meta:
|
def __str__(self):
return self.text
| ordering = ('id',)
unique_together = ('list', 'text') |
placeholder.js | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
const TAG_TO_PLACEHOLDER_NAMES = {
'A': 'LINK',
'B': 'BOLD_TEXT',
'BR': 'LINE_BREAK',
'EM': 'EMPHASISED_TEXT',
'H1': 'HEADING_LEVEL1',
'H2': 'HEADING_LEVEL2',
'H3': 'HEADING_LEVEL3',
'H4': 'HEADING_LEVEL4',
'H5': 'HEADING_LEVEL5',
'H6': 'HEADING_LEVEL6',
'HR': 'HORIZONTAL_RULE',
'I': 'ITALIC_TEXT',
'LI': 'LIST_ITEM',
'LINK': 'MEDIA_LINK',
'OL': 'ORDERED_LIST',
'P': 'PARAGRAPH',
'Q': 'QUOTATION',
'S': 'STRIKETHROUGH_TEXT',
'SMALL': 'SMALL_TEXT',
'SUB': 'SUBSTRIPT',
'SUP': 'SUPERSCRIPT',
'TBODY': 'TABLE_BODY',
'TD': 'TABLE_CELL',
'TFOOT': 'TABLE_FOOTER',
'TH': 'TABLE_HEADER_CELL',
'THEAD': 'TABLE_HEADER',
'TR': 'TABLE_ROW',
'TT': 'MONOSPACED_TEXT',
'U': 'UNDERLINED_TEXT',
'UL': 'UNORDERED_LIST',
};
/**
 * Creates unique names for placeholders with different content.
*
* Returns the same placeholder name when the content is identical.
*/
export class | {
constructor() {
// Count the occurrence of the base name to generate a unique name
this._placeHolderNameCounts = {};
// Maps signature to placeholder names
this._signatureToName = {};
}
getStartTagPlaceholderName(tag, attrs, isVoid) {
const signature = this._hashTag(tag, attrs, isVoid);
if (this._signatureToName[signature]) {
return this._signatureToName[signature];
}
const upperTag = tag.toUpperCase();
const baseName = TAG_TO_PLACEHOLDER_NAMES[upperTag] || `TAG_${upperTag}`;
const name = this._generateUniqueName(isVoid ? baseName : `START_${baseName}`);
this._signatureToName[signature] = name;
return name;
}
getCloseTagPlaceholderName(tag) {
const signature = this._hashClosingTag(tag);
if (this._signatureToName[signature]) {
return this._signatureToName[signature];
}
const upperTag = tag.toUpperCase();
const baseName = TAG_TO_PLACEHOLDER_NAMES[upperTag] || `TAG_${upperTag}`;
const name = this._generateUniqueName(`CLOSE_${baseName}`);
this._signatureToName[signature] = name;
return name;
}
getPlaceholderName(name, content) {
const upperName = name.toUpperCase();
const signature = `PH: ${upperName}=${content}`;
if (this._signatureToName[signature]) {
return this._signatureToName[signature];
}
const uniqueName = this._generateUniqueName(upperName);
this._signatureToName[signature] = uniqueName;
return uniqueName;
}
getUniquePlaceholder(name) {
return this._generateUniqueName(name.toUpperCase());
}
// Generate a hash for a tag - does not take attribute order into account
_hashTag(tag, attrs, isVoid) {
const start = `<${tag}`;
const strAttrs = Object.keys(attrs).sort().map((name) => ` ${name}=${attrs[name]}`).join('');
const end = isVoid ? '/>' : `></${tag}>`;
return start + strAttrs + end;
}
_hashClosingTag(tag) { return this._hashTag(`/${tag}`, {}, false); }
_generateUniqueName(base) {
const seen = this._placeHolderNameCounts.hasOwnProperty(base);
if (!seen) {
this._placeHolderNameCounts[base] = 1;
return base;
}
const id = this._placeHolderNameCounts[base];
this._placeHolderNameCounts[base] = id + 1;
return `${base}_${id}`;
}
}
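// Illustrative usage (hypothetical calls, following the rules above):
//     const registry = new PlaceholderRegistry();
//     registry.getStartTagPlaceholderName('b', {}, false); // 'START_BOLD_TEXT'
//     registry.getCloseTagPlaceholderName('b');            // 'CLOSE_BOLD_TEXT'
//     registry.getPlaceholderName('interpolation', '{{ a }}'); // 'INTERPOLATION'
//     registry.getPlaceholderName('interpolation', '{{ b }}'); // 'INTERPOLATION_1'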
| PlaceholderRegistry
gzip.go | /*
* The MIT License (MIT)
*
* Copyright (c) 2018 Yu Jing <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
package compr
import (
"bytes"
"compress/gzip"
"io/ioutil"
)
func | (in []byte) (out []byte, err error) {
gr, err := gzip.NewReader(bytes.NewReader(in))
if err != nil {
return
}
out, err = ioutil.ReadAll(gr)
if err != nil {
return
}
return
}
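// Illustrative round trip (assuming the two helpers in this file):
//
//	compressed, _ := GzipCompress([]byte("hello"))
//	original, _ := GzipDecompress(compressed) // original == []byte("hello")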
func GzipCompress(in []byte) (out []byte, err error) {
var buf bytes.Buffer
w := gzip.NewWriter(&buf)
_, err = w.Write(in)
w.Close()
if err != nil {
return nil, err
}
return buf.Bytes(), err
}
| GzipDecompress |
1-dir.js | console.dir(global, { depth: 0 }); |
||
index.ts | import openBrowser from './openBrowser';
export * from './types'; | export { openBrowser }; |
|
syntax_tree.rs | use std::{fmt, slice::Iter};
use rowan::GreenNodeBuilder;
use crate::{ast::Document, Error, SyntaxElement, SyntaxKind};
use super::GraphQLLanguage;
/// An AST generated by the parser. Consists of a syntax tree and a
/// `Vec<Error>` containing any errors encountered during parsing.
///
/// ## Example
///
/// Given a syntactically incorrect token `uasdf21230jkdw`, which cannot be part
/// of any GraphQL definition, and a syntactically correct SelectionSet, we
/// are able to see both the AST for the SelectionSet and the error for the
/// incorrect token.
/// ```rust
/// use apollo_parser::Parser;
///
/// let schema = r#"
/// uasdf21230jkdw
///
/// {
/// pet
/// faveSnack
/// }
/// "#;
/// let parser = Parser::new(schema);
///
/// let ast = parser.parse();
/// // The Vec<Error> that's part of the SyntaxTree struct.
/// assert_eq!(ast.errors().len(), 1);
///
/// // The AST with Document as its root node.
/// let doc = ast.document();
/// let nodes: Vec<_> = doc.definitions().into_iter().collect();
/// assert_eq!(nodes.len(), 1);
/// ```
#[derive(PartialEq, Eq, Clone)]
pub struct SyntaxTree {
pub(crate) ast: rowan::SyntaxNode<GraphQLLanguage>,
pub(crate) errors: Vec<crate::Error>,
}
impl SyntaxTree {
/// Get an iterator over the syntax tree's errors.
pub fn errors(&self) -> Iter<'_, crate::Error> {
self.errors.iter()
}
/// Return the root typed `Document` node.
pub fn document(self) -> Document {
Document { syntax: self.ast }
}
}
impl fmt::Debug for SyntaxTree {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fn print(f: &mut fmt::Formatter<'_>, indent: usize, element: SyntaxElement) -> fmt::Result {
let kind: SyntaxKind = element.kind();
write!(f, "{:indent$}", "", indent = indent)?;
match element {
rowan::NodeOrToken::Node(node) => {
writeln!(f, "- {:?}@{:?}", kind, node.text_range())?;
for child in node.children_with_tokens() {
print(f, indent + 4, child)?;
}
Ok(())
}
rowan::NodeOrToken::Token(token) => {
writeln!(
f,
"- {:?}@{:?} {:?}",
kind,
token.text_range(),
token.text()
)
}
}
}
fn print_err(f: &mut fmt::Formatter<'_>, errors: Vec<Error>) -> fmt::Result {
for err in errors {
writeln!(f, "- {:?}", err)?;
}
write!(f, "")
}
print(f, 0, self.ast.clone().into())?;
print_err(f, self.errors.clone()) | }
#[derive(Debug)]
pub(crate) struct SyntaxTreeBuilder {
builder: GreenNodeBuilder<'static>,
}
impl SyntaxTreeBuilder {
/// Create a new instance of `SyntaxBuilder`.
pub(crate) fn new() -> Self {
Self {
builder: GreenNodeBuilder::new(),
}
}
/// Start new node and make it current.
pub(crate) fn start_node(&mut self, kind: SyntaxKind) {
self.builder.start_node(rowan::SyntaxKind(kind as u16));
}
/// Finish current branch and restore previous branch as current.
pub(crate) fn finish_node(&mut self) {
self.builder.finish_node();
}
/// Adds new token to the current branch.
pub(crate) fn token(&mut self, kind: SyntaxKind, text: &str) {
self.builder.token(rowan::SyntaxKind(kind as u16), text);
}
pub(crate) fn finish(self, errors: Vec<Error>) -> SyntaxTree {
SyntaxTree {
ast: rowan::SyntaxNode::new_root(self.builder.finish()),
// TODO: keep the errors in the builder rather than pass them in here?
errors,
}
}
}
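// Illustrative builder flow (a sketch; the exact `SyntaxKind` variants are
// assumptions here):
//
//     let mut builder = SyntaxTreeBuilder::new();
//     builder.start_node(SyntaxKind::DOCUMENT);
//     builder.token(SyntaxKind::NAME, "pet");
//     builder.finish_node();
//     let tree = builder.finish(Vec::new());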
#[cfg(test)]
mod test {
use crate::ast::Definition;
use crate::Parser;
#[test]
fn directive_name() {
let input = "directive @example(isTreat: Boolean, treatKind: String) on FIELD | MUTATION";
let parser = Parser::new(input);
let ast = parser.parse();
let doc = ast.document();
for def in doc.definitions() {
if let Definition::DirectiveDefinition(directive) = def {
assert_eq!(directive.name().unwrap().text(), "example");
}
}
}
#[test]
fn object_type_definition() {
let input = "
type ProductDimension {
size: String
weight: Float @tag(name: \"hi from inventory value type field\")
}
";
let parser = Parser::new(input);
let ast = parser.parse();
assert_eq!(0, ast.errors().len());
let doc = ast.document();
for def in doc.definitions() {
if let Definition::ObjectTypeDefinition(object_type) = def {
assert_eq!(object_type.name().unwrap().text(), "ProductDimension");
for field_def in object_type.fields_definition().unwrap().field_definitions() {
println!("{}", field_def.name().unwrap().text()); // size weight
}
}
}
}
} | } |
marshal_test.go | // Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package bson
import (
"bytes"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/bson/bsoncodec"
"go.mongodb.org/mongo-driver/bson/primitive"
)
func TestMarshalAppendWithRegistry(t *testing.T) {
for _, tc := range marshalingTestCases {
t.Run(tc.name, func(t *testing.T) {
dst := make([]byte, 0, 1024)
var reg *bsoncodec.Registry
if tc.reg != nil {
reg = tc.reg
} else {
reg = DefaultRegistry
}
got, err := MarshalAppendWithRegistry(reg, dst, tc.val)
noerr(t, err)
if !bytes.Equal(got, tc.want) {
t.Errorf("Bytes are not equal. got %v; want %v", got, tc.want)
t.Errorf("Bytes:\n%v\n%v", got, tc.want)
}
})
}
}
func TestMarshalAppendWithContext(t *testing.T) {
for _, tc := range marshalingTestCases {
t.Run(tc.name, func(t *testing.T) {
dst := make([]byte, 0, 1024)
var reg *bsoncodec.Registry
if tc.reg != nil | else {
reg = DefaultRegistry
}
ec := bsoncodec.EncodeContext{Registry: reg}
got, err := MarshalAppendWithContext(ec, dst, tc.val)
noerr(t, err)
if !bytes.Equal(got, tc.want) {
t.Errorf("Bytes are not equal. got %v; want %v", got, tc.want)
t.Errorf("Bytes:\n%v\n%v", got, tc.want)
}
})
}
}
func TestMarshalWithRegistry(t *testing.T) {
for _, tc := range marshalingTestCases {
t.Run(tc.name, func(t *testing.T) {
var reg *bsoncodec.Registry
if tc.reg != nil {
reg = tc.reg
} else {
reg = DefaultRegistry
}
got, err := MarshalWithRegistry(reg, tc.val)
noerr(t, err)
if !bytes.Equal(got, tc.want) {
t.Errorf("Bytes are not equal. got %v; want %v", got, tc.want)
t.Errorf("Bytes:\n%v\n%v", got, tc.want)
}
})
}
}
func TestMarshalWithContext(t *testing.T) {
for _, tc := range marshalingTestCases {
t.Run(tc.name, func(t *testing.T) {
var reg *bsoncodec.Registry
if tc.reg != nil {
reg = tc.reg
} else {
reg = DefaultRegistry
}
ec := bsoncodec.EncodeContext{Registry: reg}
got, err := MarshalWithContext(ec, tc.val)
noerr(t, err)
if !bytes.Equal(got, tc.want) {
t.Errorf("Bytes are not equal. got %v; want %v", got, tc.want)
t.Errorf("Bytes:\n%v\n%v", got, tc.want)
}
})
}
}
func TestMarshalAppend(t *testing.T) {
for _, tc := range marshalingTestCases {
t.Run(tc.name, func(t *testing.T) {
if tc.reg != nil {
t.Skip() // test requires custom registry
}
dst := make([]byte, 0, 1024)
got, err := MarshalAppend(dst, tc.val)
noerr(t, err)
if !bytes.Equal(got, tc.want) {
t.Errorf("Bytes are not equal. got %v; want %v", got, tc.want)
t.Errorf("Bytes:\n%v\n%v", got, tc.want)
}
})
}
}
func TestMarshalExtJSONAppendWithContext(t *testing.T) {
t.Run("MarshalExtJSONAppendWithContext", func(t *testing.T) {
dst := make([]byte, 0, 1024)
type teststruct struct{ Foo int }
val := teststruct{1}
ec := bsoncodec.EncodeContext{Registry: DefaultRegistry}
got, err := MarshalExtJSONAppendWithContext(ec, dst, val, true, false)
noerr(t, err)
want := []byte(`{"foo":{"$numberInt":"1"}}`)
if !bytes.Equal(got, want) {
t.Errorf("Bytes are not equal. got %v; want %v", got, want)
t.Errorf("Bytes:\n%s\n%s", got, want)
}
})
}
func TestMarshalExtJSONWithContext(t *testing.T) {
t.Run("MarshalExtJSONWithContext", func(t *testing.T) {
type teststruct struct{ Foo int }
val := teststruct{1}
ec := bsoncodec.EncodeContext{Registry: DefaultRegistry}
got, err := MarshalExtJSONWithContext(ec, val, true, false)
noerr(t, err)
want := []byte(`{"foo":{"$numberInt":"1"}}`)
if !bytes.Equal(got, want) {
t.Errorf("Bytes are not equal. got %v; want %v", got, want)
t.Errorf("Bytes:\n%s\n%s", got, want)
}
})
}
func TestMarshal_roundtripFromBytes(t *testing.T) {
before := []byte{
// length
0x1c, 0x0, 0x0, 0x0,
// --- begin subdocument ---
// type - document
0x3,
// key - "foo"
0x66, 0x6f, 0x6f, 0x0,
// length
0x12, 0x0, 0x0, 0x0,
// type - string
0x2,
// key - "bar"
0x62, 0x61, 0x72, 0x0,
// value - string length
0x4, 0x0, 0x0, 0x0,
// value - "baz"
0x62, 0x61, 0x7a, 0x0,
// null terminator
0x0,
// --- end subdocument ---
// null terminator
0x0,
}
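// The bytes above decode to {"foo": {"bar": "baz"}}: 0x1c is the total
// document length (28 bytes), type 0x3 marks an embedded document, and each
// document ends with a 0x00 terminator.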
var doc D
require.NoError(t, Unmarshal(before, &doc))
after, err := Marshal(doc)
require.NoError(t, err)
require.True(t, bytes.Equal(before, after))
}
func TestMarshal_roundtripFromDoc(t *testing.T) {
before := D{
{"foo", "bar"},
{"baz", int64(-27)},
{"bing", A{nil, primitive.Regex{Pattern: "word", Options: "i"}}},
}
b, err := Marshal(before)
require.NoError(t, err)
var after D
require.NoError(t, Unmarshal(b, &after))
if !cmp.Equal(after, before) {
t.Errorf("Documents to not match. got %v; want %v", after, before)
}
}
| {
reg = tc.reg
} |
build.go | // Package rest provides RESTful serialization of AWS requests and responses.
package rest
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"net/http"
"net/url"
"path"
"reflect"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol"
)
// RFC1123GMT is an RFC1123 (RFC822) formatted timestamp. This format is not
// using the standard library's time.RFC1123 due to the desire to always use
// GMT as the timezone.
const RFC1123GMT = "Mon, 2 Jan 2006 15:04:05 GMT"
// Whether the byte value can be sent without escaping in AWS URLs
var noEscape [256]bool
var errValueNotSet = fmt.Errorf("value not set")
func init() {
for i := 0; i < len(noEscape); i++ {
// AWS expects every character except these to be escaped
noEscape[i] = (i >= 'A' && i <= 'Z') ||
(i >= 'a' && i <= 'z') ||
(i >= '0' && i <= '9') ||
i == '-' ||
i == '.' ||
i == '_' ||
i == '~'
}
}
// BuildHandler is a named request handler for building rest protocol requests
var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
// Build builds the REST component of a service request.
func Build(r *request.Request) {
if r.ParamsFilled() {
v := reflect.ValueOf(r.Params).Elem()
buildLocationElements(r, v, false)
buildBody(r, v)
}
}
// BuildAsGET builds the REST component of a service request with the ability to hoist
// data from the body.
func BuildAsGET(r *request.Request) {
if r.ParamsFilled() {
v := reflect.ValueOf(r.Params).Elem()
buildLocationElements(r, v, true)
buildBody(r, v)
}
}
func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) |
func buildBody(r *request.Request, v reflect.Value) {
if field, ok := v.Type().FieldByName("_"); ok {
if payloadName := field.Tag.Get("payload"); payloadName != "" {
pfield, _ := v.Type().FieldByName(payloadName)
if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
payload := reflect.Indirect(v.FieldByName(payloadName))
if payload.IsValid() && payload.Interface() != nil {
switch reader := payload.Interface().(type) {
case io.ReadSeeker:
r.SetReaderBody(reader)
case []byte:
r.SetBufferBody(reader)
case string:
r.SetStringBody(reader)
default:
r.Error = awserr.New("SerializationError",
"failed to encode REST request",
fmt.Errorf("unknown payload type %s", payload.Type()))
}
}
}
}
}
}
func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error {
str, err := convertType(v, tag)
if err == errValueNotSet {
return nil
} else if err != nil {
return awserr.New("SerializationError", "failed to encode REST request", err)
}
header.Add(name, str)
return nil
}
func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error {
prefix := tag.Get("locationName")
for _, key := range v.MapKeys() {
str, err := convertType(v.MapIndex(key), tag)
if err == errValueNotSet {
continue
} else if err != nil {
return awserr.New("SerializationError", "failed to encode REST request", err)
}
header.Add(prefix+key.String(), str)
}
return nil
}
func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error {
value, err := convertType(v, tag)
if err == errValueNotSet {
return nil
} else if err != nil {
return awserr.New("SerializationError", "failed to encode REST request", err)
}
u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1)
u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1)
u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1)
u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1)
return nil
}
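// Illustrative expansion (hypothetical pattern and values): for a path
// pattern "/buckets/{Bucket}/keys/{Key+}" with Key set to "a/b c", the
// greedy "{Key+}" form keeps '/' unescaped in RawPath ("a/b%20c"), while a
// plain "{Key}" would escape it ("a%2Fb%20c").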
func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error {
switch value := v.Interface().(type) {
case []*string:
for _, item := range value {
query.Add(name, *item)
}
case map[string]*string:
for key, item := range value {
query.Add(key, *item)
}
case map[string][]*string:
for key, items := range value {
for _, item := range items {
query.Add(key, *item)
}
}
default:
str, err := convertType(v, tag)
if err == errValueNotSet {
return nil
} else if err != nil {
return awserr.New("SerializationError", "failed to encode REST request", err)
}
query.Set(name, str)
}
return nil
}
func cleanPath(u *url.URL) {
hasSlash := strings.HasSuffix(u.Path, "/")
// clean up path, removing duplicate `/`
u.Path = path.Clean(u.Path)
u.RawPath = path.Clean(u.RawPath)
if hasSlash && !strings.HasSuffix(u.Path, "/") {
u.Path += "/"
u.RawPath += "/"
}
}
// EscapePath escapes part of a URL path in Amazon style
func EscapePath(path string, encodeSep bool) string {
var buf bytes.Buffer
for i := 0; i < len(path); i++ {
c := path[i]
if noEscape[c] || (c == '/' && !encodeSep) {
buf.WriteByte(c)
} else {
fmt.Fprintf(&buf, "%%%02X", c)
}
}
return buf.String()
}
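// Illustrative behavior (derived from the noEscape table above):
//
//	EscapePath("a b/c", true)  // "a%20b%2Fc" ('/' escaped)
//	EscapePath("a b/c", false) // "a%20b/c" ('/' preserved)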
func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) {
v = reflect.Indirect(v)
if !v.IsValid() {
return "", errValueNotSet
}
switch value := v.Interface().(type) {
case string:
str = value
case []byte:
str = base64.StdEncoding.EncodeToString(value)
case bool:
str = strconv.FormatBool(value)
case int64:
str = strconv.FormatInt(value, 10)
case float64:
str = strconv.FormatFloat(value, 'f', -1, 64)
case time.Time:
str = value.UTC().Format(RFC1123GMT)
case aws.JSONValue:
if len(value) == 0 {
return "", errValueNotSet
}
escaping := protocol.NoEscape
if tag.Get("location") == "header" {
escaping = protocol.Base64Escape
}
str, err = protocol.EncodeJSONValue(value, escaping)
if err != nil {
return "", fmt.Errorf("unable to encode JSONValue, %v", err)
}
default:
err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
return "", err
}
return str, nil
}
| {
query := r.HTTPRequest.URL.Query()
// Set up the raw path to match the base path pattern. This is needed
// so that when the path is mutated a custom escaped version can be
// stored in RawPath that will be used by the Go client.
r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
for i := 0; i < v.NumField(); i++ {
m := v.Field(i)
if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
continue
}
if m.IsValid() {
field := v.Type().Field(i)
name := field.Tag.Get("locationName")
if name == "" {
name = field.Name
}
if kind := m.Kind(); kind == reflect.Ptr {
m = m.Elem()
} else if kind == reflect.Interface {
if !m.Elem().IsValid() {
continue
}
}
if !m.IsValid() {
continue
}
if field.Tag.Get("ignore") != "" {
continue
}
var err error
switch field.Tag.Get("location") {
case "headers": // header maps
err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag)
case "header":
err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag)
case "uri":
err = buildURI(r.HTTPRequest.URL, m, name, field.Tag)
case "querystring":
err = buildQueryString(query, m, name, field.Tag)
default:
if buildGETQuery {
err = buildQueryString(query, m, name, field.Tag)
}
}
r.Error = err
}
if r.Error != nil {
return
}
}
r.HTTPRequest.URL.RawQuery = query.Encode()
if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) {
cleanPath(r.HTTPRequest.URL)
}
} |
local.strategy.ts | import { Strategy } from 'passport-local';
import { PassportStrategy } from '@nestjs/passport';
import { Injectable } from '@nestjs/common';
import { AuthenticationService } from './authentication.service';
import User from '../users/user.entity';
@Injectable()
export class | extends PassportStrategy(Strategy) {
constructor(private authenticationService: AuthenticationService) {
super({
usernameField: 'email'
});
}
async validate(email: string, password: string): Promise<User> {
return this.authenticationService.getAuthenticatedUser(email, password);
}
}
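// Illustrative usage in a controller (a sketch; route and handler names are
// hypothetical):
//
//   @UseGuards(AuthGuard('local'))
//   @Post('log-in')
//   logIn(@Req() request) { return request.user; }
//
// AuthGuard('local') dispatches to this strategy because passport-local
// registers under the 'local' name by default.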
| LocalStrategy |
passport.js | const { authSecret } = require('../.env');
const passport = require('passport'); |
module.exports = app => {
const params = {
secretOrKey: authSecret,
jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken()
};
const strategy = new Strategy(params, (payload, done) => {
app.db('users')
.where({ id: payload.id })
.first()
.then(user => done(null, user ? { ...payload } : false))
.catch(err => done(err, false))
});
passport.use(strategy)
return {
authenticate: () => passport.authenticate('jwt', { session: false })
}
} | const passportJwt = require('passport-jwt');
const { Strategy, ExtractJwt } = passportJwt; |
structural_mechanics_analysis_rom.py | import KratosMultiphysics
import KratosMultiphysics.RomApplication as romapp
import KratosMultiphysics.StructuralMechanicsApplication
from KratosMultiphysics.RomApplication.empirical_cubature_method import EmpiricalCubatureMethod
from KratosMultiphysics.RomApplication import python_solvers_wrapper_rom as solver_wrapper
from KratosMultiphysics.StructuralMechanicsApplication.structural_mechanics_analysis import StructuralMechanicsAnalysis
import json
import numpy as np
class StructuralMechanicsAnalysisROM(StructuralMechanicsAnalysis):
def __init__(self,model,project_parameters, hyper_reduction_element_selector = None):
super().__init__(model,project_parameters)
if hyper_reduction_element_selector is not None:
if hyper_reduction_element_selector == "EmpiricalCubature":
self.hyper_reduction_element_selector = EmpiricalCubatureMethod()
self.time_step_residual_matrix_container = []
else:
err_msg = "The requested element selection method \"" + hyper_reduction_element_selector + "\" is not in the rom application\n"
err_msg += "Available options are: \"EmpiricalCubature\""
raise Exception(err_msg)
else:
self.hyper_reduction_element_selector = None
#### Internal functions ####
def _CreateSolver(self):
""" Create the Solver (and create and import the ModelPart if it is not alread in the model) """
## Solver construction
with open('RomParameters.json') as rom_parameters:
rom_settings = KratosMultiphysics.Parameters(rom_parameters.read())
self.project_parameters["solver_settings"].AddValue("rom_settings", rom_settings["rom_settings"])
return solver_wrapper.CreateSolverByParameters(self.model, self.project_parameters["solver_settings"],self.project_parameters["problem_data"]["parallel_type"].GetString())
def _GetSimulationName(self):
return "::[ROM Simulation]:: "
def ModifyAfterSolverInitialize(self):
"""Here is where the ROM_BASIS is imposed to each node"""
super().ModifyAfterSolverInitialize()
computing_model_part = self._solver.GetComputingModelPart()
with open('RomParameters.json') as f:
data = json.load(f)
nodal_dofs = len(data["rom_settings"]["nodal_unknowns"])
nodal_modes = data["nodal_modes"]
counter = 0
rom_dofs= self.project_parameters["solver_settings"]["rom_settings"]["number_of_rom_dofs"].GetInt()
for node in computing_model_part.Nodes:
aux = KratosMultiphysics.Matrix(nodal_dofs, rom_dofs)
for j in range(nodal_dofs):
Counter=str(node.Id)
for i in range(rom_dofs):
aux[j,i] = nodal_modes[Counter][j][i]
node.SetValue(romapp.ROM_BASIS, aux ) # ROM basis
counter+=1
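# Illustrative interpretation (standard projection-based ROM; the numbers are
# hypothetical): with 2 nodal unknowns and number_of_rom_dofs == 3, each node
# stores a 2x3 basis block `aux`, and the nodal solution is reconstructed as
# u_node = aux @ q, where q are the 3 generalized ROM coordinates.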
if self.hyper_reduction_element_selector is not None:
|
def FinalizeSolutionStep(self):
if self.hyper_reduction_element_selector is not None:
if self.hyper_reduction_element_selector.Name == "EmpiricalCubature":
print('\n\n\n\nGenerating matrix of residuals')
ResMat = self.ResidualUtilityObject.GetResiduals()
NP_ResMat = np.array(ResMat, copy=False)
self.time_step_residual_matrix_container.append(NP_ResMat)
super().FinalizeSolutionStep()
def Finalize(self):
super().Finalize()
if self.hyper_reduction_element_selector is not None:
if self.hyper_reduction_element_selector.Name == "EmpiricalCubature":
OriginalNumberOfElements = self._GetSolver().GetComputingModelPart().NumberOfElements()
ModelPartName = self._GetSolver().settings["model_import_settings"]["input_filename"].GetString()
self.hyper_reduction_element_selector.SetUp(self.time_step_residual_matrix_container, OriginalNumberOfElements, ModelPartName)
self.hyper_reduction_element_selector.Run()
| if self.hyper_reduction_element_selector.Name == "EmpiricalCubature":
self.ResidualUtilityObject = romapp.RomResidualsUtility(self._GetSolver().GetComputingModelPart(), self.project_parameters["solver_settings"]["rom_settings"], self._GetSolver().get_solution_scheme()) |
ngx-moderndatepicker.component.ts | import { Component, OnInit, Input, OnChanges, SimpleChanges, HostListener, forwardRef, ElementRef, ChangeDetectionStrategy } from '@angular/core';
import { NG_VALUE_ACCESSOR , ControlValueAccessor} from '@angular/forms';
import {
startOfMonth,
endOfMonth,
addMonths,
setYear,
eachDay,
getDate,
getMonth,
getYear,
isToday,
isSameDay,
isSameMonth,
isSameYear,
format,
getDay,
subDays,
setDay,
addYears,
subYears,
setMonth,
} from 'date-fns';
export type AddClass = string | string[] | { [k: string]: boolean } | null;
export interface ModernDatePickerOptions {
minYear?: number; // default: current year - 30
maxYear?: number; // default: current year + 30
displayFormat?: string; // default: 'MMM D[,] YYYY'
dayNamesFormat?: string; // default 'ddd'
monthNamesFormat?: string; // default 'MMM'
firstCalendarDay?: number; // 0 = Sunday (default), 1 = Monday, ..
locale?: object;
minDate?: Date;
maxDate?: Date;
/** Placeholder for the input field */
placeholder?: string;
/** [ngClass] to add to the input field */
addClass?: AddClass;
/** [ngStyle] to add to the input field */
addStyle?: { [k: string]: any } | null;
/** ID to assign to the input field */
fieldId?: string;
/** Days treated as weekends, Sunday is 0; highlighted with a gray background */
weekendsDay?: number[];
/** List of holidays to highlight */
holidayList?: Array<Date>;
}
// Counter for calculating the auto-incrementing field ID
let counter = 0;
/**
 * Internal library helper that checks whether a value is empty
* @param value
*/
const isNil = (value: Date | ModernDatePickerOptions) => {
return (typeof value === 'undefined') || (value === null);
};
@Component({
selector: 'ngx-moderndatepicker',
templateUrl: 'ngx-moderndatepicker.component.html',
styleUrls: ['ngx-moderndatepicker.component.scss'],
changeDetection: ChangeDetectionStrategy.OnPush,
providers: [
{ provide: NG_VALUE_ACCESSOR, useExisting: forwardRef(() => NgxModerndatepickerComponent), multi: true }
]
})
export class | implements OnInit, OnChanges, ControlValueAccessor {
@Input() options: ModernDatePickerOptions;
/**
* Disable datepicker's input
*/
@Input() headless = false;
/**
* Set datepicker's visibility state
*/
@Input() isOpened = false;
/**
* Datepicker dropdown position
*/
@Input() position = 'bottom-right';
private positions = ['bottom-left', 'bottom-right', 'top-left', 'top-right'];
innerValue: Date;
displayValue: string;
displayFormat: string;
date: Date;
barTitle: string;
barTitleFormat: string;
barTitleIfEmpty: string;
minYear: number;
maxYear: number;
firstCalendarDay: number;
view: string;
years: { year: number; isThisYear: boolean }[];
dayNames: string[];
monthNames: Array<any>;
dayNamesFormat: string;
monthNamesFormat: string;
days: {
date: Date;
day: number;
month: number;
year: number;
inThisMonth: boolean;
isToday: boolean;
isSelected: boolean;
isSelectable: boolean;
isWeekend: boolean;
isHoliday: boolean;
}[];
locale: object;
placeholder: string;
addClass: AddClass;
addStyle: { [k: string]: any } | null;
fieldId: string;
disabled: boolean;
useEmptyBarTitle: boolean;
private onTouchedCallback: () => void = () => { };
private onChangeCallback: (_: any) => void = () => { };
public setDisabledState(isDisabled: boolean) {
this.disabled = isDisabled;
}
get value(): Date {
return this.innerValue;
}
set value(val: Date) {
this.innerValue = val;
this.onChangeCallback(this.innerValue);
}
constructor() {
}
ngOnInit() {
this.view = 'year';
if(!this.date) {
this.date = new Date();
}
this.setOptions();
this.initDayNames();
this.initYears();
this.initMonthName();
this.init();
// Check if 'position' property is correct
if (this.positions.indexOf(this.position) === -1) {
throw new TypeError(`ngx-moderndatepicker: invalid position property value '${this.position}' (expected: ${this.positions.join(', ')})`);
}
}
ngOnChanges(changes: SimpleChanges) {
this.setOptions();
this.initDayNames();
this.init();
this.initYears();
this.initMonthName();
}
get defaultFieldId(): string {
// Only evaluate and increment if required
const value = `datepicker-${counter++}`;
Object.defineProperty(this, 'defaultFieldId', {value});
return value;
}
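// Illustrative effect of the memoization above: the first read runs the
// getter and pins the result as an own property of the instance, so later
// reads return the same id and the shared counter is not incremented again.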
setOptions(): void {
const today = new Date(); // fall back to "now": this.date can still be undefined when the options are first applied
this.minYear = this.options && this.options.minYear || getYear(today) - 30;
this.maxYear = this.options && this.options.maxYear || getYear(today) + 30;
this.displayFormat = this.options && this.options.displayFormat || 'MMM D[,] YYYY';
this.barTitleFormat = 'YYYY';
this.dayNamesFormat = this.options && this.options.dayNamesFormat || 'ddd';
this.monthNamesFormat = this.options && this.options.monthNamesFormat || 'MMM';
this.barTitleIfEmpty = (new Date().getFullYear()).toString();
this.firstCalendarDay = this.options && this.options.firstCalendarDay || 0;
this.locale = this.options && { locale: this.options.locale } || {};
this.placeholder = this.options && this.options.placeholder || '';
this.addClass = this.options && this.options.addClass || {};
this.addStyle = this.options && this.options.addStyle || {};
this.fieldId = this.options && this.options.fieldId || this.defaultFieldId;
}
nextYear(): void {
this.date = addYears(this.date, 1);
this.barTitle = format(this.date, this.barTitleFormat, this.locale);
this.init();
this.initMonthName();
}
prevYear(): void {
this.date = subYears(this.date, 1);
this.barTitle = format(this.date, this.barTitleFormat, this.locale);
this.init();
this.initMonthName();
}
setDate(i: number): void {
this.date = this.days[i].date;
this.value = this.date;
this.init();
this.close();
}
setYear(i: number): void {
this.date = setYear(this.date, this.years[i].year);
this.init();
this.initMonthName();
this.view = 'year';
}
selectMonth(i: number): void {
this.date = setMonth(this.date,i);
this.init();
this.initMonthName();
this.view = 'year';
}
/**
* Checks if specified date is in range of min and max dates
* @param date
*/
private isDateSelectable(date: Date): boolean {
if (isNil(this.options)) {
return true;
}
const minDateSet = !isNil(this.options.minDate);
const maxDateSet = !isNil(this.options.maxDate);
const timestamp = date.valueOf();
if (minDateSet && (timestamp < this.options.minDate.valueOf())) {
return false;
}
if (maxDateSet && (timestamp > this.options.maxDate.valueOf())) {
return false;
}
return true;
}
private isWeekendDay (date: Date): boolean {
if (Array.isArray(this.options.weekendsDay)) {
return this.options.weekendsDay.indexOf(getDay(date)) !== -1;
}
return false;
}
private isHoliday (date: Date): boolean {
if (Array.isArray(this.options.holidayList)) {
return this.options.holidayList.some((day) => isSameDay(day, date));
}
return false;
}
init(): void {
// this.date may be null after .reset(); fall back to current date.
const actualDate = this.date || new Date();
const start = startOfMonth(actualDate);
const end = endOfMonth(actualDate);
this.days = eachDay(start, end).map(date => {
return {
date: date,
day: getDate(date),
month: getMonth(date),
year: getYear(date),
inThisMonth: true,
isToday: isToday(date),
isSelected: isSameDay(date, this.innerValue) && isSameMonth(date, this.innerValue) && isSameYear(date, this.innerValue),
isSelectable: this.isDateSelectable(date),
isWeekend: this.isWeekendDay(date),
isHoliday: this.isHoliday(date)
};
});
const tmp = getDay(start) - this.firstCalendarDay;
const prevDays = tmp < 0 ? 7 - this.firstCalendarDay : tmp;
for (let i = 1; i <= prevDays; i++) {
const date = subDays(start, i);
this.days.unshift({
date: date,
day: getDate(date),
month: getMonth(date),
year: getYear(date),
inThisMonth: false,
isToday: isToday(date),
isSelected: isSameDay(date, this.innerValue) && isSameMonth(date, this.innerValue) && isSameYear(date, this.innerValue),
isSelectable: this.isDateSelectable(date),
isWeekend : this.isWeekendDay(date),
isHoliday: this.isHoliday(date)
});
}
if (this.innerValue) {
this.displayValue = format(this.innerValue, this.displayFormat, this.locale);
this.barTitle = format(start, this.barTitleFormat, this.locale);
} else {
this.displayValue = '';
this.barTitle = this.useEmptyBarTitle ? this.barTitleIfEmpty : format(this.date, this.barTitleFormat, this.locale);
}
}
initYears(): void {
const range = this.maxYear - this.minYear;
this.years = Array.from(new Array(range), (x, i) => i + this.minYear).map(year => {
return {
year: year,
isThisYear: year === getYear(this.date),
isToday : year === getYear(new Date()),
isSelectable: this.isYearSelectable(year)
};
});
}
private isYearSelectable(year: number): boolean {
const minDate = isNil(this.options.minDate) ? false : this.options.minDate;
const maxDate = isNil(this.options.maxDate) ? false : this.options.maxDate;
if ( minDate && maxDate ) {
return minDate.getFullYear() <= year && year <= maxDate.getFullYear();
} else if (minDate) {
return minDate.getFullYear() <= year;
} else if (maxDate) {
return year <= maxDate.getFullYear();
}
return true;
}
initDayNames(): void {
this.dayNames = [];
const start = this.firstCalendarDay;
for (let i = start; i <= 6 + start; i++) {
const date = setDay(new Date(), i);
this.dayNames.push(format(date, this.dayNamesFormat, this.locale));
}
}
initMonthName(): void {
let monthNames = [];
const actualDate = this.date || new Date();
let currentDate = new Date(actualDate);
const start = subYears(currentDate.setMonth(11),1);
for (let i = 1; i <= 12 ; i++) {
const date = addMonths(start, i);
monthNames.push({
name: format(date, this.monthNamesFormat, this.locale),
isSelected: date.getMonth() === actualDate.getMonth(),
isThisMonth: isSameMonth(date,new Date()) && isSameYear(actualDate,new Date()),
isSelectable: this.isMonthSelectable(date)
});
}
this.monthNames = monthNames;
}
private isMonthSelectable(date: Date): boolean {
const minDate = isNil(this.options.minDate) ? false : this.options.minDate;
const maxDate = isNil(this.options.maxDate) ? false : this.options.maxDate;
// Compare at month granularity by flattening (year, month) into one index.
const monthIndex = (d: Date) => d.getFullYear() * 12 + d.getMonth();
if (minDate && monthIndex(date) < monthIndex(minDate)) {
return false;
}
if (maxDate && monthIndex(date) > monthIndex(maxDate)) {
return false;
}
return true;
}
toggleView(): void {
this.view = this.view === 'year' ? 'years' : 'year';
}
toggle(): void {
this.isOpened = !this.isOpened;
if (!this.isOpened && this.view === 'years') {
this.toggleView();
}
}
close(): void {
this.isOpened = false;
if (this.view === 'years') {
this.toggleView();
}
}
reset(fireValueChangeEvent = false): void {
this.date = null;
this.innerValue = null;
this.init();
this.initMonthName();
if (fireValueChangeEvent && this.onChangeCallback) {
this.onChangeCallback(this.innerValue);
}
}
writeValue(val: Date) {
if (val) {
this.date = val;
this.innerValue = val;
this.init();
this.initMonthName();
this.displayValue = format(this.innerValue, this.displayFormat, this.locale);
this.barTitle = format(startOfMonth(val), this.barTitleFormat, this.locale);
}
}
registerOnChange(fn: any) {
this.onChangeCallback = fn;
}
registerOnTouched(fn: any) {
this.onTouchedCallback = fn;
}
@HostListener('document:click', ['$event']) onBlur(e: MouseEvent) {
if (!this.isOpened) {
return;
}
const input = document.querySelector('.ngx-moderndatepicker-input');
if (input == null) {
return;
}
if (e.target === input || input.contains(<any>e.target)) {
return;
}
const container = document.querySelector('.ngx-moderndatepicker-calendar-container');
if (container && container !== e.target && !container.contains(<any>e.target) && !(<any>e.target).classList.contains('year-unit') && !(<any>e.target).classList.contains('month-unit')) {
this.close();
}
}
}
| NgxModerndatepickerComponent |
0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-10 08:47
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
| initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Application',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('customer_status', models.CharField(choices=[('draft', 'Draft'), ('under_review', 'Under Review'), ('id_required', 'Identification Required'), ('returns_required', 'Returns Completion Required'), ('amendment_required', 'Amendment Required'), ('id_and_amendment_required', 'Identification/Amendments Required'), ('id_and_returns_required', 'Identification/Returns Required'), ('returns_and_amendment_required', 'Returns/Amendments Required'), ('id_and_returns_and_amendment_required', 'Identification/Returns/Amendments Required'), ('approved', 'Approved'), ('declined', 'Declined')], default='draft', max_length=40, verbose_name='Customer Status')),
('data', django.contrib.postgres.fields.jsonb.JSONField()),
('correctness_disclaimer', models.BooleanField(default=False)),
('further_information_disclaimer', models.BooleanField(default=False)),
('lodgement_number', models.CharField(blank=True, default='', max_length=9)),
('lodgement_sequence', models.IntegerField(blank=True, default=0)),
('lodgement_date', models.DateField(blank=True, null=True)),
('processing_status', models.CharField(choices=[('draft', 'Draft'), ('new', 'New'), ('renewal', 'Renewal'), ('ready_for_action', 'Ready for Action'), ('awaiting_applicant_response', 'Awaiting Applicant Response'), ('awaiting_assessor_response', 'Awaiting Assessor Response'), ('awaiting_responses', 'Awaiting Responses'), ('ready_for_conditions', 'Ready for Conditions'), ('ready_to_issue', 'Ready to Issue'), ('issued', 'Issued'), ('declined', 'Declined')], default='draft', max_length=30, verbose_name='Processing Status')),
('id_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('awaiting_update', 'Awaiting Update'), ('updated', 'Updated'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Identification Check Status')),
('returns_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('awaiting_returns', 'Awaiting Returns'), ('completed', 'Completed'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Return Check Status')),
('character_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Character Check Status')),
('review_status', models.CharField(choices=[('not_reviewed', 'Not Reviewed'), ('awaiting_amendments', 'Awaiting Amendments'), ('amended', 'Amended'), ('accepted', 'Accepted')], default='not_reviewed', max_length=30, verbose_name='Review Status')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ApplicationCondition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField()),
],
),
migrations.CreateModel(
name='ApplicationLogEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(blank=True)),
('created', models.DateField(auto_now_add=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='AssessmentCondition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField()),
('acceptance_status', models.CharField(choices=[('not_specified', 'Not Specified'), ('accepted', 'Accepted'), ('declined', 'Declined')], default='not_specified', max_length=20, verbose_name='Acceptance Status')),
],
),
migrations.CreateModel(
name='AmendmentRequest',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('status', models.CharField(choices=[('requested', 'Requested'), ('amended', 'Amended')], default='requested', max_length=30, verbose_name='Status')),
('reason', models.CharField(choices=[('insufficient_detail', 'The information provided was insufficient'), ('missing_information', 'There was missing information'), ('other', 'Other')], default='insufficient_detail', max_length=30, verbose_name='Reason')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='Assessment',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('status', models.CharField(choices=[('awaiting_assessment', 'Awaiting Assessment'), ('assessed', 'Assessed')], default='awaiting_assessment', max_length=20, verbose_name='Status')),
('comment', models.TextField(blank=True)),
('purpose', models.TextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='CustomLogEntry',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('subject', models.CharField(blank=True, max_length=200, verbose_name='Subject / Description')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='EmailLogEntry',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('subject', models.CharField(blank=True, max_length=500)),
('to', models.CharField(blank=True, max_length=500, verbose_name='To')),
('from_email', models.CharField(blank=True, max_length=200, verbose_name='From')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='IDRequest',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('reason', models.CharField(choices=[('missing', 'There is currently no Photographic Identification uploaded'), ('expired', 'The current identification has expired'), ('not_recognised', 'The current identification is not recognised by the Department of Parks and Wildlife'), ('illegible', 'The current identification image is of poor quality and cannot be made out.'), ('other', 'Other')], default='missing', max_length=30, verbose_name='Reason')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='ReturnsRequest',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('reason', models.CharField(choices=[('outstanding', 'There are currently outstanding returns for the previous licence'), ('other', 'Other')], default='outstanding', max_length=30, verbose_name='Reason')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
] |
|
db_containers.go | package main
import (
"database/sql"
"fmt"
"time"
"github.com/lxc/lxd/shared"
log "gopkg.in/inconshreveable/log15.v2"
)
type containerType int
const (
cTypeRegular containerType = 0
cTypeSnapshot containerType = 1
)
func dbContainerRemove(db *sql.DB, name string) error {
id, err := dbContainerId(db, name)
if err != nil {
return err
}
tx, err := dbBegin(db)
if err != nil {
return err
}
err = dbContainerConfigClear(tx, id)
if err != nil {
tx.Rollback()
return err
}
| if err != nil {
tx.Rollback()
return err
}
return txCommit(tx)
}
func dbContainerName(db *sql.DB, id int) (string, error) {
q := "SELECT name FROM containers WHERE id=?"
name := ""
arg1 := []interface{}{id}
arg2 := []interface{}{&name}
err := dbQueryRowScan(db, q, arg1, arg2)
return name, err
}
func dbContainerId(db *sql.DB, name string) (int, error) {
q := "SELECT id FROM containers WHERE name=?"
id := -1
arg1 := []interface{}{name}
arg2 := []interface{}{&id}
err := dbQueryRowScan(db, q, arg1, arg2)
return id, err
}
func dbContainerGet(db *sql.DB, name string) (containerArgs, error) {
args := containerArgs{}
args.Name = name
ephemInt := -1
q := "SELECT id, architecture, type, ephemeral, creation_date FROM containers WHERE name=?"
arg1 := []interface{}{name}
arg2 := []interface{}{&args.Id, &args.Architecture, &args.Ctype, &ephemInt, &args.CreationDate}
err := dbQueryRowScan(db, q, arg1, arg2)
if err != nil {
return args, err
}
if args.Id == -1 {
return args, fmt.Errorf("Unknown container")
}
if ephemInt == 1 {
args.Ephemeral = true
}
config, err := dbContainerConfig(db, args.Id)
if err != nil {
return args, err
}
args.Config = config
profiles, err := dbContainerProfiles(db, args.Id)
if err != nil {
return args, err
}
args.Profiles = profiles
/* get container_devices */
args.Devices = shared.Devices{}
newdevs, err := dbDevices(db, name, false)
if err != nil {
return args, err
}
for k, v := range newdevs {
args.Devices[k] = v
}
return args, nil
}
func dbContainerCreate(db *sql.DB, args containerArgs) (int, error) {
id, err := dbContainerId(db, args.Name)
if err == nil {
return 0, DbErrAlreadyDefined
}
tx, err := dbBegin(db)
if err != nil {
return 0, err
}
ephemInt := 0
	if args.Ephemeral {
		ephemInt = 1
	}
args.CreationDate = time.Now().UTC()
str := fmt.Sprintf("INSERT INTO containers (name, architecture, type, ephemeral, creation_date) VALUES (?, ?, ?, ?, ?)")
stmt, err := tx.Prepare(str)
if err != nil {
tx.Rollback()
return 0, err
}
defer stmt.Close()
result, err := stmt.Exec(args.Name, args.Architecture, args.Ctype, ephemInt, args.CreationDate.Unix())
if err != nil {
tx.Rollback()
return 0, err
}
	id64, err := result.LastInsertId()
	if err != nil {
		tx.Rollback()
		return 0, fmt.Errorf("Error fetching the id of newly inserted container %s: %v", args.Name, err)
	}
// TODO: is this really int64? we should fix it everywhere if so
id = int(id64)
if err := dbContainerConfigInsert(tx, id, args.Config); err != nil {
tx.Rollback()
return 0, err
}
if err := dbContainerProfilesInsert(tx, id, args.Profiles); err != nil {
tx.Rollback()
return 0, err
}
if err := dbDevicesAdd(tx, "container", int64(id), args.Devices); err != nil {
tx.Rollback()
return 0, err
}
return id, txCommit(tx)
}
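// A minimal usage sketch (the field values are illustrative assumptions; the
// field names follow the containerArgs usage elsewhere in this file):
//
//	id, err := dbContainerCreate(db, containerArgs{
//		Name:      "c1",
//		Ctype:     cTypeRegular,
//		Ephemeral: false,
//		Config:    map[string]string{"limits.memory": "1GB"},
//	})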
func dbContainerConfigClear(tx *sql.Tx, id int) error {
_, err := tx.Exec("DELETE FROM containers_config WHERE container_id=?", id)
if err != nil {
return err
}
_, err = tx.Exec("DELETE FROM containers_profiles WHERE container_id=?", id)
if err != nil {
return err
}
_, err = tx.Exec(`DELETE FROM containers_devices_config WHERE id IN
(SELECT containers_devices_config.id
FROM containers_devices_config JOIN containers_devices
ON containers_devices_config.container_device_id=containers_devices.id
WHERE containers_devices.container_id=?)`, id)
if err != nil {
return err
}
_, err = tx.Exec("DELETE FROM containers_devices WHERE container_id=?", id)
return err
}
func dbContainerConfigInsert(tx *sql.Tx, id int, config map[string]string) error {
str := "INSERT INTO containers_config (container_id, key, value) values (?, ?, ?)"
stmt, err := tx.Prepare(str)
if err != nil {
return err
}
defer stmt.Close()
for k, v := range config {
_, err := stmt.Exec(id, k, v)
if err != nil {
shared.Debugf("Error adding configuration item %s = %s to container %d",
k, v, id)
return err
}
}
return nil
}
func dbContainerConfigRemove(db *sql.DB, id int, name string) error {
_, err := dbExec(db, "DELETE FROM containers_config WHERE key=? AND container_id=?", name, id)
return err
}
func dbContainerProfilesInsert(tx *sql.Tx, id int, profiles []string) error {
applyOrder := 1
str := `INSERT INTO containers_profiles (container_id, profile_id, apply_order) VALUES
(?, (SELECT id FROM profiles WHERE name=?), ?);`
stmt, err := tx.Prepare(str)
if err != nil {
return err
}
defer stmt.Close()
for _, p := range profiles {
_, err = stmt.Exec(id, p, applyOrder)
if err != nil {
shared.Debugf("Error adding profile %s to container: %s",
p, err)
return err
}
applyOrder = applyOrder + 1
}
return nil
}
// Get a list of profiles for a given container id.
func dbContainerProfiles(db *sql.DB, containerId int) ([]string, error) {
var name string
var profiles []string
query := `
SELECT name FROM containers_profiles
JOIN profiles ON containers_profiles.profile_id=profiles.id
WHERE container_id=?
ORDER BY containers_profiles.apply_order`
inargs := []interface{}{containerId}
outfmt := []interface{}{name}
results, err := dbQueryScan(db, query, inargs, outfmt)
if err != nil {
return nil, err
}
for _, r := range results {
name = r[0].(string)
profiles = append(profiles, name)
}
return profiles, nil
}
// dbContainerConfig gets the container configuration map from the DB
func dbContainerConfig(db *sql.DB, containerId int) (map[string]string, error) {
var key, value string
q := `SELECT key, value FROM containers_config WHERE container_id=?`
inargs := []interface{}{containerId}
outfmt := []interface{}{key, value}
// Results is already a slice here, not db Rows anymore.
results, err := dbQueryScan(db, q, inargs, outfmt)
if err != nil {
return nil, err //SmartError will wrap this and make "not found" errors pretty
}
config := map[string]string{}
for _, r := range results {
key = r[0].(string)
value = r[1].(string)
config[key] = value
}
return config, nil
}
func dbContainersList(db *sql.DB, cType containerType) ([]string, error) {
q := fmt.Sprintf("SELECT name FROM containers WHERE type=? ORDER BY name")
inargs := []interface{}{cType}
var container string
outfmt := []interface{}{container}
result, err := dbQueryScan(db, q, inargs, outfmt)
if err != nil {
return nil, err
}
var ret []string
for _, container := range result {
ret = append(ret, container[0].(string))
}
return ret, nil
}
func dbContainerRename(db *sql.DB, oldName string, newName string) error {
tx, err := dbBegin(db)
if err != nil {
return err
}
str := fmt.Sprintf("UPDATE containers SET name = ? WHERE name = ?")
stmt, err := tx.Prepare(str)
if err != nil {
tx.Rollback()
return err
}
defer stmt.Close()
shared.Log.Debug(
"Calling SQL Query",
log.Ctx{
"query": "UPDATE containers SET name = ? WHERE name = ?",
"oldName": oldName,
"newName": newName})
if _, err := stmt.Exec(newName, oldName); err != nil {
tx.Rollback()
return err
}
return txCommit(tx)
}
func dbContainerUpdate(tx *sql.Tx, id int, architecture int, ephemeral bool) error {
str := fmt.Sprintf("UPDATE containers SET architecture=?, ephemeral=? WHERE id=?")
stmt, err := tx.Prepare(str)
if err != nil {
return err
}
defer stmt.Close()
ephemeralInt := 0
if ephemeral {
ephemeralInt = 1
}
if _, err := stmt.Exec(architecture, ephemeralInt, id); err != nil {
return err
}
return nil
}
func dbContainerGetSnapshots(db *sql.DB, name string) ([]string, error) {
result := []string{}
	prefix := name + shared.SnapshotDelimiter
	length := len(prefix)
	q := "SELECT name FROM containers WHERE type=? AND SUBSTR(name,1,?)=?"
	inargs := []interface{}{cTypeSnapshot, length, prefix}
outfmt := []interface{}{name}
dbResults, err := dbQueryScan(db, q, inargs, outfmt)
if err != nil {
return result, err
}
for _, r := range dbResults {
result = append(result, r[0].(string))
}
return result, nil
} | _, err = tx.Exec("DELETE FROM containers WHERE id=?", id) |
version.ts | // Copyright (c) Jan Freyberg
// Distributed under the terms of the Modified BSD License.
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore
// eslint-disable-next-line @typescript-eslint/no-var-requires
const data = require('../package.json');
/**
* The _model_module_version/_view_module_version this package implements.
*
* The html widget manager assumes that this is the same as the npm package
* version number. | */
export const MODULE_VERSION = data.version;
/*
* The current package name.
*/
export const MODULE_NAME = data.name; | |
optimizer.go | // Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"context"
"math"
"github.com/pingcap/errors"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/auth"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/lock"
"github.com/pingcap/tidb/planner/property"
"github.com/pingcap/tidb/privilege"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/set"
"go.uber.org/atomic"
)
// OptimizeAstNode optimizes the query to a physical plan directly.
var OptimizeAstNode func(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (Plan, types.NameSlice, error)
// AllowCartesianProduct means whether tidb allows cartesian join without equal conditions.
var AllowCartesianProduct = atomic.NewBool(true)
const (
flagGcSubstitute uint64 = 1 << iota
flagPrunColumns
flagBuildKeyInfo
flagDecorrelate
flagEliminateAgg
flagEliminateProjection
flagMaxMinEliminate
flagPredicatePushDown
flagEliminateOuterJoin
flagPartitionProcessor
flagPushDownAgg
flagPushDownTopN
flagJoinReOrder
flagPrunColumnsAgain
)
var optRuleList = []logicalOptRule{
&gcSubstituter{},
&columnPruner{},
&buildKeySolver{},
&decorrelateSolver{},
&aggregationEliminator{},
&projectionEliminator{},
&maxMinEliminator{},
&ppdSolver{},
&outerJoinEliminator{},
&partitionProcessor{},
&aggregationPushDownSolver{},
&pushDownTopNOptimizer{},
&joinReOrderSolver{},
&columnPruner{}, // column pruning again at last, note it will mess up the results of buildKeySolver
}
// logicalOptRule means a logical optimizing rule, which contains decorrelate, ppd, column pruning, etc.
type logicalOptRule interface {
optimize(context.Context, LogicalPlan) (LogicalPlan, error)
name() string
}
// BuildLogicalPlan used to build logical plan from ast.Node.
func BuildLogicalPlan(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (Plan, types.NameSlice, error) {
sctx.GetSessionVars().PlanID = 0
sctx.GetSessionVars().PlanColumnID = 0
builder := NewPlanBuilder(sctx, is, &BlockHintProcessor{})
p, err := builder.Build(ctx, node)
if err != nil {
return nil, nil, err
}
return p, p.OutputNames(), err
}
// CheckPrivilege checks the privilege for a user.
func CheckPrivilege(activeRoles []*auth.RoleIdentity, pm privilege.Manager, vs []visitInfo) error {
for _, v := range vs {
if !pm.RequestVerification(activeRoles, v.db, v.table, v.column, v.privilege) {
if v.err == nil {
return ErrPrivilegeCheckFail
}
return v.err
}
}
return nil
}
// CheckTableLock checks the table lock.
func CheckTableLock(ctx sessionctx.Context, is infoschema.InfoSchema, vs []visitInfo) error {
if !config.TableLockEnabled() {
return nil
}
checker := lock.NewChecker(ctx, is)
for i := range vs {
err := checker.CheckTableLock(vs[i].db, vs[i].table, vs[i].privilege)
if err != nil {
return err
}
}
return nil
}
// DoOptimize optimizes a logical plan to a physical plan.
func | (ctx context.Context, flag uint64, logic LogicalPlan) (PhysicalPlan, float64, error) {
logic, err := logicalOptimize(ctx, flag, logic)
if err != nil {
return nil, 0, err
}
if !AllowCartesianProduct.Load() && existsCartesianProduct(logic) {
return nil, 0, errors.Trace(ErrCartesianProductUnsupported)
}
physical, cost, err := physicalOptimize(logic)
if err != nil {
return nil, 0, err
}
finalPlan := postOptimize(physical)
return finalPlan, cost, nil
}
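// A minimal usage sketch (the flag combination here is an illustrative
// assumption; callers would pick whichever rule flags they need):
//
//	flags := flagPrunColumns | flagBuildKeyInfo | flagPredicatePushDown
//	physical, cost, err := DoOptimize(ctx, flags, logicalPlan)
//	if err != nil {
//		return err
//	}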
func postOptimize(plan PhysicalPlan) PhysicalPlan {
plan = eliminatePhysicalProjection(plan)
plan = injectExtraProjection(plan)
return plan
}
func logicalOptimize(ctx context.Context, flag uint64, logic LogicalPlan) (LogicalPlan, error) {
var err error
for i, rule := range optRuleList {
// The order of flags is same as the order of optRule in the list.
// We use a bitmask to record which opt rules should be used. If the i-th bit is 1, it means we should
// apply i-th optimizing rule.
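		// For example, flag = flagGcSubstitute|flagPrunColumns sets bits 0 and 1,
		// so only the gcSubstituter and columnPruner rules run (illustrative).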
if flag&(1<<uint(i)) == 0 || isLogicalRuleDisabled(rule) {
continue
}
logic, err = rule.optimize(ctx, logic)
if err != nil {
return nil, err
}
}
return logic, err
}
func isLogicalRuleDisabled(r logicalOptRule) bool {
disabled := DefaultDisabledLogicalRulesList.Load().(set.StringSet).Exist(r.name())
return disabled
}
func physicalOptimize(logic LogicalPlan) (PhysicalPlan, float64, error) {
if _, err := logic.recursiveDeriveStats(); err != nil {
return nil, 0, err
}
preparePossibleProperties(logic)
prop := &property.PhysicalProperty{
TaskTp: property.RootTaskType,
ExpectedCnt: math.MaxFloat64,
}
t, err := logic.findBestTask(prop)
if err != nil {
return nil, 0, err
}
if t.invalid() {
return nil, 0, ErrInternal.GenWithStackByArgs("Can't find a proper physical plan for this query")
}
err = t.plan().ResolveIndices()
return t.plan(), t.cost(), err
}
func existsCartesianProduct(p LogicalPlan) bool {
if join, ok := p.(*LogicalJoin); ok && len(join.EqualConditions) == 0 {
return join.JoinType == InnerJoin || join.JoinType == LeftOuterJoin || join.JoinType == RightOuterJoin
}
for _, child := range p.Children() {
if existsCartesianProduct(child) {
return true
}
}
return false
}
// DefaultDisabledLogicalRulesList indicates the logical rules which should be banned.
var DefaultDisabledLogicalRulesList *atomic.Value
func init() {
expression.EvalAstExpr = evalAstExpr
DefaultDisabledLogicalRulesList = new(atomic.Value)
DefaultDisabledLogicalRulesList.Store(set.NewStringSet())
}
| DoOptimize |
ps.go | package commands
import (
"bytes"
"fmt"
"io"
"os/exec"
"regexp"
"strings"
"sync"
"github.com/reyahsolutions/orchestra/services"
"github.com/urfave/cli/v2"
"github.com/wsxiaoys/terminal"
)
var PsCommand = &cli.Command{
Name: "ps",
Usage: "Outputs the status of all services",
Action: BeforeAfterWrapper(PsAction),
}
// PsAction checks the status of every service and outputs it
func PsAction(c *cli.Context) error {
svcs := services.Sort(FilterServices(c))
var wg sync.WaitGroup
for _, svc := range svcs {
wg.Add(1)
go func(s *services.Service) {
s.Ports = getPorts(s)
wg.Done()
}(svc)
}
wg.Wait()
for _, service := range svcs {
spacing := strings.Repeat(" ", services.MaxServiceNameLength+2-len(service.Name))
if service.Process != nil {
terminal.Stdout.Colorf("@{g}%s", service.Name).Reset().Colorf("%s|", spacing).Print(" running ").Colorf(" %d %s\n", service.Process.Pid, service.Ports)
} else {
terminal.Stdout.Colorf("@{r}%s", service.Name).Reset().Colorf("%s|", spacing).Reset().Print(" aborted\n")
}
}
return nil
}
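// getPorts shells out to lsof for the service's process and returns a
// space-separated list of "address/protocol" pairs it is listening on,
// e.g. "*:8080/tcp" (format inferred from the parsing below).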
func getPorts(service *services.Service) string | {
if service.Process == nil {
return ""
}
re := regexp.MustCompile("LISTEN")
cmd := exec.Command("lsof", "-P", "-p", fmt.Sprintf("%d", service.Process.Pid))
output := bytes.NewBuffer([]byte{})
cmd.Stdout = output
cmd.Stderr = output
err := cmd.Run()
if err != nil {
return fmt.Sprintf("error: %v", err)
}
lsofOutput := ""
for {
s, err := output.ReadString('\n')
if err == io.EOF {
break
}
matched := re.MatchString(s)
if matched {
fields := strings.Fields(s)
lsofOutput += fmt.Sprintf("%s/%s ", fields[8], strings.ToLower(fields[7]))
}
}
return lsofOutput
} |
|
bigfloat.go | package bigfloat
import (
"database/sql/driver"
"errors"
"fmt"
"math/big"
"strconv"
"strings"
"github.com/totoval/framework/model/types/bigint"
)
// These constants define supported rounding modes.
const (
ToNearestEven big.RoundingMode = iota // == IEEE 754-2008 roundTiesToEven
ToNearestAway // == IEEE 754-2008 roundTiesToAway
ToZero // == IEEE 754-2008 roundTowardZero
AwayFromZero // no IEEE 754-2008 equivalent
ToNegativeInf // == IEEE 754-2008 roundTowardNegative
ToPositiveInf // == IEEE 754-2008 roundTowardPositive
)
type BigFloat struct {
_bf big.Float
normalCount uint
decimalCount uint
}
const AutoPrec = 512 // 256 -> decimal 32; 512 -> decimal 78
func Zero() *BigFloat {
zero := &BigFloat{}
_ = zero.CreateFromString("0", ToNearestEven)
return zero
}
func (bf *BigFloat) Convert(f *big.Float) error {
// int(f.Prec()) uint to int may cause precision loss
prec := f.Prec()
if prec > big.MaxExp {
return errors.New("precision is too large, may cause precision loss")
}
return bf.CreateFromString(f.Text('f', int(prec)), ToNearestEven)
}
func (bf *BigFloat) Float() *big.Float {
return &bf._bf
}
func (bf *BigFloat) BF() big.Float {
return bf._bf
}
func (bf BigFloat) Value() (driver.Value, error) {
// debug.Dump(bf._bf.Prec(), bf.Text('f', 100), bf.String())
return []byte(bf.String()), nil
}
func (bf *BigFloat) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return bf.scanBytes(src)
case string:
return bf.scanBytes([]byte(src))
	case nil:
		// Assigning nil to bf here would only rebind the local pointer and be
		// invisible to the caller; reset the receiver to zero instead.
		return bf.CreateFromString("0", ToNearestEven)
default:
return fmt.Errorf("pq: cannot convert %T to BigFloat", src)
}
}
func (bf *BigFloat) scanBytes(src []byte) error {
return bf.CreateFromString(string(src), ToNearestEven)
}
func (bf *BigFloat) String() string {
// result := bf.Text('f', int(bf.Prec()))
//
// switch bf.Acc() {
// case big.Above:
// for i := bf.Prec(); i > 0; i-- {
// result = bf.Text('f', int(i))
// if bf.Acc() == big.Exact {
// break
// }
// }
// break
// case big.Below:
// for i := uint(0); i <= bf.Prec(); i++ {
// result = bf.Text('f', int(i))
// if bf.Acc() == big.Exact {
// break
// }
// }
// break
// case big.Exact:
// break
// }
//
// trimResult := strings.TrimRight(result, "0")
//
// if trimResult[len(trimResult)-1:] == "." {
// trimResult = trimResult[:len(trimResult)-1]
// }
//
// return trimResult
result := bf._bf.Text('f', int(bf.decimalCount)/2)
trimResult := result
if strings.Contains(result, ".") {
trimResult = strings.TrimRight(result, "0")
if trimResult[len(trimResult)-1:] == "." {
trimResult = trimResult[:len(trimResult)-1]
}
}
return trimResult
}
func (bf *BigFloat) SetInt(i *bigint.BigInt, mode big.RoundingMode) error {
return bf.CreateFromString(i.String(), mode)
}
func (bf *BigFloat) setDecimal(d uint) { // @todo 0 is infinity
bf.decimalCount = d * 2
}
func (bf *BigFloat) Copy(newBf *BigFloat) error {
return newBf.CreateFromString(bf.String(), bf._bf.Mode()) //@todo accuracy above, may caused by String func()
}
type RoundType byte
const (
RoundUpAlways RoundType = iota
RoundDown
RoundUpAuto
)
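// Illustrative behaviour of the three modes, worked from the rounding logic
// below for a positive value of 1.2345 at two decimals (not from source docs):
//
//	bf.Round(2, RoundDown)     // -> 1.23
//	bf.Round(2, RoundUpAuto)   // -> 1.23 (dropped digit 4 < 5)
//	bf.Round(2, RoundUpAlways) // -> 1.24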
func createCarry(lastDecimal uint, newDecimalPartPlusStr string) (*BigFloat, error) {
decimal := len(newDecimalPartPlusStr)
carryLastDecimal := uint(0)
if lastDecimal > 0 {
carryLastDecimal = 10 - lastDecimal
} else {
carryLastDecimal = 0
}
// tmp := ""
// if lastDecimal == 0{
// tmp = newDecimalPartPlusStr
// }else{
// tmp =
// }
// newDecimalPartPlusStr[:len(newDecimalPartPlusStr)-1]
// var newDecimalPartPlus BigFloat
// err := newDecimalPartPlus.CreateFromString(newDecimalPartPlusStr, ToNearestEven)
// if err != nil {
// return nil, err
// }
carryStr := "0."
for i := 0; i < decimal; i++ {
if i == decimal-1 {
carryStr += fmt.Sprintf("%d", carryLastDecimal)
} else {
carryStr += "0"
}
}
var carry BigFloat
if err := carry.CreateFromString(carryStr, ToNearestEven); err != nil {
return nil, err
}
return &carry, nil
}
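// For example, createCarry(4, "234") returns 0.006: added to the truncated
// 1.234 it yields 1.240, bumping the last kept digit (worked illustration).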
func (bf *BigFloat) Round(decimal uint, roundType RoundType) (*BigFloat, error) {
var bfCopy BigFloat
if err := bf.Copy(&bfCopy); err != nil {
return nil, err
}
if decimal <= 64 {
bfCopy.decimalCount = 64
} else {
bfCopy.decimalCount = AutoPrec / 2
}
parts := strings.Split(bfCopy.String(), ".")
normalPart := ""
decimalPart := ""
if len(parts) == 1 {
normalPart = parts[0]
decimalPart = ""
bfCopy.setDecimal(0)
} else if len(parts) == 2 {
normalPart = parts[0]
decimalPart = parts[1]
} else {
return nil, errors.New("cannot parse " + bfCopy.String())
}
// if provide decimal is greater than the real decimal, then there isn't any precision problem, so directly return
if int(decimal) >= len(decimalPart) {
return &bfCopy, nil
}
result := &BigFloat{}
var err error
// check is greater than 0
if bfCopy.Cmp(*Zero()) < 0 {
//return nil, errors.New("currently not support for number smaller than 0")
//@todo small than 0
result, err = smallerThanZero(decimalPart, normalPart, decimal, roundType)
} else {
result, err = greaterOrEqualThanZero(decimalPart, normalPart, decimal, roundType)
}
if err != nil {
return nil, err
}
// result.setDecimal(decimal)
if err := result.CreateFromString(result.String(), ToNearestEven); err != nil {
return nil, err
}
return result, nil
}
func greaterOrEqualThanZero(decimalPart string, normalPart string, decimal uint, roundType RoundType) (*BigFloat, error) |
func smallerThanZero(decimalPart string, normalPart string, decimal uint, roundType RoundType) (*BigFloat, error) {
//debug.Dump(normalPart, decimalPart, decimal, roundType)
// -123.12345
// decimal 0
newDecimalPart := decimalPart[:decimal] // ""
lastDecimalStr := decimalPart[decimal : decimal+1] // 1
//debug.Dump(lastDecimalStr, newDecimalPart)
lastDecimal, err := strconv.ParseUint(lastDecimalStr, 10, 32) // 1
if err != nil {
return nil, err
}
newDecimalPartPlus := newDecimalPart + lastDecimalStr // 1
// create roundDownPlus with RoundDown decimal + 1 decimal = 2 1000.1234 => 1000.123
roundUpPlusStr := normalPart + "." + newDecimalPartPlus // -123.1
var roundUpPlus BigFloat
if err := roundUpPlus.CreateFromString(roundUpPlusStr, ToNearestEven); err != nil {
return nil, err
}
//debug.Dump(roundDownPlusStr)
// create roundUp with RoundUp decimal = 2 1000.123 => 1000.12
roundUpStr := normalPart + "." + newDecimalPart // -123.
var roundUp BigFloat
if err := roundUp.CreateFromString(roundUpStr, ToNearestEven); err != nil {
return nil, err
}
//debug.Dump(roundUp)
// create carry
carry, err := createCarry(uint(lastDecimal), newDecimalPartPlus)
// debug.Dump(lastDecimal, newDecimalPartPlus, carry)
if err != nil {
return nil, err
}
result := &BigFloat{}
switch roundType {
case RoundUpAlways:
result = &roundUp
break
case RoundUpAuto:
if lastDecimal <= 5 {
result = &roundUp
} else {
result.Sub(roundUpPlus, *carry)
}
break
case RoundDown:
if lastDecimal > 0 {
result.Sub(roundUpPlus, *carry)
} else {
result = &roundUp
}
break
default:
return nil, errors.New("unknown roundType")
}
return result, nil
}
func (bf *BigFloat) Ceil() (*BigFloat, error) {
return bf.Round(0, RoundUpAlways)
}
func (bf *BigFloat) Floor() (*BigFloat, error) {
return bf.Round(0, RoundDown)
}
func (bf *BigFloat) CreateFromString(s string, mode big.RoundingMode) error {
// parse number string
parts := strings.Split(s, ".")
if len(parts) == 1 {
// There is no decimal point, we can just parse the original string as
// an int
bf.normalCount = uint(len(parts[0])) * 2
bf.setDecimal(0)
} else if len(parts) == 2 {
// strip the insignificant digits for more accurate comparisons.
decimalPart := strings.TrimRight(parts[1], "0")
bf.normalCount = uint(len(parts[0])) * 2
bf.setDecimal(uint(len(decimalPart)))
} else {
return errors.New("can't convert " + s + " to decimal")
}
// string to BigFloat
// _bf, _, err := big.ParseFloat(s, 10, bf.normalCount*2+bf.decimalCount*2+8, mode)
_bf, _, err := big.ParseFloat(s, 10, AutoPrec, mode)
// _bf, _, err := big.ParseFloat(s, 10, 2, mode)
if err != nil {
return err
}
bf._bf = *_bf
return nil
// tmp := &big.Float{}
// // _, _, err := tmp.Parse(s, 10)
// // tmp, _, err := big.ParseFloat(s, 10, bf.normalCount*2+bf.decimalCount*2+8, mode)
// tmp, _, err := big.ParseFloat(s, 10, 168, mode)
// if err != nil {
// return err
// }
// fmt.Println(tmp.Acc())
// bf._bf = *tmp
// bf.SetPrec(prec).SetMode(mode)
// _, err := fmt.Sscan(s, &bf._bf)
// return err
}
// @todo xml protobuf ...
func (bf BigFloat) MarshalJSON() ([]byte, error) {
// fix https://github.com/golang/go/issues/20651
return []byte(`"` + bf.String() + `"`), nil
}
func (bf *BigFloat) UnmarshalJSON(src []byte) error {
return bf.scanBytes(src)
}
func (bf *BigFloat) UnmarshalBinary(data []byte) error {
return bf.scanBytes(data)
}
func (bf BigFloat) MarshalBinary() (data []byte, err error) {
return []byte(bf.String()), nil
}
func (bf *BigFloat) useBiggerDecimal(a BigFloat, b BigFloat) {
if a.decimalCount > b.decimalCount {
bf.decimalCount = a.decimalCount
} else {
bf.decimalCount = b.decimalCount
}
if a.normalCount > b.normalCount {
bf.normalCount = a.normalCount
} else {
bf.normalCount = b.normalCount
}
}
func (bf *BigFloat) mergeDecimal(a BigFloat, b BigFloat) {
bf.decimalCount = a.decimalCount + b.decimalCount
}
func (bf *BigFloat) mergeDecimalDiv(a BigFloat, b BigFloat, isInf ...bool) {
decimalA := a.decimalCount
decimalB := b.decimalCount
if len(isInf) > 0 {
if isInf[0] {
bf.decimalCount = AutoPrec / 2
return
}
}
if decimalA == 0 && decimalB == 0 {
// may be infinitive
bf.decimalCount = AutoPrec / 2
return
}
if decimalA == 0 {
decimalA = 1
}
if decimalB == 0 {
decimalB = 1
}
bf.decimalCount = decimalA * decimalB
return
}
//@todo calc pointer param
func (bf *BigFloat) Add(a BigFloat, b BigFloat) {
bf.useBiggerDecimal(a, b)
bf._bf.Add(&a._bf, &b._bf)
}
func (bf *BigFloat) Sub(a BigFloat, b BigFloat) {
bf.useBiggerDecimal(a, b)
bf._bf.Sub(&a._bf, &b._bf)
}
func (bf *BigFloat) Mul(a BigFloat, b BigFloat) {
bf.mergeDecimal(a, b)
bf._bf.Mul(&a._bf, &b._bf)
}
func (bf *BigFloat) Div(a BigFloat, b BigFloat, isInf ...bool) {
bf.mergeDecimalDiv(a, b, isInf...)
bf._bf.Quo(&a._bf, &b._bf)
}
func (bf *BigFloat) Abs(a BigFloat) {
bf._bf.Abs(&a._bf)
}
func (bf *BigFloat) Cmp(a BigFloat) int {
return bf._bf.Cmp(&a._bf)
}
//
// Example usage:
//
//	func main() {
//		a := BigFloat{}
//		_ = a.CreateFromString("10", ToNearestEven)
//		b := BigFloat{}
//		_ = b.CreateFromString("11", ToNearestEven)
//		c := BigFloat{}
//		c.Add(a, b)
//	}
| {
newDecimalPart := decimalPart[:decimal]
lastDecimalStr := decimalPart[decimal : decimal+1]
lastDecimal, err := strconv.ParseUint(lastDecimalStr, 10, 32)
if err != nil {
return nil, err
}
newDecimalPartPlus := newDecimalPart + lastDecimalStr
// create roundDownPlus with RoundDown decimal + 1 decimal = 2 1000.1234 => 1000.123
roundDownPlusStr := normalPart + "." + newDecimalPartPlus
var roundDownPlus BigFloat
if err := roundDownPlus.CreateFromString(roundDownPlusStr, ToNearestEven); err != nil {
return nil, err
}
// create roundDown with RoundDown decimal = 2 1000.123 => 1000.12
roundDownStr := normalPart + "." + newDecimalPart
var roundDown BigFloat
if err := roundDown.CreateFromString(roundDownStr, ToNearestEven); err != nil {
return nil, err
}
// create carry
carry, err := createCarry(uint(lastDecimal), newDecimalPartPlus)
if err != nil {
return nil, err
}
result := &BigFloat{}
switch roundType {
case RoundUpAlways:
if lastDecimal > 0 {
result.Add(roundDownPlus, *carry)
} else {
result = &roundDown
}
break
case RoundUpAuto:
if lastDecimal >= 5 {
result.Add(roundDownPlus, *carry)
} else {
result = &roundDown
}
break
case RoundDown:
result = &roundDown
break
default:
return nil, errors.New("unknown roundType")
}
return result, nil
} |
cloudsearch.go | // THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
// Package cloudsearch provides a client for Amazon CloudSearch.
package cloudsearch
import (
"net/http"
"time"
"github.com/hashicorp/aws-sdk-go/aws"
"github.com/hashicorp/aws-sdk-go/gen/endpoints"
)
import (
"encoding/xml"
"io"
)
// CloudSearch is a client for Amazon CloudSearch.
type CloudSearch struct {
client *aws.QueryClient
}
// New returns a new CloudSearch client.
func New(creds aws.CredentialsProvider, region string, client *http.Client) *CloudSearch |
// BuildSuggesters indexes the search suggestions. For more information,
// see Configuring Suggesters in the Amazon CloudSearch Developer Guide
func (c *CloudSearch) BuildSuggesters(req *BuildSuggestersRequest) (resp *BuildSuggestersResult, err error) {
resp = &BuildSuggestersResult{}
err = c.client.Do("BuildSuggesters", "POST", "/", req, resp)
return
}
// CreateDomain creates a new search domain. For more information, see
// Creating a Search Domain in the Amazon CloudSearch Developer Guide
func (c *CloudSearch) CreateDomain(req *CreateDomainRequest) (resp *CreateDomainResult, err error) {
resp = &CreateDomainResult{}
err = c.client.Do("CreateDomain", "POST", "/", req, resp)
return
}
// DefineAnalysisScheme configures an analysis scheme that can be applied
// to a text or text-array field to define language-specific text
// processing options. For more information, see Configuring Analysis
// Schemes in the Amazon CloudSearch Developer Guide
func (c *CloudSearch) DefineAnalysisScheme(req *DefineAnalysisSchemeRequest) (resp *DefineAnalysisSchemeResult, err error) {
resp = &DefineAnalysisSchemeResult{}
err = c.client.Do("DefineAnalysisScheme", "POST", "/", req, resp)
return
}
// DefineExpression configures an Expression for the search domain. Used to
// create new expressions and modify existing ones. If the expression
// exists, the new configuration replaces the old one. For more
// information, see Configuring Expressions in the Amazon CloudSearch
// Developer Guide
func (c *CloudSearch) DefineExpression(req *DefineExpressionRequest) (resp *DefineExpressionResult, err error) {
resp = &DefineExpressionResult{}
err = c.client.Do("DefineExpression", "POST", "/", req, resp)
return
}
// DefineIndexField configures an IndexField for the search domain. Used to
// create new fields and modify existing ones. You must specify the name of
// the domain you are configuring and an index field configuration. The
// index field configuration specifies a unique name, the index field type,
// and the options you want to configure for the field. The options you can
// specify depend on the IndexFieldType . If the field exists, the new
// configuration replaces the old one. For more information, see
// Configuring Index Fields in the Amazon CloudSearch Developer Guide .
func (c *CloudSearch) DefineIndexField(req *DefineIndexFieldRequest) (resp *DefineIndexFieldResult, err error) {
resp = &DefineIndexFieldResult{}
err = c.client.Do("DefineIndexField", "POST", "/", req, resp)
return
}
// DefineSuggester configures a suggester for a domain. A suggester enables
// you to display possible matches before users finish typing their
// queries. When you configure a suggester, you must specify the name of
// the text field you want to search for possible matches and a unique name
// for the suggester. For more information, see Getting Search Suggestions
// in the Amazon CloudSearch Developer Guide
func (c *CloudSearch) DefineSuggester(req *DefineSuggesterRequest) (resp *DefineSuggesterResult, err error) {
resp = &DefineSuggesterResult{}
err = c.client.Do("DefineSuggester", "POST", "/", req, resp)
return
}
// DeleteAnalysisScheme deletes an analysis scheme. For more information,
// see Configuring Analysis Schemes in the Amazon CloudSearch Developer
// Guide .
func (c *CloudSearch) DeleteAnalysisScheme(req *DeleteAnalysisSchemeRequest) (resp *DeleteAnalysisSchemeResult, err error) {
resp = &DeleteAnalysisSchemeResult{}
err = c.client.Do("DeleteAnalysisScheme", "POST", "/", req, resp)
return
}
// DeleteDomain permanently deletes a search domain and all of its data.
// Once a domain has been deleted, it cannot be recovered. For more
// information, see Deleting a Search Domain in the Amazon CloudSearch
// Developer Guide .
func (c *CloudSearch) DeleteDomain(req *DeleteDomainRequest) (resp *DeleteDomainResult, err error) {
resp = &DeleteDomainResult{}
err = c.client.Do("DeleteDomain", "POST", "/", req, resp)
return
}
// DeleteExpression removes an Expression from the search domain. For more
// information, see Configuring Expressions in the Amazon CloudSearch
// Developer Guide
func (c *CloudSearch) DeleteExpression(req *DeleteExpressionRequest) (resp *DeleteExpressionResult, err error) {
resp = &DeleteExpressionResult{}
err = c.client.Do("DeleteExpression", "POST", "/", req, resp)
return
}
// DeleteIndexField removes an IndexField from the search domain. For more
// information, see Configuring Index Fields in the Amazon CloudSearch
// Developer Guide
func (c *CloudSearch) DeleteIndexField(req *DeleteIndexFieldRequest) (resp *DeleteIndexFieldResult, err error) {
resp = &DeleteIndexFieldResult{}
err = c.client.Do("DeleteIndexField", "POST", "/", req, resp)
return
}
// DeleteSuggester deletes a suggester. For more information, see Getting
// Search Suggestions in the Amazon CloudSearch Developer Guide
func (c *CloudSearch) DeleteSuggester(req *DeleteSuggesterRequest) (resp *DeleteSuggesterResult, err error) {
resp = &DeleteSuggesterResult{}
err = c.client.Do("DeleteSuggester", "POST", "/", req, resp)
return
}
// DescribeAnalysisSchemes gets the analysis schemes configured for a
// domain. An analysis scheme defines language-specific text processing
// options for a text field. Can be limited to specific analysis schemes by
// name. By default, shows all analysis schemes and includes any pending
// changes to the configuration. Set the Deployed option to true to show
// the active configuration and exclude pending changes. For more
// information, see Configuring Analysis Schemes in the Amazon CloudSearch
// Developer Guide
func (c *CloudSearch) DescribeAnalysisSchemes(req *DescribeAnalysisSchemesRequest) (resp *DescribeAnalysisSchemesResult, err error) {
resp = &DescribeAnalysisSchemesResult{}
err = c.client.Do("DescribeAnalysisSchemes", "POST", "/", req, resp)
return
}
// DescribeAvailabilityOptions gets the availability options configured for
// a domain. By default, shows the configuration with any pending changes.
// Set the Deployed option to true to show the active configuration and
// exclude pending changes. For more information, see Configuring
// Availability Options in the Amazon CloudSearch Developer Guide
func (c *CloudSearch) DescribeAvailabilityOptions(req *DescribeAvailabilityOptionsRequest) (resp *DescribeAvailabilityOptionsResult, err error) {
resp = &DescribeAvailabilityOptionsResult{}
err = c.client.Do("DescribeAvailabilityOptions", "POST", "/", req, resp)
return
}
// DescribeDomains gets information about the search domains owned by this
// account. Can be limited to specific domains. Shows all domains by
// default. To get the number of searchable documents in a domain, use the
// console or submit a matchall request to your domain's search endpoint:
// q=matchall&q.parser=structured&size=0 . For more information, see
// Getting Information about a Search Domain in the Amazon CloudSearch
// Developer Guide
func (c *CloudSearch) DescribeDomains(req *DescribeDomainsRequest) (resp *DescribeDomainsResult, err error) {
resp = &DescribeDomainsResult{}
err = c.client.Do("DescribeDomains", "POST", "/", req, resp)
return
}
// DescribeExpressions gets the expressions configured for the search
// domain. Can be limited to specific expressions by name. By default,
// shows all expressions and includes any pending changes to the
// configuration. Set the Deployed option to true to show the active
// configuration and exclude pending changes. For more information, see
// Configuring Expressions in the Amazon CloudSearch Developer Guide
func (c *CloudSearch) DescribeExpressions(req *DescribeExpressionsRequest) (resp *DescribeExpressionsResult, err error) {
resp = &DescribeExpressionsResult{}
err = c.client.Do("DescribeExpressions", "POST", "/", req, resp)
return
}
// DescribeIndexFields gets information about the index fields configured
// for the search domain. Can be limited to specific fields by name. By
// default, shows all fields and includes any pending changes to the
// configuration. Set the Deployed option to true to show the active
// configuration and exclude pending changes. For more information, see
// Getting Domain Information in the Amazon CloudSearch Developer Guide
func (c *CloudSearch) DescribeIndexFields(req *DescribeIndexFieldsRequest) (resp *DescribeIndexFieldsResult, err error) {
resp = &DescribeIndexFieldsResult{}
err = c.client.Do("DescribeIndexFields", "POST", "/", req, resp)
return
}
// DescribeScalingParameters gets the scaling parameters configured for a
// domain. A domain's scaling parameters specify the desired search
// instance type and replication count. For more information, see
// Configuring Scaling Options in the Amazon CloudSearch Developer Guide
func (c *CloudSearch) DescribeScalingParameters(req *DescribeScalingParametersRequest) (resp *DescribeScalingParametersResult, err error) {
resp = &DescribeScalingParametersResult{}
err = c.client.Do("DescribeScalingParameters", "POST", "/", req, resp)
return
}
// DescribeServiceAccessPolicies gets information about the access policies
// that control access to the domain's document and search endpoints. By
// default, shows the configuration with any pending changes. Set the
// Deployed option to true to show the active configuration and exclude
// pending changes. For more information, see Configuring Access for a
// Search Domain in the Amazon CloudSearch Developer Guide
func (c *CloudSearch) DescribeServiceAccessPolicies(req *DescribeServiceAccessPoliciesRequest) (resp *DescribeServiceAccessPoliciesResult, err error) {
resp = &DescribeServiceAccessPoliciesResult{}
err = c.client.Do("DescribeServiceAccessPolicies", "POST", "/", req, resp)
return
}
// DescribeSuggesters gets the suggesters configured for a domain. A
// suggester enables you to display possible matches before users finish
// typing their queries. Can be limited to specific suggesters by name. By
// default, shows all suggesters and includes any pending changes to the
// configuration. Set the Deployed option to true to show the active
// configuration and exclude pending changes. For more information, see
// Getting Search Suggestions in the Amazon CloudSearch Developer Guide
func (c *CloudSearch) DescribeSuggesters(req *DescribeSuggestersRequest) (resp *DescribeSuggestersResult, err error) {
resp = &DescribeSuggestersResult{}
err = c.client.Do("DescribeSuggesters", "POST", "/", req, resp)
return
}
// IndexDocuments tells the search domain to start indexing its documents
// using the latest indexing options. This operation must be invoked to
// activate options whose OptionStatus is RequiresIndexDocuments
func (c *CloudSearch) IndexDocuments(req *IndexDocumentsRequest) (resp *IndexDocumentsResult, err error) {
resp = &IndexDocumentsResult{}
err = c.client.Do("IndexDocuments", "POST", "/", req, resp)
return
}
// ListDomainNames is undocumented.
func (c *CloudSearch) ListDomainNames() (resp *ListDomainNamesResult, err error) {
resp = &ListDomainNamesResult{}
err = c.client.Do("ListDomainNames", "POST", "/", nil, resp)
return
}
// UpdateAvailabilityOptions configures the availability options for a
// domain. Enabling the Multi-AZ option expands an Amazon CloudSearch
// domain to an additional Availability Zone in the same Region to increase
// fault tolerance in the event of a service disruption. Changes to the
// Multi-AZ option can take about half an hour to become active. For more
// information, see Configuring Availability Options in the Amazon
// CloudSearch Developer Guide
func (c *CloudSearch) UpdateAvailabilityOptions(req *UpdateAvailabilityOptionsRequest) (resp *UpdateAvailabilityOptionsResult, err error) {
resp = &UpdateAvailabilityOptionsResult{}
err = c.client.Do("UpdateAvailabilityOptions", "POST", "/", req, resp)
return
}
// UpdateScalingParameters configures scaling parameters for a domain. A
// domain's scaling parameters specify the desired search instance type and
// replication count. Amazon CloudSearch will still automatically scale
// your domain based on the volume of data and traffic, but not below the
// desired instance type and replication count. If the Multi-AZ option is
// enabled, these values control the resources used per Availability Zone.
// For more information, see Configuring Scaling Options in the Amazon
// CloudSearch Developer Guide .
func (c *CloudSearch) UpdateScalingParameters(req *UpdateScalingParametersRequest) (resp *UpdateScalingParametersResult, err error) {
resp = &UpdateScalingParametersResult{}
err = c.client.Do("UpdateScalingParameters", "POST", "/", req, resp)
return
}
// UpdateServiceAccessPolicies configures the access rules that control
// access to the domain's document and search endpoints. For more
// information, see Configuring Access for an Amazon CloudSearch Domain
func (c *CloudSearch) UpdateServiceAccessPolicies(req *UpdateServiceAccessPoliciesRequest) (resp *UpdateServiceAccessPoliciesResult, err error) {
resp = &UpdateServiceAccessPoliciesResult{}
err = c.client.Do("UpdateServiceAccessPolicies", "POST", "/", req, resp)
return
}
// AccessPoliciesStatus is undocumented.
type AccessPoliciesStatus struct {
Options aws.StringValue `query:"Options" xml:"Options"`
Status *OptionStatus `query:"Status" xml:"Status"`
}
// Possible values for CloudSearch.
const (
AlgorithmicStemmingFull = "full"
AlgorithmicStemmingLight = "light"
AlgorithmicStemmingMinimal = "minimal"
AlgorithmicStemmingNone = "none"
)
// AnalysisOptions is undocumented.
type AnalysisOptions struct {
AlgorithmicStemming aws.StringValue `query:"AlgorithmicStemming" xml:"AlgorithmicStemming"`
JapaneseTokenizationDictionary aws.StringValue `query:"JapaneseTokenizationDictionary" xml:"JapaneseTokenizationDictionary"`
StemmingDictionary aws.StringValue `query:"StemmingDictionary" xml:"StemmingDictionary"`
Stopwords aws.StringValue `query:"Stopwords" xml:"Stopwords"`
Synonyms aws.StringValue `query:"Synonyms" xml:"Synonyms"`
}
// AnalysisScheme is undocumented.
type AnalysisScheme struct {
AnalysisOptions *AnalysisOptions `query:"AnalysisOptions" xml:"AnalysisOptions"`
AnalysisSchemeLanguage aws.StringValue `query:"AnalysisSchemeLanguage" xml:"AnalysisSchemeLanguage"`
AnalysisSchemeName aws.StringValue `query:"AnalysisSchemeName" xml:"AnalysisSchemeName"`
}
// Possible values for CloudSearch.
const (
AnalysisSchemeLanguageAr = "ar"
AnalysisSchemeLanguageBg = "bg"
AnalysisSchemeLanguageCa = "ca"
AnalysisSchemeLanguageCs = "cs"
AnalysisSchemeLanguageDa = "da"
AnalysisSchemeLanguageDe = "de"
AnalysisSchemeLanguageEl = "el"
AnalysisSchemeLanguageEn = "en"
AnalysisSchemeLanguageEs = "es"
AnalysisSchemeLanguageEu = "eu"
AnalysisSchemeLanguageFa = "fa"
AnalysisSchemeLanguageFi = "fi"
AnalysisSchemeLanguageFr = "fr"
AnalysisSchemeLanguageGa = "ga"
AnalysisSchemeLanguageGl = "gl"
AnalysisSchemeLanguageHe = "he"
AnalysisSchemeLanguageHi = "hi"
AnalysisSchemeLanguageHu = "hu"
AnalysisSchemeLanguageHy = "hy"
AnalysisSchemeLanguageID = "id"
AnalysisSchemeLanguageIt = "it"
AnalysisSchemeLanguageJa = "ja"
AnalysisSchemeLanguageKo = "ko"
AnalysisSchemeLanguageLv = "lv"
AnalysisSchemeLanguageMul = "mul"
AnalysisSchemeLanguageNl = "nl"
AnalysisSchemeLanguageNo = "no"
AnalysisSchemeLanguagePt = "pt"
AnalysisSchemeLanguageRo = "ro"
AnalysisSchemeLanguageRu = "ru"
AnalysisSchemeLanguageSv = "sv"
AnalysisSchemeLanguageTh = "th"
AnalysisSchemeLanguageTr = "tr"
AnalysisSchemeLanguageZhHans = "zh-Hans"
AnalysisSchemeLanguageZhHant = "zh-Hant"
)
// AnalysisSchemeStatus is undocumented.
type AnalysisSchemeStatus struct {
Options *AnalysisScheme `query:"Options" xml:"Options"`
Status *OptionStatus `query:"Status" xml:"Status"`
}
// AvailabilityOptionsStatus is undocumented.
type AvailabilityOptionsStatus struct {
Options aws.BooleanValue `query:"Options" xml:"Options"`
Status *OptionStatus `query:"Status" xml:"Status"`
}
// BuildSuggestersRequest is undocumented.
type BuildSuggestersRequest struct {
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
}
// BuildSuggestersResponse is undocumented.
type BuildSuggestersResponse struct {
FieldNames []string `query:"FieldNames.member" xml:"BuildSuggestersResult>FieldNames>member"`
}
// CreateDomainRequest is undocumented.
type CreateDomainRequest struct {
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
}
// CreateDomainResponse is undocumented.
type CreateDomainResponse struct {
DomainStatus *DomainStatus `query:"DomainStatus" xml:"CreateDomainResult>DomainStatus"`
}
// DateArrayOptions is undocumented.
type DateArrayOptions struct {
DefaultValue aws.StringValue `query:"DefaultValue" xml:"DefaultValue"`
FacetEnabled aws.BooleanValue `query:"FacetEnabled" xml:"FacetEnabled"`
ReturnEnabled aws.BooleanValue `query:"ReturnEnabled" xml:"ReturnEnabled"`
SearchEnabled aws.BooleanValue `query:"SearchEnabled" xml:"SearchEnabled"`
SourceFields aws.StringValue `query:"SourceFields" xml:"SourceFields"`
}
// DateOptions is undocumented.
type DateOptions struct {
DefaultValue aws.StringValue `query:"DefaultValue" xml:"DefaultValue"`
FacetEnabled aws.BooleanValue `query:"FacetEnabled" xml:"FacetEnabled"`
ReturnEnabled aws.BooleanValue `query:"ReturnEnabled" xml:"ReturnEnabled"`
SearchEnabled aws.BooleanValue `query:"SearchEnabled" xml:"SearchEnabled"`
SortEnabled aws.BooleanValue `query:"SortEnabled" xml:"SortEnabled"`
SourceField aws.StringValue `query:"SourceField" xml:"SourceField"`
}
// DefineAnalysisSchemeRequest is undocumented.
type DefineAnalysisSchemeRequest struct {
AnalysisScheme *AnalysisScheme `query:"AnalysisScheme" xml:"AnalysisScheme"`
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
}
// DefineAnalysisSchemeResponse is undocumented.
type DefineAnalysisSchemeResponse struct {
AnalysisScheme *AnalysisSchemeStatus `query:"AnalysisScheme" xml:"DefineAnalysisSchemeResult>AnalysisScheme"`
}
// DefineExpressionRequest is undocumented.
type DefineExpressionRequest struct {
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
Expression *Expression `query:"Expression" xml:"Expression"`
}
// DefineExpressionResponse is undocumented.
type DefineExpressionResponse struct {
Expression *ExpressionStatus `query:"Expression" xml:"DefineExpressionResult>Expression"`
}
// DefineIndexFieldRequest is undocumented.
type DefineIndexFieldRequest struct {
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
IndexField *IndexField `query:"IndexField" xml:"IndexField"`
}
// DefineIndexFieldResponse is undocumented.
type DefineIndexFieldResponse struct {
IndexField *IndexFieldStatus `query:"IndexField" xml:"DefineIndexFieldResult>IndexField"`
}
// DefineSuggesterRequest is undocumented.
type DefineSuggesterRequest struct {
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
Suggester *Suggester `query:"Suggester" xml:"Suggester"`
}
// DefineSuggesterResponse is undocumented.
type DefineSuggesterResponse struct {
Suggester *SuggesterStatus `query:"Suggester" xml:"DefineSuggesterResult>Suggester"`
}
// DeleteAnalysisSchemeRequest is undocumented.
type DeleteAnalysisSchemeRequest struct {
AnalysisSchemeName aws.StringValue `query:"AnalysisSchemeName" xml:"AnalysisSchemeName"`
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
}
// DeleteAnalysisSchemeResponse is undocumented.
type DeleteAnalysisSchemeResponse struct {
AnalysisScheme *AnalysisSchemeStatus `query:"AnalysisScheme" xml:"DeleteAnalysisSchemeResult>AnalysisScheme"`
}
// DeleteDomainRequest is undocumented.
type DeleteDomainRequest struct {
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
}
// DeleteDomainResponse is undocumented.
type DeleteDomainResponse struct {
DomainStatus *DomainStatus `query:"DomainStatus" xml:"DeleteDomainResult>DomainStatus"`
}
// DeleteExpressionRequest is undocumented.
type DeleteExpressionRequest struct {
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
ExpressionName aws.StringValue `query:"ExpressionName" xml:"ExpressionName"`
}
// DeleteExpressionResponse is undocumented.
type DeleteExpressionResponse struct {
Expression *ExpressionStatus `query:"Expression" xml:"DeleteExpressionResult>Expression"`
}
// DeleteIndexFieldRequest is undocumented.
type DeleteIndexFieldRequest struct {
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
IndexFieldName aws.StringValue `query:"IndexFieldName" xml:"IndexFieldName"`
}
// DeleteIndexFieldResponse is undocumented.
type DeleteIndexFieldResponse struct {
IndexField *IndexFieldStatus `query:"IndexField" xml:"DeleteIndexFieldResult>IndexField"`
}
// DeleteSuggesterRequest is undocumented.
type DeleteSuggesterRequest struct {
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
SuggesterName aws.StringValue `query:"SuggesterName" xml:"SuggesterName"`
}
// DeleteSuggesterResponse is undocumented.
type DeleteSuggesterResponse struct {
Suggester *SuggesterStatus `query:"Suggester" xml:"DeleteSuggesterResult>Suggester"`
}
// DescribeAnalysisSchemesRequest is undocumented.
type DescribeAnalysisSchemesRequest struct {
AnalysisSchemeNames []string `query:"AnalysisSchemeNames.member" xml:"AnalysisSchemeNames>member"`
Deployed aws.BooleanValue `query:"Deployed" xml:"Deployed"`
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
}
// DescribeAnalysisSchemesResponse is undocumented.
type DescribeAnalysisSchemesResponse struct {
AnalysisSchemes []AnalysisSchemeStatus `query:"AnalysisSchemes.member" xml:"DescribeAnalysisSchemesResult>AnalysisSchemes>member"`
}
// DescribeAvailabilityOptionsRequest is undocumented.
type DescribeAvailabilityOptionsRequest struct {
Deployed aws.BooleanValue `query:"Deployed" xml:"Deployed"`
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
}
// DescribeAvailabilityOptionsResponse is undocumented.
type DescribeAvailabilityOptionsResponse struct {
AvailabilityOptions *AvailabilityOptionsStatus `query:"AvailabilityOptions" xml:"DescribeAvailabilityOptionsResult>AvailabilityOptions"`
}
// DescribeDomainsRequest is undocumented.
type DescribeDomainsRequest struct {
DomainNames []string `query:"DomainNames.member" xml:"DomainNames>member"`
}
// DescribeDomainsResponse is undocumented.
type DescribeDomainsResponse struct {
DomainStatusList []DomainStatus `query:"DomainStatusList.member" xml:"DescribeDomainsResult>DomainStatusList>member"`
}
// DescribeExpressionsRequest is undocumented.
type DescribeExpressionsRequest struct {
Deployed aws.BooleanValue `query:"Deployed" xml:"Deployed"`
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
ExpressionNames []string `query:"ExpressionNames.member" xml:"ExpressionNames>member"`
}
// DescribeExpressionsResponse is undocumented.
type DescribeExpressionsResponse struct {
Expressions []ExpressionStatus `query:"Expressions.member" xml:"DescribeExpressionsResult>Expressions>member"`
}
// DescribeIndexFieldsRequest is undocumented.
type DescribeIndexFieldsRequest struct {
Deployed aws.BooleanValue `query:"Deployed" xml:"Deployed"`
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
FieldNames []string `query:"FieldNames.member" xml:"FieldNames>member"`
}
// DescribeIndexFieldsResponse is undocumented.
type DescribeIndexFieldsResponse struct {
IndexFields []IndexFieldStatus `query:"IndexFields.member" xml:"DescribeIndexFieldsResult>IndexFields>member"`
}
// DescribeScalingParametersRequest is undocumented.
type DescribeScalingParametersRequest struct {
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
}
// DescribeScalingParametersResponse is undocumented.
type DescribeScalingParametersResponse struct {
ScalingParameters *ScalingParametersStatus `query:"ScalingParameters" xml:"DescribeScalingParametersResult>ScalingParameters"`
}
// DescribeServiceAccessPoliciesRequest is undocumented.
type DescribeServiceAccessPoliciesRequest struct {
Deployed aws.BooleanValue `query:"Deployed" xml:"Deployed"`
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
}
// DescribeServiceAccessPoliciesResponse is undocumented.
type DescribeServiceAccessPoliciesResponse struct {
AccessPolicies *AccessPoliciesStatus `query:"AccessPolicies" xml:"DescribeServiceAccessPoliciesResult>AccessPolicies"`
}
// DescribeSuggestersRequest is undocumented.
type DescribeSuggestersRequest struct {
Deployed aws.BooleanValue `query:"Deployed" xml:"Deployed"`
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
SuggesterNames []string `query:"SuggesterNames.member" xml:"SuggesterNames>member"`
}
// DescribeSuggestersResponse is undocumented.
type DescribeSuggestersResponse struct {
Suggesters []SuggesterStatus `query:"Suggesters.member" xml:"DescribeSuggestersResult>Suggesters>member"`
}
// DocumentSuggesterOptions is undocumented.
type DocumentSuggesterOptions struct {
FuzzyMatching aws.StringValue `query:"FuzzyMatching" xml:"FuzzyMatching"`
SortExpression aws.StringValue `query:"SortExpression" xml:"SortExpression"`
SourceField aws.StringValue `query:"SourceField" xml:"SourceField"`
}
type DomainNameMap map[string]string
// UnmarshalXML implements the xml.Unmarshaler interface for DomainNameMap.
func (m *DomainNameMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
if *m == nil {
(*m) = make(DomainNameMap)
}
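// Decode successive <entry> elements, each carrying a <key>/<value> pair, until EOF.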
for {
var e struct {
Key string `xml:"key"`
Value string `xml:"value"`
}
err := d.DecodeElement(&e, &start)
if err != nil && err != io.EOF {
return err
}
if err == io.EOF {
break
}
(*m)[e.Key] = e.Value
}
return nil
}
// DomainStatus is undocumented.
type DomainStatus struct {
ARN aws.StringValue `query:"ARN" xml:"ARN"`
Created aws.BooleanValue `query:"Created" xml:"Created"`
Deleted aws.BooleanValue `query:"Deleted" xml:"Deleted"`
DocService *ServiceEndpoint `query:"DocService" xml:"DocService"`
DomainID aws.StringValue `query:"DomainId" xml:"DomainId"`
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
Limits *Limits `query:"Limits" xml:"Limits"`
Processing aws.BooleanValue `query:"Processing" xml:"Processing"`
RequiresIndexDocuments aws.BooleanValue `query:"RequiresIndexDocuments" xml:"RequiresIndexDocuments"`
SearchInstanceCount aws.IntegerValue `query:"SearchInstanceCount" xml:"SearchInstanceCount"`
SearchInstanceType aws.StringValue `query:"SearchInstanceType" xml:"SearchInstanceType"`
SearchPartitionCount aws.IntegerValue `query:"SearchPartitionCount" xml:"SearchPartitionCount"`
SearchService *ServiceEndpoint `query:"SearchService" xml:"SearchService"`
}
// DoubleArrayOptions is undocumented.
type DoubleArrayOptions struct {
DefaultValue aws.DoubleValue `query:"DefaultValue" xml:"DefaultValue"`
FacetEnabled aws.BooleanValue `query:"FacetEnabled" xml:"FacetEnabled"`
ReturnEnabled aws.BooleanValue `query:"ReturnEnabled" xml:"ReturnEnabled"`
SearchEnabled aws.BooleanValue `query:"SearchEnabled" xml:"SearchEnabled"`
SourceFields aws.StringValue `query:"SourceFields" xml:"SourceFields"`
}
// DoubleOptions is undocumented.
type DoubleOptions struct {
DefaultValue aws.DoubleValue `query:"DefaultValue" xml:"DefaultValue"`
FacetEnabled aws.BooleanValue `query:"FacetEnabled" xml:"FacetEnabled"`
ReturnEnabled aws.BooleanValue `query:"ReturnEnabled" xml:"ReturnEnabled"`
SearchEnabled aws.BooleanValue `query:"SearchEnabled" xml:"SearchEnabled"`
SortEnabled aws.BooleanValue `query:"SortEnabled" xml:"SortEnabled"`
SourceField aws.StringValue `query:"SourceField" xml:"SourceField"`
}
// Expression is undocumented.
type Expression struct {
ExpressionName aws.StringValue `query:"ExpressionName" xml:"ExpressionName"`
ExpressionValue aws.StringValue `query:"ExpressionValue" xml:"ExpressionValue"`
}
// ExpressionStatus is undocumented.
type ExpressionStatus struct {
Options *Expression `query:"Options" xml:"Options"`
Status *OptionStatus `query:"Status" xml:"Status"`
}
// IndexDocumentsRequest is undocumented.
type IndexDocumentsRequest struct {
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
}
// IndexDocumentsResponse is undocumented.
type IndexDocumentsResponse struct {
FieldNames []string `query:"FieldNames.member" xml:"IndexDocumentsResult>FieldNames>member"`
}
// IndexField is undocumented.
type IndexField struct {
DateArrayOptions *DateArrayOptions `query:"DateArrayOptions" xml:"DateArrayOptions"`
DateOptions *DateOptions `query:"DateOptions" xml:"DateOptions"`
DoubleArrayOptions *DoubleArrayOptions `query:"DoubleArrayOptions" xml:"DoubleArrayOptions"`
DoubleOptions *DoubleOptions `query:"DoubleOptions" xml:"DoubleOptions"`
IndexFieldName aws.StringValue `query:"IndexFieldName" xml:"IndexFieldName"`
IndexFieldType aws.StringValue `query:"IndexFieldType" xml:"IndexFieldType"`
IntArrayOptions *IntArrayOptions `query:"IntArrayOptions" xml:"IntArrayOptions"`
IntOptions *IntOptions `query:"IntOptions" xml:"IntOptions"`
LatLonOptions *LatLonOptions `query:"LatLonOptions" xml:"LatLonOptions"`
LiteralArrayOptions *LiteralArrayOptions `query:"LiteralArrayOptions" xml:"LiteralArrayOptions"`
LiteralOptions *LiteralOptions `query:"LiteralOptions" xml:"LiteralOptions"`
TextArrayOptions *TextArrayOptions `query:"TextArrayOptions" xml:"TextArrayOptions"`
TextOptions *TextOptions `query:"TextOptions" xml:"TextOptions"`
}
// IndexFieldStatus is undocumented.
type IndexFieldStatus struct {
Options *IndexField `query:"Options" xml:"Options"`
Status *OptionStatus `query:"Status" xml:"Status"`
}
// Possible values for CloudSearch.
const (
IndexFieldTypeDate = "date"
IndexFieldTypeDateArray = "date-array"
IndexFieldTypeDouble = "double"
IndexFieldTypeDoubleArray = "double-array"
IndexFieldTypeInt = "int"
IndexFieldTypeIntArray = "int-array"
IndexFieldTypeLatlon = "latlon"
IndexFieldTypeLiteral = "literal"
IndexFieldTypeLiteralArray = "literal-array"
IndexFieldTypeText = "text"
IndexFieldTypeTextArray = "text-array"
)
// IntArrayOptions is undocumented.
type IntArrayOptions struct {
DefaultValue aws.LongValue `query:"DefaultValue" xml:"DefaultValue"`
FacetEnabled aws.BooleanValue `query:"FacetEnabled" xml:"FacetEnabled"`
ReturnEnabled aws.BooleanValue `query:"ReturnEnabled" xml:"ReturnEnabled"`
SearchEnabled aws.BooleanValue `query:"SearchEnabled" xml:"SearchEnabled"`
SourceFields aws.StringValue `query:"SourceFields" xml:"SourceFields"`
}
// IntOptions is undocumented.
type IntOptions struct {
DefaultValue aws.LongValue `query:"DefaultValue" xml:"DefaultValue"`
FacetEnabled aws.BooleanValue `query:"FacetEnabled" xml:"FacetEnabled"`
ReturnEnabled aws.BooleanValue `query:"ReturnEnabled" xml:"ReturnEnabled"`
SearchEnabled aws.BooleanValue `query:"SearchEnabled" xml:"SearchEnabled"`
SortEnabled aws.BooleanValue `query:"SortEnabled" xml:"SortEnabled"`
SourceField aws.StringValue `query:"SourceField" xml:"SourceField"`
}
// LatLonOptions is undocumented.
type LatLonOptions struct {
DefaultValue aws.StringValue `query:"DefaultValue" xml:"DefaultValue"`
FacetEnabled aws.BooleanValue `query:"FacetEnabled" xml:"FacetEnabled"`
ReturnEnabled aws.BooleanValue `query:"ReturnEnabled" xml:"ReturnEnabled"`
SearchEnabled aws.BooleanValue `query:"SearchEnabled" xml:"SearchEnabled"`
SortEnabled aws.BooleanValue `query:"SortEnabled" xml:"SortEnabled"`
SourceField aws.StringValue `query:"SourceField" xml:"SourceField"`
}
// Limits is undocumented.
type Limits struct {
MaximumPartitionCount aws.IntegerValue `query:"MaximumPartitionCount" xml:"MaximumPartitionCount"`
MaximumReplicationCount aws.IntegerValue `query:"MaximumReplicationCount" xml:"MaximumReplicationCount"`
}
// ListDomainNamesResponse is undocumented.
type ListDomainNamesResponse struct {
DomainNames DomainNameMap `query:"DomainNames.entry" xml:"ListDomainNamesResult>DomainNames>entry"`
}
// LiteralArrayOptions is undocumented.
type LiteralArrayOptions struct {
DefaultValue aws.StringValue `query:"DefaultValue" xml:"DefaultValue"`
FacetEnabled aws.BooleanValue `query:"FacetEnabled" xml:"FacetEnabled"`
ReturnEnabled aws.BooleanValue `query:"ReturnEnabled" xml:"ReturnEnabled"`
SearchEnabled aws.BooleanValue `query:"SearchEnabled" xml:"SearchEnabled"`
SourceFields aws.StringValue `query:"SourceFields" xml:"SourceFields"`
}
// LiteralOptions is undocumented.
type LiteralOptions struct {
DefaultValue aws.StringValue `query:"DefaultValue" xml:"DefaultValue"`
FacetEnabled aws.BooleanValue `query:"FacetEnabled" xml:"FacetEnabled"`
ReturnEnabled aws.BooleanValue `query:"ReturnEnabled" xml:"ReturnEnabled"`
SearchEnabled aws.BooleanValue `query:"SearchEnabled" xml:"SearchEnabled"`
SortEnabled aws.BooleanValue `query:"SortEnabled" xml:"SortEnabled"`
SourceField aws.StringValue `query:"SourceField" xml:"SourceField"`
}
// Possible values for CloudSearch.
const (
OptionStateActive = "Active"
OptionStateFailedToValidate = "FailedToValidate"
OptionStateProcessing = "Processing"
OptionStateRequiresIndexDocuments = "RequiresIndexDocuments"
)
// OptionStatus is undocumented.
type OptionStatus struct {
CreationDate time.Time `query:"CreationDate" xml:"CreationDate"`
PendingDeletion aws.BooleanValue `query:"PendingDeletion" xml:"PendingDeletion"`
State aws.StringValue `query:"State" xml:"State"`
UpdateDate time.Time `query:"UpdateDate" xml:"UpdateDate"`
UpdateVersion aws.IntegerValue `query:"UpdateVersion" xml:"UpdateVersion"`
}
// Possible values for CloudSearch.
const (
PartitionInstanceTypeSearchM1Large = "search.m1.large"
PartitionInstanceTypeSearchM1Small = "search.m1.small"
PartitionInstanceTypeSearchM22xlarge = "search.m2.2xlarge"
PartitionInstanceTypeSearchM2Xlarge = "search.m2.xlarge"
)
// ScalingParameters is undocumented.
type ScalingParameters struct {
DesiredInstanceType aws.StringValue `query:"DesiredInstanceType" xml:"DesiredInstanceType"`
DesiredPartitionCount aws.IntegerValue `query:"DesiredPartitionCount" xml:"DesiredPartitionCount"`
DesiredReplicationCount aws.IntegerValue `query:"DesiredReplicationCount" xml:"DesiredReplicationCount"`
}
// ScalingParametersStatus is undocumented.
type ScalingParametersStatus struct {
Options *ScalingParameters `query:"Options" xml:"Options"`
Status *OptionStatus `query:"Status" xml:"Status"`
}
// ServiceEndpoint is undocumented.
type ServiceEndpoint struct {
Endpoint aws.StringValue `query:"Endpoint" xml:"Endpoint"`
}
// Suggester is undocumented.
type Suggester struct {
DocumentSuggesterOptions *DocumentSuggesterOptions `query:"DocumentSuggesterOptions" xml:"DocumentSuggesterOptions"`
SuggesterName aws.StringValue `query:"SuggesterName" xml:"SuggesterName"`
}
// Possible values for CloudSearch.
const (
SuggesterFuzzyMatchingHigh = "high"
SuggesterFuzzyMatchingLow = "low"
SuggesterFuzzyMatchingNone = "none"
)
// SuggesterStatus is undocumented.
type SuggesterStatus struct {
Options *Suggester `query:"Options" xml:"Options"`
Status *OptionStatus `query:"Status" xml:"Status"`
}
// TextArrayOptions is undocumented.
type TextArrayOptions struct {
AnalysisScheme aws.StringValue `query:"AnalysisScheme" xml:"AnalysisScheme"`
DefaultValue aws.StringValue `query:"DefaultValue" xml:"DefaultValue"`
HighlightEnabled aws.BooleanValue `query:"HighlightEnabled" xml:"HighlightEnabled"`
ReturnEnabled aws.BooleanValue `query:"ReturnEnabled" xml:"ReturnEnabled"`
SourceFields aws.StringValue `query:"SourceFields" xml:"SourceFields"`
}
// TextOptions is undocumented.
type TextOptions struct {
AnalysisScheme aws.StringValue `query:"AnalysisScheme" xml:"AnalysisScheme"`
DefaultValue aws.StringValue `query:"DefaultValue" xml:"DefaultValue"`
HighlightEnabled aws.BooleanValue `query:"HighlightEnabled" xml:"HighlightEnabled"`
ReturnEnabled aws.BooleanValue `query:"ReturnEnabled" xml:"ReturnEnabled"`
SortEnabled aws.BooleanValue `query:"SortEnabled" xml:"SortEnabled"`
SourceField aws.StringValue `query:"SourceField" xml:"SourceField"`
}
// UpdateAvailabilityOptionsRequest is undocumented.
type UpdateAvailabilityOptionsRequest struct {
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
MultiAZ aws.BooleanValue `query:"MultiAZ" xml:"MultiAZ"`
}
// UpdateAvailabilityOptionsResponse is undocumented.
type UpdateAvailabilityOptionsResponse struct {
AvailabilityOptions *AvailabilityOptionsStatus `query:"AvailabilityOptions" xml:"UpdateAvailabilityOptionsResult>AvailabilityOptions"`
}
// UpdateScalingParametersRequest is undocumented.
type UpdateScalingParametersRequest struct {
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
ScalingParameters *ScalingParameters `query:"ScalingParameters" xml:"ScalingParameters"`
}
// UpdateScalingParametersResponse is undocumented.
type UpdateScalingParametersResponse struct {
ScalingParameters *ScalingParametersStatus `query:"ScalingParameters" xml:"UpdateScalingParametersResult>ScalingParameters"`
}
// UpdateServiceAccessPoliciesRequest is undocumented.
type UpdateServiceAccessPoliciesRequest struct {
AccessPolicies aws.StringValue `query:"AccessPolicies" xml:"AccessPolicies"`
DomainName aws.StringValue `query:"DomainName" xml:"DomainName"`
}
// UpdateServiceAccessPoliciesResponse is undocumented.
type UpdateServiceAccessPoliciesResponse struct {
AccessPolicies *AccessPoliciesStatus `query:"AccessPolicies" xml:"UpdateServiceAccessPoliciesResult>AccessPolicies"`
}
// BuildSuggestersResult is a wrapper for BuildSuggestersResponse.
type BuildSuggestersResult struct {
FieldNames []string `query:"FieldNames.member" xml:"BuildSuggestersResult>FieldNames>member"`
}
// CreateDomainResult is a wrapper for CreateDomainResponse.
type CreateDomainResult struct {
DomainStatus *DomainStatus `query:"DomainStatus" xml:"CreateDomainResult>DomainStatus"`
}
// DefineAnalysisSchemeResult is a wrapper for DefineAnalysisSchemeResponse.
type DefineAnalysisSchemeResult struct {
AnalysisScheme *AnalysisSchemeStatus `query:"AnalysisScheme" xml:"DefineAnalysisSchemeResult>AnalysisScheme"`
}
// DefineExpressionResult is a wrapper for DefineExpressionResponse.
type DefineExpressionResult struct {
Expression *ExpressionStatus `query:"Expression" xml:"DefineExpressionResult>Expression"`
}
// DefineIndexFieldResult is a wrapper for DefineIndexFieldResponse.
type DefineIndexFieldResult struct {
IndexField *IndexFieldStatus `query:"IndexField" xml:"DefineIndexFieldResult>IndexField"`
}
// DefineSuggesterResult is a wrapper for DefineSuggesterResponse.
type DefineSuggesterResult struct {
Suggester *SuggesterStatus `query:"Suggester" xml:"DefineSuggesterResult>Suggester"`
}
// DeleteAnalysisSchemeResult is a wrapper for DeleteAnalysisSchemeResponse.
type DeleteAnalysisSchemeResult struct {
AnalysisScheme *AnalysisSchemeStatus `query:"AnalysisScheme" xml:"DeleteAnalysisSchemeResult>AnalysisScheme"`
}
// DeleteDomainResult is a wrapper for DeleteDomainResponse.
type DeleteDomainResult struct {
DomainStatus *DomainStatus `query:"DomainStatus" xml:"DeleteDomainResult>DomainStatus"`
}
// DeleteExpressionResult is a wrapper for DeleteExpressionResponse.
type DeleteExpressionResult struct {
Expression *ExpressionStatus `query:"Expression" xml:"DeleteExpressionResult>Expression"`
}
// DeleteIndexFieldResult is a wrapper for DeleteIndexFieldResponse.
type DeleteIndexFieldResult struct {
IndexField *IndexFieldStatus `query:"IndexField" xml:"DeleteIndexFieldResult>IndexField"`
}
// DeleteSuggesterResult is a wrapper for DeleteSuggesterResponse.
type DeleteSuggesterResult struct {
Suggester *SuggesterStatus `query:"Suggester" xml:"DeleteSuggesterResult>Suggester"`
}
// DescribeAnalysisSchemesResult is a wrapper for DescribeAnalysisSchemesResponse.
type DescribeAnalysisSchemesResult struct {
AnalysisSchemes []AnalysisSchemeStatus `query:"AnalysisSchemes.member" xml:"DescribeAnalysisSchemesResult>AnalysisSchemes>member"`
}
// DescribeAvailabilityOptionsResult is a wrapper for DescribeAvailabilityOptionsResponse.
type DescribeAvailabilityOptionsResult struct {
AvailabilityOptions *AvailabilityOptionsStatus `query:"AvailabilityOptions" xml:"DescribeAvailabilityOptionsResult>AvailabilityOptions"`
}
// DescribeDomainsResult is a wrapper for DescribeDomainsResponse.
type DescribeDomainsResult struct {
DomainStatusList []DomainStatus `query:"DomainStatusList.member" xml:"DescribeDomainsResult>DomainStatusList>member"`
}
// DescribeExpressionsResult is a wrapper for DescribeExpressionsResponse.
type DescribeExpressionsResult struct {
Expressions []ExpressionStatus `query:"Expressions.member" xml:"DescribeExpressionsResult>Expressions>member"`
}
// DescribeIndexFieldsResult is a wrapper for DescribeIndexFieldsResponse.
type DescribeIndexFieldsResult struct {
IndexFields []IndexFieldStatus `query:"IndexFields.member" xml:"DescribeIndexFieldsResult>IndexFields>member"`
}
// DescribeScalingParametersResult is a wrapper for DescribeScalingParametersResponse.
type DescribeScalingParametersResult struct {
ScalingParameters *ScalingParametersStatus `query:"ScalingParameters" xml:"DescribeScalingParametersResult>ScalingParameters"`
}
// DescribeServiceAccessPoliciesResult is a wrapper for DescribeServiceAccessPoliciesResponse.
type DescribeServiceAccessPoliciesResult struct {
AccessPolicies *AccessPoliciesStatus `query:"AccessPolicies" xml:"DescribeServiceAccessPoliciesResult>AccessPolicies"`
}
// DescribeSuggestersResult is a wrapper for DescribeSuggestersResponse.
type DescribeSuggestersResult struct {
Suggesters []SuggesterStatus `query:"Suggesters.member" xml:"DescribeSuggestersResult>Suggesters>member"`
}
// IndexDocumentsResult is a wrapper for IndexDocumentsResponse.
type IndexDocumentsResult struct {
FieldNames []string `query:"FieldNames.member" xml:"IndexDocumentsResult>FieldNames>member"`
}
// ListDomainNamesResult is a wrapper for ListDomainNamesResponse.
type ListDomainNamesResult struct {
DomainNames DomainNameMap `query:"DomainNames.entry" xml:"ListDomainNamesResult>DomainNames>entry"`
}
// UpdateAvailabilityOptionsResult is a wrapper for UpdateAvailabilityOptionsResponse.
type UpdateAvailabilityOptionsResult struct {
AvailabilityOptions *AvailabilityOptionsStatus `query:"AvailabilityOptions" xml:"UpdateAvailabilityOptionsResult>AvailabilityOptions"`
}
// UpdateScalingParametersResult is a wrapper for UpdateScalingParametersResponse.
type UpdateScalingParametersResult struct {
ScalingParameters *ScalingParametersStatus `query:"ScalingParameters" xml:"UpdateScalingParametersResult>ScalingParameters"`
}
// UpdateServiceAccessPoliciesResult is a wrapper for UpdateServiceAccessPoliciesResponse.
type UpdateServiceAccessPoliciesResult struct {
AccessPolicies *AccessPoliciesStatus `query:"AccessPolicies" xml:"UpdateServiceAccessPoliciesResult>AccessPolicies"`
}
// avoid errors if the packages aren't referenced
var _ time.Time
var _ xml.Decoder
var _ = io.EOF
| {
if client == nil {
client = http.DefaultClient
}
endpoint, service, region := endpoints.Lookup("cloudsearch", region)
return &CloudSearch{
client: &aws.QueryClient{
Context: aws.Context{
Credentials: creds,
Service: service,
Region: region,
},
Client: client,
Endpoint: endpoint,
APIVersion: "2013-01-01",
},
}
} |
tokenizer.go | package token
import runtime "github.com/nu11ptr/parsegen/runtime/go"
// *** Potentially Generated ***
type Mode int
const (
REGULAR Mode = iota
CHAR_CLASS
)
const (
// Mode: Regular
// Char set
RULE_NAME runtime.TokenType = iota + runtime.EOF + 1
TOKEN_NAME
// Sequences
TOKEN_LIT
// Keywords
FRAGMENT
SKIP_ACTION
PUSH_ACTION
POP_ACTION
// Basic Sequences
RARROW
DOT
COLON
SEMI
PIPE
LPAREN
RPAREN
PLUS
STAR
QUEST_MARK
TILDE
COMMA
LBRACK
// Mode: CHAR_CLASS
// Char set
BASIC_CHAR
// Basic Sequences
UNICODE_ESCAPE_CHAR
ESCAPE_CHAR
DASH
RBRACK
)
var (
keywords = map[string]runtime.TokenType{
"fragment": FRAGMENT,
"skip": SKIP_ACTION,
"pushMode": PUSH_ACTION,
"popMode": POP_ACTION,
}
)
type Tokenizer struct {
lex *runtime.Lexer
mode Mode
}
func New(lex *runtime.Lexer) *Tokenizer {
return &Tokenizer{lex: lex, mode: REGULAR}
}
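// processRuleName tries to match a rule name ([a-z][A-Za-z0-9_]*) at the current
// position, emitting a RULE_NAME token (or the matching keyword token if the
// lexeme is a reserved word) and reporting whether the match succeeded.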
func (t *Tokenizer) processRuleName(tok *runtime.Token) bool {
// [a-z]
if !t.lex.MatchCharInRange('a', 'z') {
return false
}
// [A-Za-z0-9_]*
for t.lex.MatchCharInRange('A', 'Z') || t.lex.MatchCharInRange('a', 'z') ||
t.lex.MatchCharInRange('0', '9') || t.lex.MatchChar('_') {
}
t.lex.BuildTokenData(RULE_NAME, tok)
// Possible conflicting keyword
tt, ok := keywords[tok.Data]
if ok {
tok.Type = tt
tok.Data = ""
}
return true
}
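// processTokenName tries to match a token name ([A-Z][A-Za-z0-9_]*), emitting a
// TOKEN_NAME token and reporting whether the match succeeded.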
func (t *Tokenizer) processTokenName(tok *runtime.Token) bool {
// [A-Z]
if !t.lex.MatchCharInRange('A', 'Z') {
return false
}
// [A-Za-z0-9_]*
for t.lex.MatchCharInRange('A', 'Z') || t.lex.MatchCharInRange('a', 'z') ||
t.lex.MatchCharInRange('0', '9') || t.lex.MatchChar('_') {
}
t.lex.BuildTokenData(TOKEN_NAME, tok)
return true
} |
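// charClassNextToken tokenizes input while in CHAR_CLASS mode (inside a [...]
// character class): unicode escapes, simple escapes, dashes, ordinary characters,
// and the closing bracket, which switches the tokenizer back to REGULAR mode.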
func (t *Tokenizer) charClassNextToken(ch rune, tok *runtime.Token) {
switch ch {
case '\\':
switch t.lex.NextChar() {
case 'u':
t.lex.NextChar()
// HEX_DIGIT+
t.lex.MarkPos()
matched := false
for t.lex.MatchCharInRange('A', 'F') || t.lex.MatchCharInRange('a', 'f') ||
t.lex.MatchCharInRange('0', '9') {
matched = true
}
if matched {
t.lex.BuildTokenData(UNICODE_ESCAPE_CHAR, tok)
break
}
// '{'
t.lex.ResetPos()
if !t.lex.MatchChar('{') {
t.lex.BuildTokenDataNext(runtime.ILLEGAL, tok)
break
}
// HEX_DIGIT+
matched = false
for t.lex.MatchCharInRange('A', 'F') || t.lex.MatchCharInRange('a', 'f') ||
t.lex.MatchCharInRange('0', '9') {
matched = true
}
if !matched {
t.lex.BuildTokenDataNext(runtime.ILLEGAL, tok)
break
}
if !t.lex.MatchChar('}') {
t.lex.BuildTokenDataNext(runtime.ILLEGAL, tok)
break
}
t.lex.BuildTokenData(UNICODE_ESCAPE_CHAR, tok)
default:
t.lex.BuildTokenDataNext(ESCAPE_CHAR, tok)
}
case '-':
t.lex.BuildTokenNext(DASH, tok)
case ']':
t.lex.BuildTokenNext(RBRACK, tok)
t.mode = REGULAR
default:
t.lex.BuildTokenDataNext(BASIC_CHAR, tok)
}
}
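// NextToken scans the next token into tok, first skipping whitespace and
// comments, then dispatching on the current lexer mode.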
func (t *Tokenizer) NextToken(tok *runtime.Token) {
if t.mode == CHAR_CLASS {
t.charClassNextToken(t.lex.CurrChar(), tok)
return
}
// Skip
skipping := true
for skipping {
switch t.lex.CurrChar() {
case '/':
switch t.lex.NextChar() {
// '//'
case '/':
t.lex.NextChar()
// ~[\r\n]*
for t.lex.MatchCharExceptInSeq("\r\n") {
}
t.lex.DiscardTokenData()
// '/*'
case '*':
t.lex.NextChar()
t.lex.MatchUntilSeq("*/")
t.lex.MatchSeq("*/")
t.lex.DiscardTokenData()
default:
t.lex.BuildTokenDataNext(runtime.ILLEGAL, tok)
return
}
// [ \t\r\n\f]+
case ' ', '\t', '\r', '\n', '\f':
t.lex.NextChar()
for t.lex.MatchCharInSeq(" \t\r\n\f") {
}
t.lex.DiscardTokenData()
default:
skipping = false
}
}
if t.processRuleName(tok) {
return
}
if t.processTokenName(tok) {
return
}
switch t.lex.CurrChar() {
case '\'':
t.lex.NextChar()
// ('\\\'' | ~'\'')+
matched := false
for t.lex.MatchSeq("\\'") || t.lex.MatchCharExcept('\'') {
matched = true
}
if !matched {
t.lex.BuildTokenDataNext(runtime.ILLEGAL, tok)
return
}
// '\''
if !t.lex.MatchChar('\'') {
t.lex.BuildTokenDataNext(runtime.ILLEGAL, tok)
return
}
t.lex.BuildTokenData(TOKEN_LIT, tok)
case '-':
t.lex.NextChar()
// '>'
if !t.lex.MatchChar('>') {
t.lex.BuildTokenDataNext(runtime.ILLEGAL, tok)
return
}
t.lex.BuildToken(RARROW, tok)
case '.':
t.lex.BuildTokenNext(DOT, tok)
case ':':
t.lex.BuildTokenNext(COLON, tok)
case ';':
t.lex.BuildTokenNext(SEMI, tok)
case '|':
t.lex.BuildTokenNext(PIPE, tok)
case '(':
t.lex.BuildTokenNext(LPAREN, tok)
case ')':
t.lex.BuildTokenNext(RPAREN, tok)
case '+':
t.lex.BuildTokenNext(PLUS, tok)
case '*':
t.lex.BuildTokenNext(STAR, tok)
case '?':
t.lex.BuildTokenNext(QUEST_MARK, tok)
case '~':
t.lex.BuildTokenNext(TILDE, tok)
case ',':
t.lex.BuildTokenNext(COMMA, tok)
case '[':
t.lex.BuildTokenNext(LBRACK, tok)
t.mode = CHAR_CLASS
case runtime.EOFChar:
t.lex.BuildToken(runtime.EOF, tok)
default:
t.lex.BuildTokenDataNext(runtime.ILLEGAL, tok)
}
} | |
brokerchannel_types.go | /*
Copyright 2019 The Knative Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"knative.dev/pkg/apis"
"knative.dev/pkg/apis/duck"
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/kmeta"
"knative.dev/pkg/webhook/resourcesemantics"
)
// +genclient | // +optional
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec holds the desired state of the BrokerChannel (from the client).
Spec BrokerChannelSpec `json:"spec,omitempty"`
// Status communicates the observed state of the BrokerChannel (from the controller).
// +optional
Status BrokerChannelStatus `json:"status,omitempty"`
}
// GetGroupVersionKind returns the GroupVersionKind.
func (*BrokerChannel) GetGroupVersionKind() schema.GroupVersionKind {
return SchemeGroupVersion.WithKind("BrokerChannel")
}
var (
// Check that BrokerChannel can be validated and defaulted.
_ apis.Defaultable = (*BrokerChannel)(nil)
_ apis.Validatable = (*BrokerChannel)(nil)
// Check that we can create OwnerReferences to a BrokerChannel.
_ kmeta.OwnerRefable = (*BrokerChannel)(nil)
// Check that BrokerChannel is a runtime.Object.
_ runtime.Object = (*BrokerChannel)(nil)
// Check that BrokerChannel satisfies resourcesemantics.GenericCRD.
_ resourcesemantics.GenericCRD = (*BrokerChannel)(nil)
// Check that BrokerChannel implements the Conditions duck type.
_ = duck.VerifyType(&BrokerChannel{}, &duckv1.Conditions{})
// Check that the type conforms to the duck Knative Resource shape.
_ duckv1.KRShaped = (*BrokerChannel)(nil)
)
// BrokerChannelSpec holds the desired state of the BrokerChannel (from the client).
type BrokerChannelSpec struct {
BrokerAddr string `json:"brokeraddr"`
// +optional
BrokerPort int `json:"brokerport"`
Topic string `json:"topic"`
// +optional
duckv1.SourceSpec `json:",inline"`
}
// BrokerChannelStatus communicates the observed state of the BrokerChannel (from the controller).
type BrokerChannelStatus struct {
// inherits duck/v1 SourceStatus, which currently provides:
// * ObservedGeneration - the 'Generation' of the Service that was last
// processed by the controller.
// * Conditions - the latest available observations of a resource's current
// state.
// * SinkURI - the current active sink URI that has been configured for the
// Source.
duckv1.SourceStatus `json:",inline"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// BrokerChannelList is a list of BrokerChannel resources
type BrokerChannelList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []BrokerChannel `json:"items"`
}
// GetStatus retrieves the status of the resource. Implements the KRShaped interface.
func (bc *BrokerChannel) GetStatus() *duckv1.Status {
return &bc.Status.Status
} | // +genreconciler
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type BrokerChannel struct {
metav1.TypeMeta `json:",inline"` |
uname.rs | // This file is part of the uutils coreutils package.
//
// (c) Joao Oliveira <[email protected]>
// (c) Jian Zeng <anonymousknight96 AT gmail.com>
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// last synced with: uname (GNU coreutils) 8.21
// spell-checker:ignore (ToDO) nodename kernelname kernelrelease kernelversion sysname hwplatform mnrsv
use clap::{crate_version, Arg, Command};
use platform_info::*;
use uucore::{
error::{FromIo, UResult},
format_usage,
};
const ABOUT: &str = "Print certain system information. With no OPTION, same as -s.";
const USAGE: &str = "{} [OPTION]...";
pub mod options {
pub static ALL: &str = "all";
pub static KERNELNAME: &str = "kernel-name";
pub static NODENAME: &str = "nodename";
pub static KERNELVERSION: &str = "kernel-version";
pub static KERNELRELEASE: &str = "kernel-release";
pub static MACHINE: &str = "machine";
pub static PROCESSOR: &str = "processor";
pub static HWPLATFORM: &str = "hardware-platform";
pub static OS: &str = "operating-system";
}
#[cfg(all(target_os = "linux", any(target_env = "gnu", target_env = "")))]
const HOST_OS: &str = "GNU/Linux";
#[cfg(all(target_os = "linux", not(any(target_env = "gnu", target_env = ""))))]
const HOST_OS: &str = "Linux";
#[cfg(target_os = "android")]
const HOST_OS: &str = "Android";
#[cfg(target_os = "windows")]
const HOST_OS: &str = "Windows NT";
#[cfg(target_os = "freebsd")]
const HOST_OS: &str = "FreeBSD";
#[cfg(target_os = "netbsd")]
const HOST_OS: &str = "NetBSD";
#[cfg(target_os = "openbsd")]
const HOST_OS: &str = "OpenBSD";
#[cfg(target_vendor = "apple")]
const HOST_OS: &str = "Darwin";
#[cfg(target_os = "fuchsia")]
const HOST_OS: &str = "Fuchsia";
#[cfg(target_os = "redox")]
const HOST_OS: &str = "Redox";
#[uucore::main]
pub fn uumain(args: impl uucore::Args) -> UResult<()> {
let matches = uu_app().get_matches_from(args);
let uname =
PlatformInfo::new().map_err_context(|| "failed to create PlatformInfo".to_string())?;
let mut output = String::new();
let all = matches.is_present(options::ALL);
let kernelname = matches.is_present(options::KERNELNAME);
let nodename = matches.is_present(options::NODENAME);
let kernelrelease = matches.is_present(options::KERNELRELEASE);
let kernelversion = matches.is_present(options::KERNELVERSION);
let machine = matches.is_present(options::MACHINE);
let processor = matches.is_present(options::PROCESSOR);
let hwplatform = matches.is_present(options::HWPLATFORM);
let os = matches.is_present(options::OS);
let none = !(all
|| kernelname
|| nodename
|| kernelrelease
|| kernelversion
|| machine
|| os
|| processor
|| hwplatform);
if kernelname || all || none {
output.push_str(&uname.sysname());
output.push(' ');
}
if nodename || all {
output.push_str(&uname.nodename());
output.push(' ');
}
if kernelrelease || all {
output.push_str(&uname.release());
output.push(' ');
}
if kernelversion || all {
output.push_str(&uname.version());
output.push(' ');
}
if machine || all {
output.push_str(&uname.machine());
output.push(' ');
}
if processor || all {
// According to https://stackoverflow.com/posts/394271/revisions
// Most of the time, it returns unknown
output.push_str("unknown");
output.push(' ');
}
if hwplatform || all {
// According to https://lists.gnu.org/archive/html/bug-coreutils/2005-09/msg00063.html
// Most of the time, it returns unknown
output.push_str("unknown");
output.push(' ');
}
if os || all {
output.push_str(HOST_OS);
output.push(' ');
}
println!("{}", output.trim_end());
Ok(())
}
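/// Build the clap `Command` describing every flag this uname implementation accepts.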
pub fn uu_app<'a>() -> Command<'a> | {
Command::new(uucore::util_name())
.version(crate_version!())
.about(ABOUT)
.override_usage(format_usage(USAGE))
.infer_long_args(true)
.arg(Arg::new(options::ALL)
.short('a')
.long(options::ALL)
.help("Behave as though all of the options -mnrsv were specified."))
.arg(Arg::new(options::KERNELNAME)
.short('s')
.long(options::KERNELNAME)
.alias("sysname") // Obsolescent option in GNU uname
.help("print the kernel name."))
.arg(Arg::new(options::NODENAME)
.short('n')
.long(options::NODENAME)
.help("print the nodename (the nodename may be a name that the system is known by to a communications network)."))
.arg(Arg::new(options::KERNELRELEASE)
.short('r')
.long(options::KERNELRELEASE)
.alias("release") // Obsolescent option in GNU uname
.help("print the operating system release."))
.arg(Arg::new(options::KERNELVERSION)
.short('v')
.long(options::KERNELVERSION)
.help("print the operating system version."))
.arg(Arg::new(options::HWPLATFORM)
.short('i')
.long(options::HWPLATFORM)
.help("print the hardware platform (non-portable)"))
.arg(Arg::new(options::MACHINE)
.short('m')
.long(options::MACHINE)
.help("print the machine hardware name."))
.arg(Arg::new(options::PROCESSOR)
.short('p')
.long(options::PROCESSOR)
.help("print the processor type (non-portable)"))
.arg(Arg::new(options::OS)
.short('o')
.long(options::OS)
.help("print the operating system name."))
} |
|
main.rs | use std::thread;
use structopt::StructOpt;
// This is copied and pasted willy-nilly from Rust CLI book
// Just messing around with StructOpts
/// Search for a pattern in a file and display the lines that contain it.
#[derive(StructOpt)]
#[structopt(name = "cli-args", about = "An example of Command Line Arg usage.")]
struct | {
/// Activate debug mode
// short and long flags (-d, --debug) will be deduced from the field's name
#[structopt(short, long)]
debug: bool,
/// The pattern to look for
#[structopt(default_value="SHA256")]
hash_method: String,
/// The path to the file to read
#[structopt(parse(from_os_str))]
path: std::path::PathBuf,
}
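// Hypothetical invocation (binary name taken from the `structopt(name = ...)` attribute):
//   cli-args --debug MD5 ./some-file.bin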
use failure::ResultExt;
use exitfailure::ExitFailure;
// fn main() -> Result<(), ExitFailure> {
// let path = "test.txt";
// let content = std::fs::read_to_string(path)
// .with_context(|_| format!("could not read file `{}`", path))?;
// println!("file content: {}", content);
// Ok(())
// }
fn do_hard_work() {
for i in 1..123 {
let x = 1;
}
}
fn main() {
let pb = indicatif::ProgressBar::new(100);
let opt = CliOptions::from_args();
println!("Debug: {:?}, Hash Method: {:?}, Path: {:?}", opt.debug, opt.hash_method, opt.path);
let known_hash_methods = ["SHA256", "MD5", "SHA1"];
if known_hash_methods.contains(&&opt.hash_method.to_uppercase()[..]) { // borrow rather than move so hash_method can still be printed below
println!("Known Hash Method: {:?}", opt.hash_method);
} else {
println!("Please Use a Known Hash Method: {:?}", known_hash_methods)
}
for i in 0..10 {
// do_hard_work();
for i in 1..5 {
pb.set_message(&format!("{}: {}", "Working...", i));
thread::sleep(std::time::Duration::from_millis(30)); // thread::sleep_ms is deprecated
}
pb.println(format!("[+] finished #{}", i));
pb.inc(1);
}
pb.finish_with_message("done");
} | CliOptions |
auth.py | """
sentry.utils.auth
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.conf import settings
from django.contrib.auth.backends import ModelBackend
from sentry.models import User
def parse_auth_header(header):
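"""Parse an auth header of the form '<scheme> k1=v1, k2=v2' (e.g. X-Sentry-Auth) into a dict of its key=value pairs."""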
return dict(map(lambda x: x.strip().split('='), header.split(' ', 1)[1].split(',')))
def get_auth_providers():
return [
key for key, cfg_names
in settings.AUTH_PROVIDERS.iteritems()
if all(getattr(settings, c, None) for c in cfg_names)
]
def find_users(username, with_valid_password=True):
"""
Return a list of users matching a username,
falling back to an email address lookup.
"""
qs = User.objects
if with_valid_password:
qs = qs.exclude(password='!')
try:
# First, assume username is an iexact match for username
user = qs.get(username__iexact=username)
return [user]
except User.DoesNotExist:
# If not, we can take a stab at guessing it's an email address
if '@' in username:
# email isn't guaranteed unique
return list(qs.filter(email__iexact=username))
return None
class EmailAuthBackend(ModelBackend):
"""
Authenticate against django.contrib.auth.models.User.
Supports authenticating via an email address or a username.
"""
def authenticate(self, username=None, password=None):
users = find_users(username)
if users:
for user in users:
try:
if user.password and user.check_password(password):
|
except ValueError:
continue
return None
| return user |
user.js | import request from '@/utils/request'
export function login(data) {
return request({
url: '/API/UserInfo/UserLogin', | method: 'post',
data
})
}
export function getInfo(userId) {
return request({
url: '/API/UserInfo/GetUserDetail',
method: 'get',
params: { userId }
})
}
export function logout() {
return request({
url: '/vue-element-admin/user/logout',
method: 'post'
})
}
export function getAuthMenu() {
return request({
url: '/API/users/getMenu',
method: 'get'
})
} | |
forms.py | from wtforms import DecimalField, Form, RadioField
from wtforms.validators import NumberRange
class NCForm(Form):
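"""Fatty acid composition inputs (one CX:Y field per acid) plus the regression method to apply."""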
c8_0 = DecimalField('C8:0', validators=[NumberRange()])
c10_0 = DecimalField('C10:0', validators=[NumberRange()])
c12_0 = DecimalField('C12:0', validators=[NumberRange()])
c14_0 = DecimalField('C14:0', validators=[NumberRange()]) | c18_2 = DecimalField('C18:2', validators=[NumberRange()])
c18_3 = DecimalField('C18:3', validators=[NumberRange()])
c18_1_oh = DecimalField('C18:1 OH', validators=[NumberRange()])
c20_0 = DecimalField('C20:0', validators=[NumberRange()])
c20_1 = DecimalField('C20:1', validators=[NumberRange()])
c22_1 = DecimalField('C22:1', validators=[NumberRange()])
outros = DecimalField('Outros', validators=[NumberRange()])
regression = RadioField(choices=[('pls', 'PLS Regression'), ('mlr', 'MLR Regression'), ('svr', 'SVR Regression')], default='pls') | c16_0 = DecimalField('C16:0', validators=[NumberRange()])
c18_0 = DecimalField('C18:0', validators=[NumberRange()])
c18_1 = DecimalField('C18:1', validators=[NumberRange()]) |
desaf101_funcao_2_voto.py | def voto(n):
from datetime import date
global ano
ano = date.today().year - n
if 65 > ano >= 18:
r = 'OBRIGATÓRIO'
return r
if ano < 18:
r | if ano >= 65:  # >= so that age 65 itself is covered instead of falling through to None
r = 'OPCIONAL'
return r
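# voto() receives a birth year and returns the voting status: compulsory from 18 to 64, optional from 65 up, denied under 18.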
# main program
print('-=' * 20)
ano = int(input('Ano de nascimento: '))
r = voto(ano)
print(f'Com {ano} anos seu voto é {r}')
| = 'NEGADO'
return r
|
mutations.go | package postgres
import (
"fmt"
"time"
)
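// addZone renders addZoneQuery with a single values tuple that registers chainID as an explicitly added zone.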
func addZone(chainID string) string {
return fmt.Sprintf(addZoneQuery,
fmt.Sprintf("('%s', '%s', %t, %t)", chainID, chainID, true, false),
true,
)
}
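// addImplicitZones renders addImplicitZoneQuery with one values tuple per counterparty chain ID;
// the flags mirror addZone but mark each zone as implicit.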
func addImplicitZones(clients map[string]string) string {
query := ""
for _, chainID := range clients {
query += fmt.Sprintf("('%s', '%s', %t, %t),", chainID, chainID, false, false)
}
if len(query) > 0 {
query = query[:len(query)-1]
}
return fmt.Sprintf(addImplicitZoneQuery, query)
}
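// markBlock stamps chainID with the current time, delegating to markBlockConstruct for the query text.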
func markBlock(chainID string) string {
t := time.Now().Format(Format)
return markBlockConstruct(chainID, t)
}
func markBlockConstruct(chainID string, t string) string {
return fmt.Sprintf(markBlockQuery,
fmt.Sprintf("('%s', %d, '%s')", chainID, 1, t), t)
}
//func addTxStats(stats processor.TxStats) string {
// return fmt.Sprintf(addTxStatsQuery,
// fmt.Sprintf("('%s', '%s', %d, %d, %d, %d, %d)", stats.ChainID, stats.Hour.Format(Format), stats.Count,
// stats.TxWithIBCTransfer, 1, stats.TxWithIBCTransferFail, stats.TurnoverAmount),
// stats.Count,
// stats.TxWithIBCTransfer,
// stats.TxWithIBCTransferFail,
// stats.TurnoverAmount,
// )
//}
//func addActiveAddressesStats(stats processor.TxStats, address string) string {
// return fmt.Sprintf(addActiveAddressesQuery,
// fmt.Sprintf("('%s', '%s', '%s', %d)", address, stats.ChainID, stats.Hour.Format(Format), 1),
// )
//}
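// addClients renders addClientsQuery with one (origin, clientID, chainID) tuple per known client.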
func addClients(origin string, clients map[string]string) string {
values := ""
for clientID, chainID := range clients {
values += fmt.Sprintf("('%s', '%s', '%s'),", origin, clientID, chainID)
}
if len(values) > 0 {
values = values[:len(values)-1]
}
return fmt.Sprintf(addClientsQuery, values)
}
func | (origin string, data map[string]string) string {
values := ""
for connectionID, clientID := range data {
values += fmt.Sprintf("('%s', '%s', '%s'),", origin, connectionID, clientID)
}
if len(values) > 0 {
values = values[:len(values)-1]
}
return fmt.Sprintf(addConnectionsQuery, values)
}
func addChannels(origin string, data map[string]string) string {
values := ""
for channelID, connectionID := range data {
values += fmt.Sprintf("('%s', '%s', '%s',%t),", origin, channelID, connectionID, false)
}
if len(values) > 0 {
values = values[:len(values)-1]
}
return fmt.Sprintf(addChannelsQuery, values)
}
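// markChannel updates the open/closed state flag of the channel identified by origin and channelID.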
func markChannel(origin, channelID string, state bool) string {
return fmt.Sprintf(markChannelQuery,
state,
origin,
channelID)
}
//func addIbcStats(origin string, ibcData map[string]map[string]map[time.Time]int) []string {
// // buffer for our queries
// queries := make([]string, 0, 32)
//
// // process ibc transfers
// for source, destMap := range ibcData {
// for dest, hourMap := range destMap {
// for hour, count := range hourMap {
// queries = append(queries, fmt.Sprintf(addIbcStatsQuery,
// fmt.Sprintf("('%s', '%s', '%s', '%s', %d, %d)", origin, source, dest, hour.Format(Format), count, 1),
// count))
// }
// }
// }
// return queries
//}
| addConnections |
className.spec.tsx | describe('className / class', () => {
it('should have className', () => {
expect(<aside className="one" />).toHaveClass('one'); | });
}); | });
it('should have a class attribute', () => {
expect(<i class="a b c" />).toHaveClass('a b c'); |
resource.rs | use std::cell::RefCell;
use std::fmt;
use std::rc::Rc;
use actix_http::{Error, Extensions, Response};
use actix_service::boxed::{self, BoxedNewService, BoxedService};
use actix_service::{
apply_transform, IntoNewService, IntoTransform, NewService, Service, Transform,
};
use futures::future::{ok, Either, FutureResult};
use futures::{Async, Future, IntoFuture, Poll};
use crate::data::Data;
use crate::dev::{insert_slash, AppService, HttpServiceFactory, ResourceDef};
use crate::extract::FromRequest;
use crate::guard::Guard;
use crate::handler::{AsyncFactory, Factory};
use crate::responder::Responder;
use crate::route::{CreateRouteService, Route, RouteService};
use crate::service::{ServiceRequest, ServiceResponse};
type HttpService = BoxedService<ServiceRequest, ServiceResponse, Error>;
pub type HttpNewService = BoxedNewService<(), ServiceRequest, ServiceResponse, Error, ()>;
/// *Resource* is an entry in the resources table that corresponds to a requested URL.
///
/// A resource in turn has at least one route.
/// A route consists of a handler object and a list of guards
/// (objects that implement the `Guard` trait).
/// Resources and routes use a builder-like pattern for configuration.
/// During request handling, the resource object iterates through all routes
/// and checks each route's guards; if a request matches all of a route's
/// guards, the route is considered matched and its handler gets called.
///
/// ```rust
/// use actix_web::{web, App, HttpResponse};
///
/// fn main() {
/// let app = App::new().service(
/// web::resource("/")
/// .route(web::get().to(|| HttpResponse::Ok())));
/// }
/// ```
///
/// If no matching route can be found, a *405* response code gets returned.
/// The default behavior can be overridden with the `default_resource()` method.
pub struct Resource<T = ResourceEndpoint> {
endpoint: T,
rdef: String,
name: Option<String>,
routes: Vec<Route>,
data: Option<Extensions>,
guards: Vec<Box<dyn Guard>>,
default: Rc<RefCell<Option<Rc<HttpNewService>>>>,
factory_ref: Rc<RefCell<Option<ResourceFactory>>>,
}
impl Resource {
pub fn new(path: &str) -> Resource {
let fref = Rc::new(RefCell::new(None));
Resource {
routes: Vec::new(),
rdef: path.to_string(),
name: None,
endpoint: ResourceEndpoint::new(fref.clone()),
factory_ref: fref,
guards: Vec::new(),
data: None,
default: Rc::new(RefCell::new(None)),
}
}
}
impl<T> Resource<T>
where
T: NewService<
Config = (),
Request = ServiceRequest,
Response = ServiceResponse,
Error = Error,
InitError = (),
>,
{
/// Set resource name.
///
/// Name is used for url generation.
pub fn name(mut self, name: &str) -> Self {
self.name = Some(name.to_string());
self
}
/// Add match guard to a resource.
///
/// ```rust
/// use actix_web::{web, guard, App, HttpResponse};
///
/// fn index(data: web::Path<(String, String)>) -> &'static str {
/// "Welcome!"
/// }
///
/// fn main() {
/// let app = App::new()
/// .service(
/// web::resource("/app")
/// .guard(guard::Header("content-type", "text/plain"))
/// .route(web::get().to(index))
/// )
/// .service(
/// web::resource("/app")
/// .guard(guard::Header("content-type", "text/json"))
/// .route(web::get().to(|| HttpResponse::MethodNotAllowed()))
/// );
/// }
/// ```
pub fn guard<G: Guard + 'static>(mut self, guard: G) -> Self {
self.guards.push(Box::new(guard));
self
}
pub(crate) fn add_guards(mut self, guards: Vec<Box<dyn Guard>>) -> Self {
self.guards.extend(guards);
self
}
/// Register a new route.
///
/// ```rust
/// use actix_web::{web, guard, App, HttpResponse};
///
/// fn main() {
/// let app = App::new().service(
/// web::resource("/").route(
/// web::route()
/// .guard(guard::Any(guard::Get()).or(guard::Put()))
/// .guard(guard::Header("Content-Type", "text/plain"))
/// .to(|| HttpResponse::Ok()))
/// );
/// }
/// ```
///
/// Multiple routes can be added to a resource. The resource object uses
/// match guards for route selection.
///
/// ```rust
/// use actix_web::{web, guard, App, HttpResponse};
///
/// fn main() {
/// let app = App::new().service(
/// web::resource("/container/")
/// .route(web::get().to(get_handler))
/// .route(web::post().to(post_handler))
/// .route(web::delete().to(delete_handler))
/// );
/// }
/// # fn get_handler() {}
/// # fn post_handler() {}
/// # fn delete_handler() {}
/// ```
pub fn route(mut self, route: Route) -> Self {
self.routes.push(route);
self
}
/// Provide resource-specific data. This method allows adding extractor
/// configuration or specific state available via the `Data<T>` extractor.
/// Provided data is available to all routes registered for the current resource.
/// Resource data overrides data registered by the `App::data()` method.
///
/// ```rust
/// use actix_web::{web, App, FromRequest};
///
/// /// extract text data from request
/// fn index(body: String) -> String {
/// format!("Body {}!", body)
/// }
///
/// fn main() {
/// let app = App::new().service(
/// web::resource("/index.html")
/// // limit size of the payload
/// .data(String::configure(|cfg| {
/// cfg.limit(4096)
/// }))
/// .route(
/// web::get()
/// // register handler
/// .to(index)
/// ));
/// }
/// ```
pub fn data<U: 'static>(mut self, data: U) -> Self {
if self.data.is_none() {
self.data = Some(Extensions::new());
}
self.data.as_mut().unwrap().insert(Data::new(data));
self
}
/// Register a new route and add handler. This route matches all requests.
///
/// ```rust
/// use actix_web::*;
///
/// fn index(req: HttpRequest) -> HttpResponse {
/// unimplemented!()
/// }
///
/// App::new().service(web::resource("/").to(index));
/// ```
///
/// This is a shortcut for:
///
/// ```rust
/// # extern crate actix_web;
/// # use actix_web::*;
/// # fn index(req: HttpRequest) -> HttpResponse { unimplemented!() } | F: Factory<I, R> + 'static,
I: FromRequest + 'static,
R: Responder + 'static,
{
self.routes.push(Route::new().to(handler));
self
}
/// Register a new route and add async handler.
///
/// ```rust
/// use actix_web::*;
/// use futures::future::{ok, Future};
///
/// fn index(req: HttpRequest) -> impl Future<Item=HttpResponse, Error=Error> {
/// ok(HttpResponse::Ok().finish())
/// }
///
/// App::new().service(web::resource("/").to_async(index));
/// ```
///
/// This is a shortcut for:
///
/// ```rust
/// # use actix_web::*;
/// # use futures::future::Future;
/// # fn index(req: HttpRequest) -> Box<dyn Future<Item=HttpResponse, Error=Error>> {
/// # unimplemented!()
/// # }
/// App::new().service(web::resource("/").route(web::route().to_async(index)));
/// ```
#[allow(clippy::wrong_self_convention)]
pub fn to_async<F, I, R>(mut self, handler: F) -> Self
where
F: AsyncFactory<I, R>,
I: FromRequest + 'static,
R: IntoFuture + 'static,
R::Item: Responder,
R::Error: Into<Error>,
{
self.routes.push(Route::new().to_async(handler));
self
}
/// Register a resource middleware.
///
/// This is similar to `App`'s middlewares, but the middleware gets invoked at the resource level.
/// Resource-level middlewares are not allowed to change the response
/// type (i.e. modify the response's body).
///
/// **Note**: middlewares get called in the opposite order of their registration.
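/// See `wrap_fn` below for a closure-based example of the same mechanism.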
pub fn wrap<M, F>(
self,
mw: F,
) -> Resource<
impl NewService<
Config = (),
Request = ServiceRequest,
Response = ServiceResponse,
Error = Error,
InitError = (),
>,
>
where
M: Transform<
T::Service,
Request = ServiceRequest,
Response = ServiceResponse,
Error = Error,
InitError = (),
>,
F: IntoTransform<M, T::Service>,
{
let endpoint = apply_transform(mw, self.endpoint);
Resource {
endpoint,
rdef: self.rdef,
name: self.name,
guards: self.guards,
routes: self.routes,
default: self.default,
data: self.data,
factory_ref: self.factory_ref,
}
}
/// Register a resource middleware function.
///
/// This function accepts an instance of the `ServiceRequest` type and a
/// mutable reference to the next middleware in the chain.
///
/// This is similar to `App`'s middlewares, but the middleware gets invoked at the resource level.
/// Resource-level middlewares are not allowed to change the response
/// type (i.e. modify the response's body).
///
/// ```rust
/// use actix_service::Service;
/// # use futures::Future;
/// use actix_web::{web, App};
/// use actix_web::http::{header::CONTENT_TYPE, HeaderValue};
///
/// fn index() -> &'static str {
/// "Welcome!"
/// }
///
/// fn main() {
/// let app = App::new().service(
/// web::resource("/index.html")
/// .wrap_fn(|req, srv|
/// srv.call(req).map(|mut res| {
/// res.headers_mut().insert(
/// CONTENT_TYPE, HeaderValue::from_static("text/plain"),
/// );
/// res
/// }))
/// .route(web::get().to(index)));
/// }
/// ```
pub fn wrap_fn<F, R>(
self,
mw: F,
) -> Resource<
impl NewService<
Config = (),
Request = ServiceRequest,
Response = ServiceResponse,
Error = Error,
InitError = (),
>,
>
where
F: FnMut(ServiceRequest, &mut T::Service) -> R + Clone,
R: IntoFuture<Item = ServiceResponse, Error = Error>,
{
self.wrap(mw)
}
/// Default service to be used if no matching route can be found.
/// By default a *405* response gets returned. A resource does not use the
/// default handler from `App` or `Scope`.
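/// A common pattern (illustrative, not the only option) is
/// `.default_service(web::route().to(|| HttpResponse::NotFound()))`.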
pub fn default_service<F, U>(mut self, f: F) -> Self
where
F: IntoNewService<U>,
U: NewService<
Config = (),
Request = ServiceRequest,
Response = ServiceResponse,
Error = Error,
> + 'static,
U::InitError: fmt::Debug,
{
// create and configure default resource
self.default = Rc::new(RefCell::new(Some(Rc::new(boxed::new_service(
f.into_new_service().map_init_err(|e| {
log::error!("Can not construct default service: {:?}", e)
}),
)))));
self
}
}
impl<T> HttpServiceFactory for Resource<T>
where
T: NewService<
Config = (),
Request = ServiceRequest,
Response = ServiceResponse,
Error = Error,
InitError = (),
> + 'static,
{
fn register(mut self, config: &mut AppService) {
let guards = if self.guards.is_empty() {
None
} else {
Some(std::mem::replace(&mut self.guards, Vec::new()))
};
let mut rdef = if config.is_root() || !self.rdef.is_empty() {
ResourceDef::new(&insert_slash(&self.rdef))
} else {
ResourceDef::new(&self.rdef)
};
if let Some(ref name) = self.name {
*rdef.name_mut() = name.clone();
}
// custom app data storage
if let Some(ref mut ext) = self.data {
config.set_service_data(ext);
}
config.register_service(rdef, guards, self, None)
}
}
impl<T> IntoNewService<T> for Resource<T>
where
T: NewService<
Config = (),
Request = ServiceRequest,
Response = ServiceResponse,
Error = Error,
InitError = (),
>,
{
fn into_new_service(self) -> T {
*self.factory_ref.borrow_mut() = Some(ResourceFactory {
routes: self.routes,
data: self.data.map(Rc::new),
default: self.default,
});
self.endpoint
}
}
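// ResourceFactory holds a resource's routes, shared data, and default service,
// and builds one ResourceService per server worker.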
pub struct ResourceFactory {
pub routes: Vec<Route>,
pub data: Option<Rc<Extensions>>,
pub default: Rc<RefCell<Option<Rc<HttpNewService>>>>,
}
impl NewService for ResourceFactory {
type Config = ();
type Request = ServiceRequest;
type Response = ServiceResponse;
type Error = Error;
type InitError = ();
type Service = ResourceService;
type Future = CreateResourceService;
fn new_service(&self, _: &()) -> Self::Future {
let default_fut = if let Some(ref default) = *self.default.borrow() {
Some(default.new_service(&()))
} else {
None
};
CreateResourceService {
fut: self
.routes
.iter()
.map(|route| CreateRouteServiceItem::Future(route.new_service(&())))
.collect(),
data: self.data.clone(),
default: None,
default_fut,
}
}
}
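// CreateRouteServiceItem tracks a single route while its service is still being
// constructed asynchronously.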
pub enum CreateRouteServiceItem {
Future(CreateRouteService),
Service(RouteService),
}
pub struct CreateResourceService {
pub fut: Vec<CreateRouteServiceItem>,
pub data: Option<Rc<Extensions>>,
pub default: Option<HttpService>,
pub default_fut: Option<Box<dyn Future<Item = HttpService, Error = ()>>>,
}
impl Future for CreateResourceService {
type Item = ResourceService;
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let mut done = true;
if let Some(ref mut fut) = self.default_fut {
match fut.poll()? {
Async::Ready(default) => self.default = Some(default),
Async::NotReady => done = false,
}
}
// poll http services
for item in &mut self.fut {
match item {
CreateRouteServiceItem::Future(ref mut fut) => match fut.poll()? {
Async::Ready(route) => {
*item = CreateRouteServiceItem::Service(route)
}
Async::NotReady => {
done = false;
}
},
CreateRouteServiceItem::Service(_) => continue,
};
}
if done {
let routes = self
.fut
.drain(..)
.map(|item| match item {
CreateRouteServiceItem::Service(service) => service,
CreateRouteServiceItem::Future(_) => unreachable!(),
})
.collect();
Ok(Async::Ready(ResourceService {
routes,
data: self.data.clone(),
default: self.default.take(),
}))
} else {
Ok(Async::NotReady)
}
}
}
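// ResourceService dispatches a request to the first route whose guards all match,
// falling back to the default service (a 405 response unless overridden).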
pub struct ResourceService {
routes: Vec<RouteService>,
data: Option<Rc<Extensions>>,
default: Option<HttpService>,
}
impl Service for ResourceService {
type Request = ServiceRequest;
type Response = ServiceResponse;
type Error = Error;
type Future = Either<
FutureResult<ServiceResponse, Error>,
Box<dyn Future<Item = ServiceResponse, Error = Error>>,
>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, mut req: ServiceRequest) -> Self::Future {
for route in self.routes.iter_mut() {
if route.check(&mut req) {
if let Some(ref data) = self.data {
req.set_data_container(data.clone());
}
return route.call(req);
}
}
if let Some(ref mut default) = self.default {
default.call(req)
} else {
let req = req.into_parts().0;
Either::A(ok(ServiceResponse::new(
req,
Response::MethodNotAllowed().finish(),
)))
}
}
}
#[doc(hidden)]
pub struct ResourceEndpoint {
factory: Rc<RefCell<Option<ResourceFactory>>>,
}
impl ResourceEndpoint {
fn new(factory: Rc<RefCell<Option<ResourceFactory>>>) -> Self {
ResourceEndpoint { factory }
}
}
impl NewService for ResourceEndpoint {
type Config = ();
type Request = ServiceRequest;
type Response = ServiceResponse;
type Error = Error;
type InitError = ();
type Service = ResourceService;
type Future = CreateResourceService;
fn new_service(&self, _: &()) -> Self::Future {
self.factory.borrow_mut().as_mut().unwrap().new_service(&())
}
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use actix_service::Service;
use futures::{Future, IntoFuture};
use tokio_timer::sleep;
use crate::http::{header, HeaderValue, Method, StatusCode};
use crate::service::{ServiceRequest, ServiceResponse};
use crate::test::{call_service, init_service, TestRequest};
use crate::{guard, web, App, Error, HttpResponse};
fn md<S, B>(
req: ServiceRequest,
srv: &mut S,
) -> impl IntoFuture<Item = ServiceResponse<B>, Error = Error>
where
S: Service<
Request = ServiceRequest,
Response = ServiceResponse<B>,
Error = Error,
>,
{
srv.call(req).map(|mut res| {
res.headers_mut()
.insert(header::CONTENT_TYPE, HeaderValue::from_static("0001"));
res
})
}
#[test]
fn test_middleware() {
let mut srv = init_service(
App::new().service(
web::resource("/test")
.name("test")
.wrap(md)
.route(web::get().to(|| HttpResponse::Ok())),
),
);
let req = TestRequest::with_uri("/test").to_request();
let resp = call_service(&mut srv, req);
assert_eq!(resp.status(), StatusCode::OK);
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
HeaderValue::from_static("0001")
);
}
#[test]
fn test_middleware_fn() {
let mut srv = init_service(
App::new().service(
web::resource("/test")
.wrap_fn(|req, srv| {
srv.call(req).map(|mut res| {
res.headers_mut().insert(
header::CONTENT_TYPE,
HeaderValue::from_static("0001"),
);
res
})
})
.route(web::get().to(|| HttpResponse::Ok())),
),
);
let req = TestRequest::with_uri("/test").to_request();
let resp = call_service(&mut srv, req);
assert_eq!(resp.status(), StatusCode::OK);
assert_eq!(
resp.headers().get(header::CONTENT_TYPE).unwrap(),
HeaderValue::from_static("0001")
);
}
#[test]
fn test_to_async() {
let mut srv =
init_service(App::new().service(web::resource("/test").to_async(|| {
sleep(Duration::from_millis(100)).then(|_| HttpResponse::Ok())
})));
let req = TestRequest::with_uri("/test").to_request();
let resp = call_service(&mut srv, req);
assert_eq!(resp.status(), StatusCode::OK);
}
#[test]
fn test_default_resource() {
let mut srv = init_service(
App::new()
.service(
web::resource("/test").route(web::get().to(|| HttpResponse::Ok())),
)
.default_service(|r: ServiceRequest| {
r.into_response(HttpResponse::BadRequest())
}),
);
let req = TestRequest::with_uri("/test").to_request();
let resp = call_service(&mut srv, req);
assert_eq!(resp.status(), StatusCode::OK);
let req = TestRequest::with_uri("/test")
.method(Method::POST)
.to_request();
let resp = call_service(&mut srv, req);
assert_eq!(resp.status(), StatusCode::METHOD_NOT_ALLOWED);
let mut srv = init_service(
App::new().service(
web::resource("/test")
.route(web::get().to(|| HttpResponse::Ok()))
.default_service(|r: ServiceRequest| {
r.into_response(HttpResponse::BadRequest())
}),
),
);
let req = TestRequest::with_uri("/test").to_request();
let resp = call_service(&mut srv, req);
assert_eq!(resp.status(), StatusCode::OK);
let req = TestRequest::with_uri("/test")
.method(Method::POST)
.to_request();
let resp = call_service(&mut srv, req);
assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
}
#[test]
fn test_resource_guards() {
let mut srv = init_service(
App::new()
.service(
web::resource("/test/{p}")
.guard(guard::Get())
.to(|| HttpResponse::Ok()),
)
.service(
web::resource("/test/{p}")
.guard(guard::Put())
.to(|| HttpResponse::Created()),
)
.service(
web::resource("/test/{p}")
.guard(guard::Delete())
.to(|| HttpResponse::NoContent()),
),
);
let req = TestRequest::with_uri("/test/it")
.method(Method::GET)
.to_request();
let resp = call_service(&mut srv, req);
assert_eq!(resp.status(), StatusCode::OK);
let req = TestRequest::with_uri("/test/it")
.method(Method::PUT)
.to_request();
let resp = call_service(&mut srv, req);
assert_eq!(resp.status(), StatusCode::CREATED);
let req = TestRequest::with_uri("/test/it")
.method(Method::DELETE)
.to_request();
let resp = call_service(&mut srv, req);
assert_eq!(resp.status(), StatusCode::NO_CONTENT);
}
} | /// App::new().service(web::resource("/").route(web::route().to(index)));
/// ```
pub fn to<F, I, R>(mut self, handler: F) -> Self
where |
__init__.py | #
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
"""ovirt-host-setup vmconsole_proxy plugin."""
from otopi import util
from . import config
from . import pki
from . import system
@util.export
def createPlugins(context):
|
# vim: expandtab tabstop=4 shiftwidth=4
| config.Plugin(context=context)
pki.Plugin(context=context)
system.Plugin(context=context) |
events.pb.go | // Copyright 2018 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.24.0-devel
// protoc v3.12.1
// source: go.chromium.org/luci/cipd/api/cipd/v1/events.proto
package api
import (
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type EventKind int32
const (
EventKind_EVENT_KIND_UNSPECIFIED EventKind = 0
// Prefix events: relate to some CIPD prefix.
EventKind_PREFIX_ACL_CHANGED EventKind = 100
// Package events: relate to a package (as a whole).
EventKind_PACKAGE_CREATED EventKind = 200
EventKind_PACKAGE_DELETED EventKind = 201
EventKind_PACKAGE_HIDDEN EventKind = 202
EventKind_PACKAGE_UNHIDDEN EventKind = 203
// Instance events: relate to a particular package instance.
EventKind_INSTANCE_CREATED EventKind = 300
EventKind_INSTANCE_DELETED EventKind = 301
EventKind_INSTANCE_REF_SET EventKind = 302
EventKind_INSTANCE_REF_UNSET EventKind = 303
EventKind_INSTANCE_TAG_ATTACHED EventKind = 304
EventKind_INSTANCE_TAG_DETACHED EventKind = 305
)
// Enum value maps for EventKind.
var (
EventKind_name = map[int32]string{
0: "EVENT_KIND_UNSPECIFIED",
100: "PREFIX_ACL_CHANGED",
200: "PACKAGE_CREATED",
201: "PACKAGE_DELETED",
202: "PACKAGE_HIDDEN",
203: "PACKAGE_UNHIDDEN",
300: "INSTANCE_CREATED",
301: "INSTANCE_DELETED",
302: "INSTANCE_REF_SET",
303: "INSTANCE_REF_UNSET",
304: "INSTANCE_TAG_ATTACHED",
305: "INSTANCE_TAG_DETACHED",
}
EventKind_value = map[string]int32{
"EVENT_KIND_UNSPECIFIED": 0,
"PREFIX_ACL_CHANGED": 100,
"PACKAGE_CREATED": 200,
"PACKAGE_DELETED": 201,
"PACKAGE_HIDDEN": 202,
"PACKAGE_UNHIDDEN": 203,
"INSTANCE_CREATED": 300,
"INSTANCE_DELETED": 301,
"INSTANCE_REF_SET": 302,
"INSTANCE_REF_UNSET": 303,
"INSTANCE_TAG_ATTACHED": 304,
"INSTANCE_TAG_DETACHED": 305,
}
)
func (x EventKind) Enum() *EventKind {
p := new(EventKind)
*p = x
return p
}
func (x EventKind) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (EventKind) Descriptor() protoreflect.EnumDescriptor {
return file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_enumTypes[0].Descriptor()
}
func (EventKind) Type() protoreflect.EnumType {
return &file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_enumTypes[0]
}
func (x EventKind) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use EventKind.Descriptor instead.
func (EventKind) EnumDescriptor() ([]byte, []int) {
return file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_rawDescGZIP(), []int{0}
}
// Event in a global structured event log.
//
// It exists in both BigQuery (for adhoc queries) and in Datastore (for showing
// in web UI, e.g. for "recent tags" feature).
//
// Datastore entities contain the serialized Event as is, plus a copy of some
// of its fields for indexing.
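//
// A hypothetical sketch of populating an Event in application code (field
// names as defined on the struct below; the values are made up):
//
//	e := &Event{
//		Kind:    EventKind_INSTANCE_CREATED,
//		Who:     "user:someone@example.com",
//		Package: "infra/tools/foo",
//	}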
type Event struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Kind EventKind `protobuf:"varint,1,opt,name=kind,proto3,enum=cipd.EventKind" json:"kind,omitempty"`
Who string `protobuf:"bytes,2,opt,name=who,proto3" json:"who,omitempty"` // an identity string, e.g. "user:<email>"
	// Real time is used only up to millisecond precision. Nanoseconds are
	// abused to order events emitted by a single transaction.
When *timestamp.Timestamp `protobuf:"bytes,3,opt,name=when,proto3" json:"when,omitempty"`
Package string `protobuf:"bytes,4,opt,name=package,proto3" json:"package,omitempty"` // a package name or a prefix (for PREFIX_* events)
Instance string `protobuf:"bytes,5,opt,name=instance,proto3" json:"instance,omitempty"` // an instance ID for INSTANCE_*
Ref string `protobuf:"bytes,6,opt,name=ref,proto3" json:"ref,omitempty"` // a ref name for INSTANCE_REF_*
Tag string `protobuf:"bytes,7,opt,name=tag,proto3" json:"tag,omitempty"` // a tag (in 'k:v' form) for INSTANCE_TAG_*
// An ACL diff for PREFIX_ACL_CHANGED.
GrantedRole []*PrefixMetadata_ACL `protobuf:"bytes,8,rep,name=granted_role,json=grantedRole,proto3" json:"granted_role,omitempty"`
RevokedRole []*PrefixMetadata_ACL `protobuf:"bytes,9,rep,name=revoked_role,json=revokedRole,proto3" json:"revoked_role,omitempty"`
}
func (x *Event) Reset() {
*x = Event{}
if protoimpl.UnsafeEnabled {
mi := &file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Event) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Event) ProtoMessage() {}
func (x *Event) ProtoReflect() protoreflect.Message {
mi := &file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Event.ProtoReflect.Descriptor instead.
func (*Event) Descriptor() ([]byte, []int) {
return file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_rawDescGZIP(), []int{0}
}
func (x *Event) GetKind() EventKind {
if x != nil {
return x.Kind
}
return EventKind_EVENT_KIND_UNSPECIFIED
}
func (x *Event) GetWho() string {
if x != nil {
return x.Who
}
return ""
}
func (x *Event) GetWhen() *timestamp.Timestamp {
if x != nil {
return x.When
}
return nil
}
func (x *Event) GetPackage() string {
if x != nil {
return x.Package
}
return ""
}
func (x *Event) GetInstance() string {
if x != nil {
return x.Instance
}
return ""
}
func (x *Event) GetRef() string {
if x != nil {
return x.Ref
}
return ""
}
func (x *Event) GetTag() string {
if x != nil {
return x.Tag
}
return ""
}
func (x *Event) GetGrantedRole() []*PrefixMetadata_ACL {
if x != nil {
return x.GrantedRole
}
return nil
}
func (x *Event) GetRevokedRole() []*PrefixMetadata_ACL {
if x != nil {
return x.RevokedRole
}
return nil
}
var File_go_chromium_org_luci_cipd_api_cipd_v1_events_proto protoreflect.FileDescriptor
var file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_rawDesc = []byte{
0x0a, 0x32, 0x67, 0x6f, 0x2e, 0x63, 0x68, 0x72, 0x6f, 0x6d, 0x69, 0x75, 0x6d, 0x2e, 0x6f, 0x72,
0x67, 0x2f, 0x6c, 0x75, 0x63, 0x69, 0x2f, 0x63, 0x69, 0x70, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f,
0x63, 0x69, 0x70, 0x64, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x63, 0x69, 0x70, 0x64, 0x1a, 0x30, 0x67, 0x6f, 0x2e, 0x63,
0x68, 0x72, 0x6f, 0x6d, 0x69, 0x75, 0x6d, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x6c, 0x75, 0x63, 0x69,
0x2f, 0x63, 0x69, 0x70, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x69, 0x70, 0x64, 0x2f, 0x76,
0x31, 0x2f, 0x72, 0x65, 0x70, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc2, 0x02,
0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x63, 0x69, 0x70, 0x64, 0x2e, 0x45, 0x76, 0x65,
0x6e, 0x74, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x10, 0x0a, 0x03,
0x77, 0x68, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x77, 0x68, 0x6f, 0x12, 0x2e,
0x0a, 0x04, 0x77, 0x68, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x77, 0x68, 0x65, 0x6e, 0x12, 0x18,
0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x6e, 0x73, 0x74,
0x61, 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x69, 0x6e, 0x73, 0x74,
0x61, 0x6e, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x06, 0x20, 0x01, 0x28,
0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20,
0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x3b, 0x0a, 0x0c, 0x67, 0x72, 0x61, 0x6e,
0x74, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18,
0x2e, 0x63, 0x69, 0x70, 0x64, 0x2e, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x43, 0x4c, 0x52, 0x0b, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x65,
0x64, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x3b, 0x0a, 0x0c, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64,
0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x69,
0x70, 0x64, 0x2e, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
0x61, 0x2e, 0x41, 0x43, 0x4c, 0x52, 0x0b, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x6f,
0x6c, 0x65, 0x2a, 0xad, 0x02, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4b, 0x69, 0x6e, 0x64,
0x12, 0x1a, 0x0a, 0x16, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55,
0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12,
0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x41, 0x43, 0x4c, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47,
0x45, 0x44, 0x10, 0x64, 0x12, 0x14, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f,
0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0xc8, 0x01, 0x12, 0x14, 0x0a, 0x0f, 0x50, 0x41,
0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0xc9, 0x01,
0x12, 0x13, 0x0a, 0x0e, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x44, 0x44,
0x45, 0x4e, 0x10, 0xca, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45,
0x5f, 0x55, 0x4e, 0x48, 0x49, 0x44, 0x44, 0x45, 0x4e, 0x10, 0xcb, 0x01, 0x12, 0x15, 0x0a, 0x10,
0x49, 0x4e, 0x53, 0x54, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44,
0x10, 0xac, 0x02, 0x12, 0x15, 0x0a, 0x10, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4e, 0x43, 0x45, 0x5f,
0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0xad, 0x02, 0x12, 0x15, 0x0a, 0x10, 0x49, 0x4e, | 0x53, 0x54, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x52, 0x45, 0x46, 0x5f, 0x53, 0x45, 0x54, 0x10, 0xae,
0x02, 0x12, 0x17, 0x0a, 0x12, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x52, 0x45,
0x46, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0xaf, 0x02, 0x12, 0x1a, 0x0a, 0x15, 0x49, 0x4e,
0x53, 0x54, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x54, 0x41, 0x47, 0x5f, 0x41, 0x54, 0x54, 0x41, 0x43,
0x48, 0x45, 0x44, 0x10, 0xb0, 0x02, 0x12, 0x1a, 0x0a, 0x15, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4e,
0x43, 0x45, 0x5f, 0x54, 0x41, 0x47, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x43, 0x48, 0x45, 0x44, 0x10,
0xb1, 0x02, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x6f, 0x2e, 0x63, 0x68, 0x72, 0x6f, 0x6d, 0x69, 0x75,
0x6d, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x6c, 0x75, 0x63, 0x69, 0x2f, 0x63, 0x69, 0x70, 0x64, 0x2f,
0x61, 0x70, 0x69, 0x2f, 0x63, 0x69, 0x70, 0x64, 0x2f, 0x76, 0x31, 0x3b, 0x61, 0x70, 0x69, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_rawDescOnce sync.Once
file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_rawDescData = file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_rawDesc
)
func file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_rawDescGZIP() []byte {
file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_rawDescOnce.Do(func() {
file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_rawDescData = protoimpl.X.CompressGZIP(file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_rawDescData)
})
return file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_rawDescData
}
var file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_goTypes = []interface{}{
(EventKind)(0), // 0: cipd.EventKind
(*Event)(nil), // 1: cipd.Event
(*timestamp.Timestamp)(nil), // 2: google.protobuf.Timestamp
(*PrefixMetadata_ACL)(nil), // 3: cipd.PrefixMetadata.ACL
}
var file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_depIdxs = []int32{
0, // 0: cipd.Event.kind:type_name -> cipd.EventKind
2, // 1: cipd.Event.when:type_name -> google.protobuf.Timestamp
3, // 2: cipd.Event.granted_role:type_name -> cipd.PrefixMetadata.ACL
3, // 3: cipd.Event.revoked_role:type_name -> cipd.PrefixMetadata.ACL
4, // [4:4] is the sub-list for method output_type
4, // [4:4] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_init() }
func file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_init() {
if File_go_chromium_org_luci_cipd_api_cipd_v1_events_proto != nil {
return
}
file_go_chromium_org_luci_cipd_api_cipd_v1_repo_proto_init()
if !protoimpl.UnsafeEnabled {
file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Event); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_rawDesc,
NumEnums: 1,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_goTypes,
DependencyIndexes: file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_depIdxs,
EnumInfos: file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_enumTypes,
MessageInfos: file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_msgTypes,
}.Build()
File_go_chromium_org_luci_cipd_api_cipd_v1_events_proto = out.File
file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_rawDesc = nil
file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_goTypes = nil
file_go_chromium_org_luci_cipd_api_cipd_v1_events_proto_depIdxs = nil
} | |
command_bar.rs | //
// The command bar describes the bar which is constantly
// shown to the user.
//
// In general it consists of the following components:
//
// {CWD}{SVN Status}{PROMPT_SYMBOL}{INPUT_FIELD}|{OPTIONALS}
//
//
// CWD = Current working directory, folded if too long or when referencing user directories
// SVN = Quite common now; shows the current branch and pending changes
// PROMPT_SYMBOL = Usually something like ">" or "$"
// INPUT_FIELD = Where the user's input text is displayed
// OPTIONALS = Extras; Zsh, for example, shows the current time or the number of entered commands
//
//
// Civa provides customization for each of these components.
//
//
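// For illustration, a hypothetical rendering with the default component
// order (the path, branch name, and input are made-up values):
//
//   ~/projects/civa (master *) > cargo build
//   \_____CWD_____/ \__SVN___/ ^ PROMPT_SYMBOL, followed by INPUT_FIELD
//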
use std::fs;
use log::info;
use yaml_rust::{Yaml, YamlLoader};
use super::error::ConfigError;
use super::{Color, Style};
#[derive(Debug)]
pub enum CommandBarComponents {
CWD,
SVN,
PROMPT,
USER,
UNDEFINED,
}
#[derive(Debug)]
pub struct Sorround {
pub left: String,
pub right: String,
}
impl Sorround {
fn default() -> Self {
Self {
left: String::new(),
right: String::new(),
}
}
fn new(left: &str, right: &str) -> Self {
Self {
left: String::from(left),
right: String::from(right),
}
}
}
#[derive(Debug)]
pub struct Component {
pub color: Color,
pub style: Style,
pub sorround: Sorround,
pub component_type: CommandBarComponents,
}
impl Component {
fn from_string(component_name: &str, color: Color, style: Style, sorround: Sorround) -> Self {
let comp = match component_name.to_lowercase().as_str() {
"cwd" => CommandBarComponents::CWD,
"svn" => CommandBarComponents::SVN,
"prompt" => CommandBarComponents::PROMPT,
"user" => CommandBarComponents::USER,
_ => CommandBarComponents::UNDEFINED,
};
Self {
component_type: comp,
color,
style,
sorround,
}
}
fn default(component: CommandBarComponents) -> Self {
Self {
color: Color::default(),
style: Style::default(),
component_type: component,
sorround: Sorround::default(),
}
}
}
#[derive(Debug)]
pub struct Prompt {
pub symbol: String,
pub style: Style,
pub color: Color,
pub sorround: Sorround,
}
impl Prompt {
fn default() -> Self |
}
#[derive(Debug)]
pub struct CommandBarConfig {
pub components: Vec<Component>,
pub prompt: Prompt,
}
impl<'a> CommandBarConfig {
pub fn default() -> Self {
Self {
components: vec![
Component::default(CommandBarComponents::CWD),
Component::default(CommandBarComponents::SVN),
Component::default(CommandBarComponents::PROMPT),
],
prompt: Prompt::default(),
}
}
}
pub fn command_bar_config_reader(config_file: &str) -> Result<CommandBarConfig, ConfigError> {
let content = match fs::read_to_string(config_file) {
Ok(c) => c,
Err(_) => return Ok(CommandBarConfig::default()),
};
let config = match YamlLoader::load_from_str(content.as_str()) {
Err(err) => {
return Err(ConfigError {
message: format!("{:?}", err),
})
}
Ok(c) => c,
};
Ok(config_builder(config))
}
fn config_builder(config: Vec<Yaml>) -> CommandBarConfig {
let config = &config[0];
let component_order: Vec<&str> = config["component_order"]
.as_vec()
.unwrap()
.iter()
.map(|c| c.as_str().unwrap())
.collect();
let mut components: Vec<Component> = Vec::new();
let mut maybe_prompt: Option<Prompt> = None;
for component_name in component_order {
info!("Component: {}", component_name);
let component_config = &config[component_name];
info!("With config: {:?}", component_config);
let color = match component_config["color"].as_str() {
Some(color_string) => Color::from_string(color_string),
None => Color::default(),
};
let style = match component_config["style"].as_str() {
Some(style_string) => Style::from_string(style_string),
None => Style::default(),
};
let sorround_left = component_config["sorround"]["left"]
.as_str()
.unwrap_or_default();
let sorround_right = component_config["sorround"]["right"]
.as_str()
.unwrap_or_default();
let sorround = Sorround::new(sorround_left, sorround_right);
if component_name == "prompt" {
let symbol_string = component_config["symbol"].as_str();
info!("Promp symbol: {}", symbol_string.unwrap());
let symbol = match symbol_string {
Some(s) => String::from(s),
None => String::from(">"),
};
maybe_prompt = Some(Prompt {
symbol,
style,
color,
sorround,
});
continue;
}
components.push(Component::from_string(
component_name,
color,
style,
sorround,
))
}
match maybe_prompt {
Some(prompt) => CommandBarConfig { components, prompt },
None => CommandBarConfig {
components,
prompt: Prompt::default(),
},
}
}
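// A minimal sketch of the YAML shape `config_builder` expects. The keys
// (`component_order`, `color`, `style`, `sorround`, `symbol`) are the ones
// looked up above; the concrete values here are hypothetical:
//
// component_order:
//   - cwd
//   - svn
//   - prompt
// prompt:
//   symbol: ">"
//   color: "green"
//   sorround:
//     left: "["
//     right: "]"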
#[cfg(test)]
mod test {
use super::*;
#[test]
    fn test_config_yaml() {
        // Reading the sample config should not produce a parse error
        // (a missing file falls back to the default configuration).
        let c = command_bar_config_reader("examples/.civa.bar.yaml");
        assert!(c.is_ok());
    }
}
| {
Self {
symbol: String::from(">"),
style: Style::default(),
color: Color::default(),
sorround: Sorround::default(),
}
} |
ZipOutputStream.js | Clazz.declarePackage ("java.util.zip");
Clazz.load (["java.util.zip.DeflaterOutputStream", "$.ZipConstants", "java.util.Hashtable", "java.util.zip.CRC32", "JU.Lst"], "java.util.zip.ZipOutputStream", ["JU.ZStream", "java.io.IOException", "java.lang.Boolean", "$.IllegalArgumentException", "$.IndexOutOfBoundsException", "$.Long", "java.util.zip.Deflater", "$.ZipException"], function () {
c$ = Clazz.decorateAsClass (function () {
this.current = null;
this.xentries = null;
this.names = null;
this.crc = null;
this.written = 0;
this.locoff = 0;
this.comment = null;
this.method = 8;
this.finished = false;
this.$closed = false;
Clazz.instantialize (this, arguments);
}, java.util.zip, "ZipOutputStream", java.util.zip.DeflaterOutputStream, java.util.zip.ZipConstants);
Clazz.prepareFields (c$, function () {
this.xentries = new JU.Lst ();
this.names = new java.util.Hashtable ();
this.crc = new java.util.zip.CRC32 ();
});
c$.version = Clazz.defineMethod (c$, "version",
function (e) {
switch (e.method) {
case 8:
return 20;
case 0:
return 10;
default:
throw new java.util.zip.ZipException ("unsupported compression method");
}
}, "java.util.zip.ZipEntry");
Clazz.defineMethod (c$, "ensureOpen",
function () {
if (this.$closed) {
throw new java.io.IOException ("Stream closed");
}});
Clazz.makeConstructor (c$,
function () {
Clazz.superConstructor (this, java.util.zip.ZipOutputStream, []);
});
Clazz.defineMethod (c$, "setZOS",
function (out) {
this.setDOS (out, java.util.zip.ZipOutputStream.newDeflater ());
return this;
}, "java.io.OutputStream");
c$.newDeflater = Clazz.defineMethod (c$, "newDeflater",
function () {
return ( new java.util.zip.Deflater (2147483647)).init (-1, 0, true);
});
Clazz.defineMethod (c$, "setComment",
function (comment) {
if (comment != null) {
this.comment = JU.ZStream.getBytes (comment);
if (this.comment.length > 0xffff) throw new IllegalArgumentException ("ZIP file comment too long.");
}}, "~S");
Clazz.defineMethod (c$, "putNextEntry",
function (e) {
this.ensureOpen ();
if (this.current != null) {
this.closeEntry ();
}if (e.time == -1) {
e.setTime (System.currentTimeMillis ());
}if (e.method == -1) {
e.method = this.method;
}e.flag = 0;
switch (e.method) {
case 8:
if (e.size == -1 || e.csize == -1 || e.crc == -1) e.flag = 8;
break;
case 0:
if (e.size == -1) {
e.size = e.csize;
} else if (e.csize == -1) {
e.csize = e.size;
} else if (e.size != e.csize) {
throw new java.util.zip.ZipException ("STORED entry where compressed != uncompressed size");
}if (e.size == -1 || e.crc == -1) {
throw new java.util.zip.ZipException ("STORED entry missing size, compressed size, or crc-32");
}break;
default:
throw new java.util.zip.ZipException ("unsupported compression method");
}
if (this.names.containsKey (e.name)) {
throw new java.util.zip.ZipException ("duplicate entry: " + e.name);
}this.names.put (e.name, Boolean.TRUE);
e.flag |= 2048;
this.current = e;
this.current.offset = this.written;
this.xentries.addLast (this.current);
this.writeLOC (this.current);
}, "java.util.zip.ZipEntry");
Clazz.defineMethod (c$, "closeEntry",
function () {
this.ensureOpen ();
if (this.current != null) {
var e = this.current;
switch (e.method) {
case 8:
this.deflater.finish ();
Clazz.superCall (this, java.util.zip.ZipOutputStream, "finish", []);
if ((e.flag & 8) == 0) {
if (e.size != this.deflater.getBytesRead ()) {
throw new java.util.zip.ZipException ("invalid entry size (expected " + e.size + " but got " + this.deflater.getBytesRead () + " bytes)");
}if (e.csize != this.deflater.getBytesWritten ()) {
throw new java.util.zip.ZipException ("invalid entry compressed size (expected " + e.csize + " but got " + this.deflater.getBytesWritten () + " bytes)");
}if (e.crc != this.crc.getValue ()) {
throw new java.util.zip.ZipException ("invalid entry CRC-32 (expected 0x" + Long.toHexString (e.crc) + " but got 0x" + Long.toHexString (this.crc.getValue ()) + ")");
}} else {
e.size = this.deflater.getBytesRead ();
e.csize = this.deflater.getBytesWritten ();
e.crc = this.crc.getValue ();
this.writeEXT (e);
}this.deflater = java.util.zip.ZipOutputStream.newDeflater ();
this.written += e.csize;
break;
case 0:
if (e.size != this.written - this.locoff) {
throw new java.util.zip.ZipException ("invalid entry size (expected " + e.size + " but got " + (this.written - this.locoff) + " bytes)");
}if (e.crc != this.crc.getValue ()) {
throw new java.util.zip.ZipException ("invalid entry crc-32 (expected 0x" + Long.toHexString (e.crc) + " but got 0x" + Long.toHexString (this.crc.getValue ()) + ")");
}break;
default:
throw new java.util.zip.ZipException ("invalid compression method");
}
this.crc.reset ();
this.current = null;
}});
Clazz.defineMethod (c$, "write",
function (b, off, len) {
this.ensureOpen ();
if (off < 0 || len < 0 || off > b.length - len) {
throw new IndexOutOfBoundsException ();
} else if (len == 0) {
return;
}if (this.current == null) {
throw new java.util.zip.ZipException ("no current ZIP entry");
}var entry = this.current;
switch (entry.method) {
case 8:
Clazz.superCall (this, java.util.zip.ZipOutputStream, "write", [b, off, len]);
break;
case 0:
this.written += len;
if (this.written - this.locoff > entry.size) {
throw new java.util.zip.ZipException ("attempt to write past end of STORED entry");
}this.out.write (this.buffer, 0, len);
break;
default:
throw new java.util.zip.ZipException ("invalid compression method");
}
this.crc.update (b, off, len);
}, "~A,~N,~N");
Clazz.defineMethod (c$, "finish",
function () {
this.ensureOpen ();
if (this.finished) {
return;
}if (this.current != null) {
this.closeEntry ();
}var off = this.written;
for (var xentry, $xentry = this.xentries.iterator (); $xentry.hasNext () && ((xentry = $xentry.next ()) || true);) this.writeCEN (xentry);
this.writeEND (off, this.written - off);
this.finished = true;
});
Clazz.defineMethod (c$, "close",
function () {
if (!this.$closed) {
Clazz.superCall (this, java.util.zip.ZipOutputStream, "close", []);
this.$closed = true;
}});
Clazz.defineMethod (c$, "writeLOC",
function (entry) {
var e = entry;
var flag = e.flag;
var elen = (e.extra != null) ? e.extra.length : 0;
var hasZip64 = false;
this.writeInt (67324752);
if ((flag & 8) == 8) {
this.writeShort (java.util.zip.ZipOutputStream.version (e));
this.writeShort (flag);
this.writeShort (e.method);
this.writeInt (e.time);
this.writeInt (0);
this.writeInt (0);
this.writeInt (0);
} else {
if (e.csize >= 4294967295 || e.size >= 4294967295) {
hasZip64 = true;
this.writeShort (45);
} else {
this.writeShort (java.util.zip.ZipOutputStream.version (e));
}this.writeShort (flag);
this.writeShort (e.method);
this.writeInt (e.time);
this.writeInt (e.crc);
if (hasZip64) {
this.writeInt (4294967295);
this.writeInt (4294967295);
elen += 20;
} else {
this.writeInt (e.csize);
this.writeInt (e.size);
}}var nameBytes = JU.ZStream.getBytes (e.name);
this.writeShort (nameBytes.length);
this.writeShort (elen);
this.writeBytes (nameBytes, 0, nameBytes.length);
if (hasZip64) {
this.writeShort (1);
this.writeShort (16);
this.writeLong (e.size);
this.writeLong (e.csize);
}if (e.extra != null) {
this.writeBytes (e.extra, 0, e.extra.length);
}this.locoff = this.written;
}, "java.util.zip.ZipEntry");
Clazz.defineMethod (c$, "writeEXT",
function (e) {
this.writeInt (134695760);
this.writeInt (e.crc);
if (e.csize >= 4294967295 || e.size >= 4294967295) {
this.writeLong (e.csize);
this.writeLong (e.size);
} else {
this.writeInt (e.csize);
this.writeInt (e.size);
}}, "java.util.zip.ZipEntry");
Clazz.defineMethod (c$, "writeCEN",
function (entry) {
var e = entry;
var flag = e.flag;
var version = java.util.zip.ZipOutputStream.version (e);
var csize = e.csize;
var size = e.size;
var offset = entry.offset;
var e64len = 0;
var hasZip64 = false;
if (e.csize >= 4294967295) {
csize = 4294967295;
e64len += 8;
hasZip64 = true;
}if (e.size >= 4294967295) {
size = 4294967295;
e64len += 8;
hasZip64 = true;
}if (entry.offset >= 4294967295) {
offset = 4294967295;
e64len += 8;
hasZip64 = true;
}this.writeInt (33639248);
if (hasZip64) {
this.writeShort (45);
this.writeShort (45);
} else {
this.writeShort (version);
this.writeShort (version);
}this.writeShort (flag);
this.writeShort (e.method);
this.writeInt (e.time);
this.writeInt (e.crc);
this.writeInt (csize);
this.writeInt (size);
var nameBytes = JU.ZStream.getBytes (e.name);
this.writeShort (nameBytes.length);
if (hasZip64) {
this.writeShort (e64len + 4 + (e.extra != null ? e.extra.length : 0));
} else {
this.writeShort (e.extra != null ? e.extra.length : 0);
}var commentBytes;
if (e.comment != null) {
commentBytes = JU.ZStream.getBytes (e.comment);
this.writeShort (Math.min (commentBytes.length, 0xffff));
} else {
commentBytes = null;
this.writeShort (0);
}this.writeShort (0);
this.writeShort (0);
this.writeInt (0);
this.writeInt (offset);
this.writeBytes (nameBytes, 0, nameBytes.length);
if (hasZip64) {
this.writeShort (1);
this.writeShort (e64len);
if (size == 4294967295) this.writeLong (e.size);
if (csize == 4294967295) this.writeLong (e.csize);
if (offset == 4294967295) this.writeLong (entry.offset);
}if (e.extra != null) {
this.writeBytes (e.extra, 0, e.extra.length);
}if (commentBytes != null) {
this.writeBytes (commentBytes, 0, Math.min (commentBytes.length, 0xffff));
}}, "java.util.zip.ZipEntry");
Clazz.defineMethod (c$, "writeEND",
function (off, len) {
var hasZip64 = false;
var xlen = len;
var xoff = off;
if (xlen >= 4294967295) {
xlen = 4294967295;
hasZip64 = true;
}if (xoff >= 4294967295) {
xoff = 4294967295;
hasZip64 = true;
}var count = this.xentries.size ();
if (count >= 65535) {
count = 65535;
hasZip64 = true;
}if (hasZip64) {
var off64 = this.written;
this.writeInt (101075792);
this.writeLong (44);
this.writeShort (45);
this.writeShort (45);
this.writeInt (0);
this.writeInt (0);
this.writeLong (this.xentries.size ());
this.writeLong (this.xentries.size ());
this.writeLong (len);
this.writeLong (off);
this.writeInt (117853008);
this.writeInt (0);
this.writeLong (off64);
this.writeInt (1);
}this.writeInt (101010256);
this.writeShort (0);
this.writeShort (0);
this.writeShort (count);
this.writeShort (count);
this.writeInt (xlen);
this.writeInt (xoff);
if (this.comment != null) {
this.writeShort (this.comment.length);
this.writeBytes (this.comment, 0, this.comment.length);
} else {
this.writeShort (0);
}}, "~N,~N");
Clazz.defineMethod (c$, "writeShort",
function (v) {
var out = this.out;
{
out.writeByteAsInt((v >>> 0) & 0xff);
| }this.written += 2;
}, "~N");
Clazz.defineMethod (c$, "writeInt",
function (v) {
var out = this.out;
{
out.writeByteAsInt((v >>> 0) & 0xff);
out.writeByteAsInt((v >>> 8) & 0xff);
out.writeByteAsInt((v >>> 16) & 0xff);
out.writeByteAsInt((v >>> 24) & 0xff);
}this.written += 4;
}, "~N");
Clazz.defineMethod (c$, "writeLong",
function (v) {
var out = this.out;
{
out.writeByteAsInt((v >>> 0) & 0xff);
out.writeByteAsInt((v >>> 8) & 0xff);
out.writeByteAsInt((v >>> 16) & 0xff);
out.writeByteAsInt((v >>> 24) & 0xff);
out.writeByteAsInt(0);
out.writeByteAsInt(0);
out.writeByteAsInt(0);
out.writeByteAsInt(0);
}this.written += 8;
}, "~N");
Clazz.defineMethod (c$, "writeBytes",
function (b, off, len) {
this.out.write (b, off, len);
this.written += len;
}, "~A,~N,~N");
Clazz.defineStatics (c$,
"STORED", 0,
"DEFLATED", 8);
}); | out.writeByteAsInt((v >>> 8) & 0xff);
|
service_backup_configuration_info.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .backup_configuration_info import BackupConfigurationInfo
class ServiceBackupConfigurationInfo(BackupConfigurationInfo):
"""Backup configuration information for a specific Service Fabric service
specifying what backup policy is being applied and suspend description, if
any.
:param policy_name: The name of the backup policy which is applicable to
this Service Fabric application or service or partition.
:type policy_name: str
:param policy_inherited_from: Specifies the scope at which the backup
policy is applied. Possible values include: 'Invalid', 'Partition',
'Service', 'Application'
:type policy_inherited_from: str or
~azure.servicefabric.models.BackupPolicyScope
:param suspension_info: Describes the backup suspension details.
:type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo
:param kind: Constant filled by server.
:type kind: str
:param service_name: The full name of the service with 'fabric:' URI
scheme.
:type service_name: str
"""
_validation = {
'kind': {'required': True},
}
_attribute_map = {
'policy_name': {'key': 'PolicyName', 'type': 'str'},
'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'},
'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'},
'kind': {'key': 'Kind', 'type': 'str'},
'service_name': {'key': 'ServiceName', 'type': 'str'},
}
def | (self, policy_name=None, policy_inherited_from=None, suspension_info=None, service_name=None):
super(ServiceBackupConfigurationInfo, self).__init__(policy_name=policy_name, policy_inherited_from=policy_inherited_from, suspension_info=suspension_info)
self.service_name = service_name
self.kind = 'Service'
| __init__ |
nats.go | // Copyright 2012-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// A Go client for the NATS messaging system (https://nats.io).
package nats
import (
"bufio"
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/url"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/jwt"
"github.com/nats-io/nats.go/util"
"github.com/nats-io/nkeys"
"github.com/nats-io/nuid"
)
// Default Constants
const (
Version = "1.9.2"
DefaultURL = "nats://127.0.0.1:4222"
DefaultPort = 4222
DefaultMaxReconnect = 60
DefaultReconnectWait = 2 * time.Second
DefaultTimeout = 2 * time.Second
DefaultPingInterval = 2 * time.Minute
DefaultMaxPingOut = 2
DefaultMaxChanLen = 8192 // 8k
DefaultReconnectBufSize = 8 * 1024 * 1024 // 8MB
RequestChanLen = 8
DefaultDrainTimeout = 30 * time.Second
LangString = "go"
)
const (
// STALE_CONNECTION is for detection and proper handling of stale connections.
STALE_CONNECTION = "stale connection"
// PERMISSIONS_ERR is for when nats server subject authorization has failed.
PERMISSIONS_ERR = "permissions violation"
// AUTHORIZATION_ERR is for when nats server user authorization has failed.
AUTHORIZATION_ERR = "authorization violation"
// AUTHENTICATION_EXPIRED_ERR is for when nats server user authorization has expired.
AUTHENTICATION_EXPIRED_ERR = "user authentication expired"
)
// Errors
var (
ErrConnectionClosed = errors.New("nats: connection closed")
ErrConnectionDraining = errors.New("nats: connection draining")
ErrDrainTimeout = errors.New("nats: draining connection timed out")
ErrConnectionReconnecting = errors.New("nats: connection reconnecting")
ErrSecureConnRequired = errors.New("nats: secure connection required")
ErrSecureConnWanted = errors.New("nats: secure connection not available")
ErrBadSubscription = errors.New("nats: invalid subscription")
ErrTypeSubscription = errors.New("nats: invalid subscription type")
ErrBadSubject = errors.New("nats: invalid subject")
ErrBadQueueName = errors.New("nats: invalid queue name")
ErrSlowConsumer = errors.New("nats: slow consumer, messages dropped")
ErrTimeout = errors.New("nats: timeout")
ErrBadTimeout = errors.New("nats: timeout invalid")
ErrAuthorization = errors.New("nats: authorization violation")
ErrAuthExpired = errors.New("nats: authentication expired")
ErrNoServers = errors.New("nats: no servers available for connection")
ErrJsonParse = errors.New("nats: connect message, json parse error")
ErrChanArg = errors.New("nats: argument needs to be a channel type")
ErrMaxPayload = errors.New("nats: maximum payload exceeded")
ErrMaxMessages = errors.New("nats: maximum messages delivered")
ErrSyncSubRequired = errors.New("nats: illegal call on an async subscription")
ErrMultipleTLSConfigs = errors.New("nats: multiple tls.Configs not allowed")
ErrNoInfoReceived = errors.New("nats: protocol exception, INFO not received")
ErrReconnectBufExceeded = errors.New("nats: outbound buffer limit exceeded")
ErrInvalidConnection = errors.New("nats: invalid connection")
ErrInvalidMsg = errors.New("nats: invalid message or message nil")
ErrInvalidArg = errors.New("nats: invalid argument")
ErrInvalidContext = errors.New("nats: invalid context")
ErrNoDeadlineContext = errors.New("nats: context requires a deadline")
ErrNoEchoNotSupported = errors.New("nats: no echo option not supported by this server")
ErrClientIDNotSupported = errors.New("nats: client ID not supported by this server")
ErrUserButNoSigCB = errors.New("nats: user callback defined without a signature handler")
ErrNkeyButNoSigCB = errors.New("nats: nkey defined without a signature handler")
ErrNoUserCB = errors.New("nats: user callback not defined")
ErrNkeyAndUser = errors.New("nats: user callback and nkey defined")
ErrNkeysNotSupported = errors.New("nats: nkeys not supported by the server")
ErrStaleConnection = errors.New("nats: " + STALE_CONNECTION)
ErrTokenAlreadySet = errors.New("nats: token and token handler both set")
ErrMsgNotBound = errors.New("nats: message is not bound to subscription/connection")
ErrMsgNoReply = errors.New("nats: message does not have a reply")
)
func init() {
rand.Seed(time.Now().UnixNano())
}
// GetDefaultOptions returns default configuration options for the client.
func GetDefaultOptions() Options {
return Options{
AllowReconnect: true,
MaxReconnect: DefaultMaxReconnect,
ReconnectWait: DefaultReconnectWait,
Timeout: DefaultTimeout,
PingInterval: DefaultPingInterval,
MaxPingsOut: DefaultMaxPingOut,
SubChanLen: DefaultMaxChanLen,
ReconnectBufSize: DefaultReconnectBufSize,
DrainTimeout: DefaultDrainTimeout,
}
}
// DEPRECATED: Use GetDefaultOptions() instead.
// DefaultOptions is not safe for use by multiple clients.
// For details see #308.
var DefaultOptions = GetDefaultOptions()
// Status represents the state of the connection.
type Status int
const (
DISCONNECTED = Status(iota)
CONNECTED
CLOSED
RECONNECTING
CONNECTING
DRAINING_SUBS
DRAINING_PUBS
)
// ConnHandler is used for asynchronous events such as
// disconnected and closed connections.
type ConnHandler func(*Conn)
// ConnErrHandler is used to process asynchronous events like
// disconnected connection with the error (if any).
type ConnErrHandler func(*Conn, error)
// ErrHandler is used to process asynchronous errors encountered
// while processing inbound messages.
type ErrHandler func(*Conn, *Subscription, error)
// UserJWTHandler is used to fetch and return the account signed
// JWT for this user.
type UserJWTHandler func() (string, error)
// SignatureHandler is used to sign a nonce from the server while
// authenticating with nkeys. The user should sign the nonce and
// return the raw signature. The client will base64 encode this to
// send to the server.
type SignatureHandler func([]byte) ([]byte, error)
// AuthTokenHandler is used to generate a new token.
type AuthTokenHandler func() string
// asyncCB is used to preserve order for async callbacks.
type asyncCB struct {
f func()
next *asyncCB
}
type asyncCallbacksHandler struct {
mu sync.Mutex
cond *sync.Cond
head *asyncCB
tail *asyncCB
}
// Option is a function on the options for a connection.
type Option func(*Options) error
// CustomDialer can be used to specify any dialer, not necessarily
// a *net.Dialer.
type CustomDialer interface {
Dial(network, address string) (net.Conn, error)
}
// Options can be used to create a customized connection.
type Options struct {
// Url represents a single NATS server url to which the client
// will be connecting. If the Servers option is also set, it
// then becomes the first server in the Servers array.
Url string
// Servers is a configured set of servers which this client
// will use when attempting to connect.
Servers []string
// NoRandomize configures whether we will randomize the
// server pool.
NoRandomize bool
// NoEcho configures whether the server will echo back messages
// that are sent on this connection if we also have matching subscriptions.
// Note this is supported on servers >= version 1.2. Proto 1 or greater.
NoEcho bool
// Name is an optional name label which will be sent to the server
// on CONNECT to identify the client.
Name string
// Verbose signals the server to send an OK ack for commands
// successfully processed by the server.
Verbose bool
// Pedantic signals the server whether it should be doing further
// validation of subjects.
Pedantic bool
// Secure enables TLS secure connections that skip server
// verification by default. NOT RECOMMENDED.
Secure bool
// TLSConfig is a custom TLS configuration to use for secure
// transports.
TLSConfig *tls.Config
// AllowReconnect enables reconnection logic to be used when we
// encounter a disconnect from the current server.
AllowReconnect bool
// MaxReconnect sets the number of reconnect attempts that will be
// tried before giving up. If negative, then it will never give up
// trying to reconnect.
MaxReconnect int
// ReconnectWait sets the time to backoff after attempting a reconnect
// to a server that we were already connected to previously.
ReconnectWait time.Duration
// Timeout sets the timeout for a Dial operation on a connection.
Timeout time.Duration
// DrainTimeout sets the timeout for a Drain Operation to complete.
DrainTimeout time.Duration
// FlusherTimeout is the maximum time to wait for write operations
// to the underlying connection to complete (including the flusher loop).
FlusherTimeout time.Duration
// PingInterval is the period at which the client will be sending ping
// commands to the server, disabled if 0 or negative.
PingInterval time.Duration
// MaxPingsOut is the maximum number of pending ping commands that can
// be awaiting a response before raising an ErrStaleConnection error.
MaxPingsOut int
// ClosedCB sets the closed handler that is called when a client will
// no longer be connected.
ClosedCB ConnHandler
// DisconnectedCB sets the disconnected handler that is called
// whenever the connection is disconnected.
// Will not be called if DisconnectedErrCB is set
// DEPRECATED. Use DisconnectedErrCB which passes error that caused
// the disconnect event.
DisconnectedCB ConnHandler
// DisconnectedErrCB sets the disconnected error handler that is called
// whenever the connection is disconnected.
// Disconnected error could be nil, for instance when user explicitly closes the connection.
// DisconnectedCB will not be called if DisconnectedErrCB is set
DisconnectedErrCB ConnErrHandler
// ReconnectedCB sets the reconnected handler called whenever
// the connection is successfully reconnected.
ReconnectedCB ConnHandler
// DiscoveredServersCB sets the callback that is invoked whenever a new
// server has joined the cluster.
DiscoveredServersCB ConnHandler
// AsyncErrorCB sets the async error handler (e.g. slow consumer errors)
AsyncErrorCB ErrHandler
// ReconnectBufSize is the size of the backing bufio during reconnect.
// Once this has been exhausted publish operations will return an error.
ReconnectBufSize int
// SubChanLen is the size of the buffered channel used between the socket
// Go routine and the message delivery for SyncSubscriptions.
// NOTE: This does not affect AsyncSubscriptions which are
// dictated by PendingLimits()
SubChanLen int
// UserJWT sets the callback handler that will fetch a user's JWT.
UserJWT UserJWTHandler
// Nkey sets the public nkey that will be used to authenticate
// when connecting to the server. UserJWT and Nkey are mutually exclusive
// and if defined, UserJWT will take precedence.
Nkey string
// SignatureCB designates the function used to sign the nonce
// presented from the server.
SignatureCB SignatureHandler
// User sets the username to be used when connecting to the server.
User string
// Password sets the password to be used when connecting to a server.
Password string
// Token sets the token to be used when connecting to a server.
Token string
// TokenHandler designates the function used to generate the token to be used when connecting to a server.
TokenHandler AuthTokenHandler
// Dialer allows a custom net.Dialer when forming connections.
// DEPRECATED: should use CustomDialer instead.
Dialer *net.Dialer
// CustomDialer allows to specify a custom dialer (not necessarily
// a *net.Dialer).
CustomDialer CustomDialer
// UseOldRequestStyle forces the old method of Requests that utilize
// a new Inbox and a new Subscription for each request.
UseOldRequestStyle bool
// NoCallbacksAfterClientClose allows preventing the invocation of
// callbacks after Close() is called. Client won't receive notifications
// when Close is invoked by user code. Default is to invoke the callbacks.
NoCallbacksAfterClientClose bool
}
const (
// Scratch storage for assembling protocol headers
scratchSize = 512
// The size of the bufio reader/writer on top of the socket.
defaultBufSize = 32768
// The buffered size of the flush "kick" channel
flushChanSize = 1
// Default server pool size
srvPoolSize = 4
// NUID size
nuidSize = 22
// Default port used if none is specified in given URL(s)
defaultPortString = "4222"
)
// A Conn represents a bare connection to a nats-server.
// It can send and receive []byte payloads.
// The connection is safe to use in multiple Go routines concurrently.
type Conn struct {
// Keep all members for which we use atomic at the beginning of the
// struct and make sure they are all 64bits (or use padding if necessary).
// atomic.* functions crash on 32bit machines if operand is not aligned
// at 64bit. See https://github.com/golang/go/issues/599
Statistics
mu sync.RWMutex
// Opts holds the configuration of the Conn.
// Modifying the configuration of a running Conn is a race.
Opts Options
wg sync.WaitGroup
srvPool []*srv
current *srv
urls map[string]struct{} // Keep track of all known URLs (used by processInfo)
conn net.Conn
bw *bufio.Writer
pending *bytes.Buffer
fch chan struct{}
info serverInfo
ssid int64
subsMu sync.RWMutex
subs map[int64]*Subscription
ach *asyncCallbacksHandler
pongs []chan struct{}
scratch [scratchSize]byte
status Status
initc bool // true if the connection is performing the initial connect
err error
ps *parseState
ptmr *time.Timer
pout int
ar bool // abort reconnect
// New style response handler
respSub string // The wildcard subject
respScanf string // The scanf template to extract mux token
respMux *Subscription // A single response subscription
respMap map[string]chan *Msg // Request map for the response msg channels
respRand *rand.Rand // Used for generating suffix
}
// A Subscription represents interest in a given subject.
type Subscription struct {
mu sync.Mutex
sid int64
// Subject that represents this subscription. This can be different
// than the received subject inside a Msg if this is a wildcard.
Subject string
// Optional queue group name. If present, all subscriptions with the
// same name will form a distributed queue, and each message will
// only be processed by one member of the group.
Queue string
delivered uint64
max uint64
conn *Conn
mcb MsgHandler
mch chan *Msg
closed bool
sc bool
connClosed bool
// Type of Subscription
typ SubscriptionType
// Async linked list
pHead *Msg
pTail *Msg
pCond *sync.Cond
// Pending stats, async subscriptions, high-speed etc.
pMsgs int
pBytes int
pMsgsMax int
pBytesMax int
pMsgsLimit int
pBytesLimit int
dropped int
}
// Msg is a structure used by Subscribers and PublishMsg().
type Msg struct {
Subject string
Reply string
Data []byte
Sub *Subscription
next *Msg
barrier *barrierInfo
}
type barrierInfo struct {
refs int64
f func()
}
// Tracks various stats received and sent on this connection,
// including counts for messages and bytes.
type Statistics struct {
InMsgs uint64
OutMsgs uint64
InBytes uint64
OutBytes uint64
Reconnects uint64
}
// Tracks individual backend servers.
type srv struct {
url *url.URL
didConnect bool
reconnects int
lastAttempt time.Time
lastErr error
isImplicit bool
tlsName string
}
type serverInfo struct {
Id string `json:"server_id"`
Host string `json:"host"`
Port uint `json:"port"`
Version string `json:"version"`
AuthRequired bool `json:"auth_required"`
TLSRequired bool `json:"tls_required"`
MaxPayload int64 `json:"max_payload"`
ConnectURLs []string `json:"connect_urls,omitempty"`
Proto int `json:"proto,omitempty"`
CID uint64 `json:"client_id,omitempty"`
Nonce string `json:"nonce,omitempty"`
}
const (
// clientProtoZero is the original client protocol from 2009.
// http://nats.io/documentation/internals/nats-protocol/
/* clientProtoZero */ _ = iota
	// clientProtoInfo signals a client can receive more than the original INFO block.
// This can be used to update clients on other cluster members, etc.
clientProtoInfo
)
type connectInfo struct {
Verbose bool `json:"verbose"`
Pedantic bool `json:"pedantic"`
UserJWT string `json:"jwt,omitempty"`
Nkey string `json:"nkey,omitempty"`
Signature string `json:"sig,omitempty"`
User string `json:"user,omitempty"`
Pass string `json:"pass,omitempty"`
Token string `json:"auth_token,omitempty"`
TLS bool `json:"tls_required"`
Name string `json:"name"`
Lang string `json:"lang"`
Version string `json:"version"`
Protocol int `json:"protocol"`
Echo bool `json:"echo"`
}
// MsgHandler is a callback function that processes messages delivered to
// asynchronous subscribers.
type MsgHandler func(msg *Msg)
// Connect will attempt to connect to the NATS system.
// The url can contain username/password semantics. e.g. nats://derek:pass@localhost:4222
// Comma separated arrays are also supported, e.g. urlA, urlB.
// Options start with the defaults but can be overridden.
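//
// A minimal usage sketch (assuming a nats-server reachable at DefaultURL;
// Name and Timeout are Option helpers defined later in this file):
//
//	nc, err := Connect(DefaultURL, Name("example-client"), Timeout(5*time.Second))
//	if err != nil {
//		// handle the connection error
//	}
//	defer nc.Close()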
func Connect(url string, options ...Option) (*Conn, error) {
opts := GetDefaultOptions()
opts.Servers = processUrlString(url)
for _, opt := range options {
if opt != nil {
if err := opt(&opts); err != nil {
return nil, err
}
}
}
return opts.Connect()
}
// Options that can be passed to Connect.
// Name is an Option to set the client name.
func Name(name string) Option {
return func(o *Options) error {
o.Name = name
return nil
}
}
// Secure is an Option to enable TLS secure connections that skip server verification by default.
// Pass a TLS Configuration for proper TLS.
// NOTE: This should NOT be used in a production setting.
func Secure(tls ...*tls.Config) Option {
return func(o *Options) error {
o.Secure = true
// Use of variadic just simplifies testing scenarios. We only take the first one.
if len(tls) > 1 {
return ErrMultipleTLSConfigs
}
if len(tls) == 1 {
o.TLSConfig = tls[0]
}
return nil
}
}
// RootCAs is a helper option to provide the RootCAs pool from a list of filenames.
// If Secure is not already set this will set it as well.
func RootCAs(file ...string) Option {
return func(o *Options) error {
pool := x509.NewCertPool()
for _, f := range file {
rootPEM, err := ioutil.ReadFile(f)
if err != nil || rootPEM == nil {
return fmt.Errorf("nats: error loading or parsing rootCA file: %v", err)
}
ok := pool.AppendCertsFromPEM(rootPEM)
if !ok {
return fmt.Errorf("nats: failed to parse root certificate from %q", f)
}
}
if o.TLSConfig == nil {
o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
}
o.TLSConfig.RootCAs = pool
o.Secure = true
return nil
}
}
// ClientCert is a helper option to provide the client certificate from a file.
// If Secure is not already set this will set it as well.
func ClientCert(certFile, keyFile string) Option {
return func(o *Options) error {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return fmt.Errorf("nats: error loading client certificate: %v", err)
}
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return fmt.Errorf("nats: error parsing client certificate: %v", err)
}
if o.TLSConfig == nil {
o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
}
o.TLSConfig.Certificates = []tls.Certificate{cert}
o.Secure = true
return nil
}
}
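// A hedged sketch combining the TLS-related options above; the certificate
// and key paths are placeholders, not files that ship with this package:
//
//	nc, err := Connect("tls://127.0.0.1:4222",
//		RootCAs("./ca.pem"),
//		ClientCert("./client-cert.pem", "./client-key.pem"))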
// NoReconnect is an Option to turn off reconnect behavior.
func NoReconnect() Option {
return func(o *Options) error {
o.AllowReconnect = false
return nil
}
}
// DontRandomize is an Option to turn off randomizing the server pool.
func DontRandomize() Option {
return func(o *Options) error {
o.NoRandomize = true
return nil
}
}
// NoEcho is an Option to turn off messages echoing back from a server.
// Note this is supported on servers >= version 1.2. Proto 1 or greater.
func NoEcho() Option {
return func(o *Options) error {
o.NoEcho = true
return nil
}
}
// ReconnectWait is an Option to set the wait time between reconnect attempts.
func ReconnectWait(t time.Duration) Option {
return func(o *Options) error {
o.ReconnectWait = t
return nil
}
}
// MaxReconnects is an Option to set the maximum number of reconnect attempts.
func MaxReconnects(max int) Option {
return func(o *Options) error {
o.MaxReconnect = max
return nil
}
}
// PingInterval is an Option to set the period for client ping commands.
func PingInterval(t time.Duration) Option {
return func(o *Options) error {
o.PingInterval = t
return nil
}
}
// MaxPingsOutstanding is an Option to set the maximum number of ping requests
// that can go un-answered by the server before closing the connection.
func MaxPingsOutstanding(max int) Option {
return func(o *Options) error {
o.MaxPingsOut = max
return nil
}
}
// ReconnectBufSize sets the buffer size of messages kept while busy reconnecting.
func ReconnectBufSize(size int) Option {
return func(o *Options) error {
o.ReconnectBufSize = size
return nil
}
}
// Timeout is an Option to set the timeout for Dial on a connection.
func Timeout(t time.Duration) Option {
return func(o *Options) error {
o.Timeout = t
return nil
}
}
// FlusherTimeout is an Option to set the write (and flush) timeout on a connection.
func FlusherTimeout(t time.Duration) Option {
return func(o *Options) error {
o.FlusherTimeout = t
return nil
}
}
// DrainTimeout is an Option to set the timeout for draining a connection.
func DrainTimeout(t time.Duration) Option {
return func(o *Options) error {
o.DrainTimeout = t
return nil
}
}
// DisconnectErrHandler is an Option to set the disconnected error handler.
func DisconnectErrHandler(cb ConnErrHandler) Option {
return func(o *Options) error {
o.DisconnectedErrCB = cb
return nil
}
}
// DisconnectHandler is an Option to set the disconnected handler.
// DEPRECATED: Use DisconnectErrHandler.
func DisconnectHandler(cb ConnHandler) Option {
return func(o *Options) error {
o.DisconnectedCB = cb
return nil
}
}
// ReconnectHandler is an Option to set the reconnected handler.
func ReconnectHandler(cb ConnHandler) Option {
return func(o *Options) error {
o.ReconnectedCB = cb
return nil
}
}
// ClosedHandler is an Option to set the closed handler.
func ClosedHandler(cb ConnHandler) Option {
return func(o *Options) error {
o.ClosedCB = cb
return nil
}
}
// DiscoveredServersHandler is an Option to set the new servers handler.
func DiscoveredServersHandler(cb ConnHandler) Option {
return func(o *Options) error {
o.DiscoveredServersCB = cb
return nil
}
}
// ErrorHandler is an Option to set the async error handler.
func ErrorHandler(cb ErrHandler) Option {
return func(o *Options) error {
o.AsyncErrorCB = cb
return nil
}
}
// UserInfo is an Option to set the username and password to
// use when not included directly in the URLs.
func UserInfo(user, password string) Option {
return func(o *Options) error {
o.User = user
o.Password = password
return nil
}
}
// Token is an Option to set the token to use
// when a token is not included directly in the URLs
// and when a token handler is not provided.
func Token(token string) Option {
return func(o *Options) error {
if o.TokenHandler != nil {
return ErrTokenAlreadySet
}
o.Token = token
return nil
}
}
// TokenHandler is an Option to set the token handler to use
// when a token is not included directly in the URLs
// and when a token is not set.
func TokenHandler(cb AuthTokenHandler) Option {
return func(o *Options) error {
if o.Token != "" {
return ErrTokenAlreadySet
}
o.TokenHandler = cb
return nil
}
}
// UserCredentials is a convenience function that takes a filename
// for a user's JWT and a filename for the user's private Nkey seed.
func UserCredentials(userOrChainedFile string, seedFiles ...string) Option {
userCB := func() (string, error) {
return userFromFile(userOrChainedFile)
}
var keyFile string
if len(seedFiles) > 0 {
keyFile = seedFiles[0]
} else {
keyFile = userOrChainedFile
}
sigCB := func(nonce []byte) ([]byte, error) {
return sigHandler(nonce, keyFile)
}
return UserJWT(userCB, sigCB)
}
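// Illustrative usage, assuming a chained credentials file that contains both
// the user JWT and the Nkey seed (the path below is a placeholder):
//
//	nc, err := Connect(url, UserCredentials("./user.creds"))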
// UserJWT will set the callbacks to retrieve the user's JWT and
// the signature callback to sign the server nonce. This and the Nkey
// option are mutually exclusive.
func UserJWT(userCB UserJWTHandler, sigCB SignatureHandler) Option {
return func(o *Options) error {
if userCB == nil {
return ErrNoUserCB
}
if sigCB == nil {
return ErrUserButNoSigCB
}
o.UserJWT = userCB
o.SignatureCB = sigCB
return nil
}
}
// Nkey will set the public Nkey and the signature callback to
// sign the server nonce.
func Nkey(pubKey string, sigCB SignatureHandler) Option {
return func(o *Options) error {
o.Nkey = pubKey
o.SignatureCB = sigCB
if pubKey != "" && sigCB == nil {
return ErrNkeyButNoSigCB
}
return nil
}
}
// SyncQueueLen will set the maximum queue len for the internal
// channel used for SubscribeSync().
func SyncQueueLen(max int) Option {
return func(o *Options) error {
o.SubChanLen = max
return nil
}
}
// Dialer is an Option to set the dialer which will be used when
// attempting to establish a connection.
// DEPRECATED: Should use CustomDialer instead.
func Dialer(dialer *net.Dialer) Option {
return func(o *Options) error {
o.Dialer = dialer
return nil
}
}
// SetCustomDialer is an Option to set a custom dialer which will be
// used when attempting to establish a connection. If both Dialer
// and CustomDialer are specified, CustomDialer takes precedence.
func SetCustomDialer(dialer CustomDialer) Option {
return func(o *Options) error {
o.CustomDialer = dialer
return nil
}
}
// UseOldRequestStyle is an Option to force usage of the old Request style.
func UseOldRequestStyle() Option {
return func(o *Options) error {
o.UseOldRequestStyle = true
return nil
}
}
// NoCallbacksAfterClientClose is an Option to disable callbacks when user code
// calls Close(). If the close is initiated by any other condition, callbacks,
// if any, will still be invoked.
func NoCallbacksAfterClientClose() Option {
return func(o *Options) error {
o.NoCallbacksAfterClientClose = true
return nil
}
}
// Handler processing
// SetDisconnectHandler will set the disconnect event handler.
// DEPRECATED: Use SetDisconnectErrHandler
func (nc *Conn) SetDisconnectHandler(dcb ConnHandler) {
if nc == nil {
return
}
nc.mu.Lock()
defer nc.mu.Unlock()
nc.Opts.DisconnectedCB = dcb
}
// SetDisconnectErrHandler will set the disconnect event handler.
func (nc *Conn) SetDisconnectErrHandler(dcb ConnErrHandler) {
if nc == nil {
return
}
nc.mu.Lock()
defer nc.mu.Unlock()
nc.Opts.DisconnectedErrCB = dcb
}
// SetReconnectHandler will set the reconnect event handler.
func (nc *Conn) SetReconnectHandler(rcb ConnHandler) {
if nc == nil {
return
}
nc.mu.Lock()
defer nc.mu.Unlock()
nc.Opts.ReconnectedCB = rcb
}
// SetDiscoveredServersHandler will set the discovered servers handler.
func (nc *Conn) SetDiscoveredServersHandler(dscb ConnHandler) {
if nc == nil {
return
}
nc.mu.Lock()
defer nc.mu.Unlock()
nc.Opts.DiscoveredServersCB = dscb
}
// SetClosedHandler will set the closed event handler.
func (nc *Conn) SetClosedHandler(cb ConnHandler) {
if nc == nil {
return
}
nc.mu.Lock()
defer nc.mu.Unlock()
nc.Opts.ClosedCB = cb
}
// SetErrorHandler will set the async error handler.
func (nc *Conn) SetErrorHandler(cb ErrHandler) {
if nc == nil {
return
}
nc.mu.Lock()
defer nc.mu.Unlock()
nc.Opts.AsyncErrorCB = cb
}
// Process the url string argument to Connect.
// Return an array of urls, even if only one.
func processUrlString(url string) []string {
urls := strings.Split(url, ",")
for i, s := range urls {
urls[i] = strings.TrimSpace(s)
}
return urls
}
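// For example, the behavior follows directly from the split and trim above:
//
//	processUrlString("nats://a:4222, nats://b:4223")
//	// => []string{"nats://a:4222", "nats://b:4223"}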
// Connect will attempt to connect to a NATS server with multiple options.
func (o Options) Connect() (*Conn, error) {
nc := &Conn{Opts: o}
// Some default options processing.
if nc.Opts.MaxPingsOut == 0 {
nc.Opts.MaxPingsOut = DefaultMaxPingOut
}
// Allow old default for channel length to work correctly.
if nc.Opts.SubChanLen == 0 {
nc.Opts.SubChanLen = DefaultMaxChanLen
}
// Default ReconnectBufSize
if nc.Opts.ReconnectBufSize == 0 {
nc.Opts.ReconnectBufSize = DefaultReconnectBufSize
}
// Ensure that Timeout is not 0
if nc.Opts.Timeout == 0 {
nc.Opts.Timeout = DefaultTimeout
}
// Check first for user jwt callback being defined and nkey.
if nc.Opts.UserJWT != nil && nc.Opts.Nkey != "" {
return nil, ErrNkeyAndUser
}
// Check if we have an nkey but no signature callback defined.
if nc.Opts.Nkey != "" && nc.Opts.SignatureCB == nil {
return nil, ErrNkeyButNoSigCB
}
// Allow custom Dialer for connecting using DialTimeout by default
if nc.Opts.Dialer == nil {
nc.Opts.Dialer = &net.Dialer{
Timeout: nc.Opts.Timeout,
}
}
if err := nc.setupServerPool(); err != nil {
return nil, err
}
// Create the async callback handler.
nc.ach = &asyncCallbacksHandler{}
nc.ach.cond = sync.NewCond(&nc.ach.mu)
if err := nc.connect(); err != nil {
return nil, err
}
// Spin up the async cb dispatcher on success
go nc.ach.asyncCBDispatcher()
return nc, nil
}
const (
_CRLF_ = "\r\n"
_EMPTY_ = ""
_SPC_ = " "
_PUB_P_ = "PUB "
)
const (
_OK_OP_ = "+OK"
_ERR_OP_ = "-ERR"
_PONG_OP_ = "PONG"
_INFO_OP_ = "INFO"
)
const (
conProto = "CONNECT %s" + _CRLF_
pingProto = "PING" + _CRLF_
pongProto = "PONG" + _CRLF_
subProto = "SUB %s %s %d" + _CRLF_
unsubProto = "UNSUB %d %s" + _CRLF_
okProto = _OK_OP_ + _CRLF_
)
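// As a concrete illustration of the formats above (example values are
// placeholders): a subscription on subject "foo" with no queue group and
// sid 7 is framed as
//
//	fmt.Sprintf(subProto, "foo", "", 7) // "SUB foo  7\r\n"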
// Return the currently selected server
func (nc *Conn) currentServer() (int, *srv) {
for i, s := range nc.srvPool {
if s == nil {
continue
}
if s == nc.current {
return i, s
}
}
return -1, nil
}
// Pop the current server and put it onto the end of the list. Select the head of
// the list as long as the number of reconnect attempts is under MaxReconnect.
func (nc *Conn) selectNextServer() (*srv, error) {
i, s := nc.currentServer()
if i < 0 {
return nil, ErrNoServers
}
sp := nc.srvPool
num := len(sp)
copy(sp[i:num-1], sp[i+1:num])
maxReconnect := nc.Opts.MaxReconnect
if maxReconnect < 0 || s.reconnects < maxReconnect {
nc.srvPool[num-1] = s
} else {
nc.srvPool = sp[0 : num-1]
}
if len(nc.srvPool) <= 0 {
nc.current = nil
return nil, ErrNoServers
}
nc.current = nc.srvPool[0]
return nc.srvPool[0], nil
}
// Will assign the correct server to nc.current
func (nc *Conn) pickServer() error {
nc.current = nil
if len(nc.srvPool) <= 0 {
return ErrNoServers
}
for _, s := range nc.srvPool {
if s != nil {
nc.current = s
return nil
}
}
return ErrNoServers
}
const tlsScheme = "tls"
// Create the server pool using the options given.
// We will place a Url option first, followed by any
// Server Options. We will randomize the server pool unless
// the NoRandomize flag is set.
func (nc *Conn) setupServerPool() error {
nc.srvPool = make([]*srv, 0, srvPoolSize)
nc.urls = make(map[string]struct{}, srvPoolSize)
// Create srv objects from each url string in nc.Opts.Servers
// and add them to the pool.
for _, urlString := range nc.Opts.Servers {
if err := nc.addURLToPool(urlString, false, false); err != nil {
return err
}
}
// Randomize if allowed to
if !nc.Opts.NoRandomize {
nc.shufflePool()
}
// Normally, if this one is set, Options.Servers should not be,
// but we always allowed that, so continue to do so.
if nc.Opts.Url != _EMPTY_ {
// Add to the end of the array
if err := nc.addURLToPool(nc.Opts.Url, false, false); err != nil {
return err
}
// Then swap it with first to guarantee that Options.Url is tried first.
last := len(nc.srvPool) - 1
if last > 0 {
nc.srvPool[0], nc.srvPool[last] = nc.srvPool[last], nc.srvPool[0]
}
} else if len(nc.srvPool) <= 0 {
// Place default URL if pool is empty.
if err := nc.addURLToPool(DefaultURL, false, false); err != nil {
return err
}
}
// Check for Scheme hint to move to TLS mode.
for _, srv := range nc.srvPool {
if srv.url.Scheme == tlsScheme {
// FIXME(dlc), this is for all in the pool, should be case by case.
nc.Opts.Secure = true
if nc.Opts.TLSConfig == nil {
nc.Opts.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
}
}
}
return nc.pickServer()
}
// Helper function to return scheme
func (nc *Conn) connScheme() string {
if nc.Opts.Secure {
return tlsScheme
}
return "nats"
}
// Return true iff u.Hostname() is an IP address.
func hostIsIP(u *url.URL) bool {
return net.ParseIP(u.Hostname()) != nil
}
// addURLToPool adds an entry to the server pool
func (nc *Conn) addURLToPool(sURL string, implicit, saveTLSName bool) error {
if !strings.Contains(sURL, "://") {
sURL = fmt.Sprintf("%s://%s", nc.connScheme(), sURL)
}
var (
u *url.URL
err error
)
for i := 0; i < 2; i++ {
u, err = url.Parse(sURL)
if err != nil {
return err
}
if u.Port() != "" {
break
}
// In case the given URL is of the form "localhost:", just add
// the port number at the end; otherwise, add ":4222".
if sURL[len(sURL)-1] != ':' {
sURL += ":"
}
sURL += defaultPortString
}
var tlsName string
if implicit {
curl := nc.current.url
// Check to see if we do not have a url.User but current connected
// url does. If so copy over.
if u.User == nil && curl.User != nil {
u.User = curl.User
}
// We are checking to see if we have a secure connection and are
// adding an implicit server that just has an IP. If so we will remember
// the current hostname we are connected to.
if saveTLSName && hostIsIP(u) {
tlsName = curl.Hostname()
}
}
s := &srv{url: u, isImplicit: implicit, tlsName: tlsName}
nc.srvPool = append(nc.srvPool, s)
nc.urls[u.Host] = struct{}{}
return nil
}
// shufflePool randomly swaps elements in the server pool
func (nc *Conn) shufflePool() {
if len(nc.srvPool) <= 1 {
return
}
source := rand.NewSource(time.Now().UnixNano())
r := rand.New(source)
for i := range nc.srvPool {
j := r.Intn(i + 1)
nc.srvPool[i], nc.srvPool[j] = nc.srvPool[j], nc.srvPool[i]
}
}
func (nc *Conn) newBuffer() *bufio.Writer {
var w io.Writer = nc.conn
if nc.Opts.FlusherTimeout > 0 {
w = &timeoutWriter{conn: nc.conn, timeout: nc.Opts.FlusherTimeout}
}
return bufio.NewWriterSize(w, defaultBufSize)
}
// createConn will connect to the server and wrap the appropriate
// bufio structures. It will do the right thing when an existing
// connection is in place.
func (nc *Conn) createConn() (err error) {
if nc.Opts.Timeout < 0 {
return ErrBadTimeout
}
if _, cur := nc.currentServer(); cur == nil {
return ErrNoServers
} else {
cur.lastAttempt = time.Now()
}
// We will auto-expand host names if they resolve to multiple IPs
hosts := []string{}
u := nc.current.url
if net.ParseIP(u.Hostname()) == nil {
addrs, _ := net.LookupHost(u.Hostname())
for _, addr := range addrs {
hosts = append(hosts, net.JoinHostPort(addr, u.Port()))
}
}
// Fall back to what we were given.
if len(hosts) == 0 {
hosts = append(hosts, u.Host)
}
// CustomDialer takes precedence. If not set, use Opts.Dialer which
// is set to a default *net.Dialer (in Connect()) if not explicitly
// set by the user.
dialer := nc.Opts.CustomDialer
if dialer == nil {
// We will copy and shorten the timeout if we have multiple hosts to try.
copyDialer := *nc.Opts.Dialer
copyDialer.Timeout = copyDialer.Timeout / time.Duration(len(hosts))
dialer = &copyDialer
}
if len(hosts) > 1 && !nc.Opts.NoRandomize {
rand.Shuffle(len(hosts), func(i, j int) {
hosts[i], hosts[j] = hosts[j], hosts[i]
})
}
for _, host := range hosts {
nc.conn, err = dialer.Dial("tcp", host)
if err == nil {
break
}
}
if err != nil {
return err
}
if nc.pending != nil && nc.bw != nil {
// Move to pending buffer.
nc.bw.Flush()
}
nc.bw = nc.newBuffer()
return nil
}
// makeTLSConn will wrap an existing Conn using TLS
func (nc *Conn) makeTLSConn() error {
// Allow the user to configure their own tls.Config structure.
var tlsCopy *tls.Config
if nc.Opts.TLSConfig != nil {
tlsCopy = util.CloneTLSConfig(nc.Opts.TLSConfig)
} else {
tlsCopy = &tls.Config{}
}
// If it's blank we will override it with the current host
if tlsCopy.ServerName == _EMPTY_ {
if nc.current.tlsName != _EMPTY_ {
tlsCopy.ServerName = nc.current.tlsName
} else {
h, _, _ := net.SplitHostPort(nc.current.url.Host)
tlsCopy.ServerName = h
}
}
nc.conn = tls.Client(nc.conn, tlsCopy)
conn := nc.conn.(*tls.Conn)
if err := conn.Handshake(); err != nil {
return err
}
nc.bw = nc.newBuffer()
return nil
}
// waitForExits will wait for all socket watcher Go routines to
// be shut down before proceeding.
func (nc *Conn) waitForExits() {
// Kick old flusher forcefully.
select {
case nc.fch <- struct{}{}:
default:
}
// Wait for any previous go routines.
nc.wg.Wait()
}
// Report the connected server's Url
func (nc *Conn) ConnectedUrl() string {
if nc == nil {
return _EMPTY_
}
nc.mu.RLock()
defer nc.mu.RUnlock()
if nc.status != CONNECTED {
return _EMPTY_
}
return nc.current.url.String()
}
// ConnectedAddr returns the connected server's IP
func (nc *Conn) ConnectedAddr() string {
if nc == nil {
return _EMPTY_
}
nc.mu.RLock()
defer nc.mu.RUnlock()
if nc.status != CONNECTED {
return _EMPTY_
}
return nc.conn.RemoteAddr().String()
}
// Report the connected server's Id
func (nc *Conn) ConnectedServerId() string {
if nc == nil {
return _EMPTY_
}
nc.mu.RLock()
defer nc.mu.RUnlock()
if nc.status != CONNECTED {
return _EMPTY_
}
return nc.info.Id
}
// Low level setup for structs, etc
func (nc *Conn) setup() {
nc.subs = make(map[int64]*Subscription)
nc.pongs = make([]chan struct{}, 0, 8)
nc.fch = make(chan struct{}, flushChanSize)
// Setup scratch outbound buffer for PUB
pub := nc.scratch[:len(_PUB_P_)]
copy(pub, _PUB_P_)
}
// Process a connected connection and initialize properly.
func (nc *Conn) processConnectInit() error {
// Set our deadline for the whole connect process
nc.conn.SetDeadline(time.Now().Add(nc.Opts.Timeout))
defer nc.conn.SetDeadline(time.Time{})
// Set our status to connecting.
nc.status = CONNECTING
// Process the INFO protocol received from the server
err := nc.processExpectedInfo()
if err != nil {
return err
}
// Send the CONNECT protocol along with the initial PING protocol.
// Wait for the PONG response (or any error that we get from the server).
err = nc.sendConnect()
if err != nil {
return err
}
// Reset the number of PING sent out
nc.pout = 0
// Start or reset Timer
if nc.Opts.PingInterval > 0 {
if nc.ptmr == nil {
nc.ptmr = time.AfterFunc(nc.Opts.PingInterval, nc.processPingTimer)
} else {
nc.ptmr.Reset(nc.Opts.PingInterval)
}
}
// Start the readLoop and flusher go routines, we will wait on both on a reconnect event.
nc.wg.Add(2)
go nc.readLoop()
go nc.flusher()
return nil
}
// Main connect function. Will connect to the nats-server
func (nc *Conn) connect() error {
var returnedErr error
// Create actual socket connection
// For first connect we walk all servers in the pool and try
// to connect immediately.
nc.mu.Lock()
nc.initc = true
// The pool may change inside the loop iteration due to INFO protocol.
for i := 0; i < len(nc.srvPool); i++ {
nc.current = nc.srvPool[i]
if err := nc.createConn(); err == nil {
// This was moved out of processConnectInit() because
// that function is now invoked from doReconnect() too.
nc.setup()
err = nc.processConnectInit()
if err == nil {
nc.srvPool[i].didConnect = true
nc.srvPool[i].reconnects = 0
nc.current.lastErr = nil
returnedErr = nil
break
} else {
returnedErr = err
nc.mu.Unlock()
nc.close(DISCONNECTED, false, err)
nc.mu.Lock()
nc.current = nil
}
} else {
// Cancel out default connection refused, will trigger the
// No servers error conditional
if strings.Contains(err.Error(), "connection refused") {
returnedErr = nil
}
}
}
nc.initc = false
if returnedErr == nil && nc.status != CONNECTED {
returnedErr = ErrNoServers
}
nc.mu.Unlock()
return returnedErr
}
// This will check to see if the connection should be
// secure. This can be dictated from either end and should
// only be called after the INFO protocol has been received.
func (nc *Conn) checkForSecure() error {
// Check to see if we need to engage TLS
o := nc.Opts
// Check for mismatch in setups
if o.Secure && !nc.info.TLSRequired {
return ErrSecureConnWanted
} else if nc.info.TLSRequired && !o.Secure {
// Switch to Secure since server needs TLS.
o.Secure = true
}
// Need to rewrap with bufio
if o.Secure {
if err := nc.makeTLSConn(); err != nil {
return err
}
}
return nil
}
// processExpectedInfo will look for the expected first INFO message
// sent when a connection is established. The lock should be held entering.
func (nc *Conn) processExpectedInfo() error {
c := &control{}
// Read the protocol
err := nc.readOp(c)
if err != nil {
return err
}
// The nats protocol should send INFO first always.
if c.op != _INFO_OP_ {
return ErrNoInfoReceived
}
// Parse the protocol
if err := nc.processInfo(c.args); err != nil {
return err
}
if nc.Opts.Nkey != "" && nc.info.Nonce == "" {
return ErrNkeysNotSupported
}
return nc.checkForSecure()
}
// Sends a protocol control message by queuing into the bufio writer
// and kicking the flush Go routine. These writes are protected.
func (nc *Conn) sendProto(proto string) {
nc.mu.Lock()
nc.bw.WriteString(proto)
nc.kickFlusher()
nc.mu.Unlock()
}
// Generate a connect protocol message, issuing user/password if
// applicable. The lock is assumed to be held upon entering.
func (nc *Conn) connectProto() (string, error) {
o := nc.Opts
var nkey, sig, user, pass, token, ujwt string
u := nc.current.url.User
if u != nil {
// if no password, assume username is authToken
if _, ok := u.Password(); !ok {
token = u.Username()
} else {
user = u.Username()
pass, _ = u.Password()
}
} else {
// Take from options (possibly all empty strings)
user = o.User
pass = o.Password
token = o.Token
nkey = o.Nkey
}
// Look for user jwt.
if o.UserJWT != nil {
if jwt, err := o.UserJWT(); err != nil {
return _EMPTY_, err
} else {
ujwt = jwt
}
if nkey != _EMPTY_ {
return _EMPTY_, ErrNkeyAndUser
}
}
if ujwt != _EMPTY_ || nkey != _EMPTY_ {
if o.SignatureCB == nil {
if ujwt == _EMPTY_ {
return _EMPTY_, ErrNkeyButNoSigCB
}
return _EMPTY_, ErrUserButNoSigCB
}
sigraw, err := o.SignatureCB([]byte(nc.info.Nonce))
if err != nil {
return _EMPTY_, err
}
sig = base64.RawURLEncoding.EncodeToString(sigraw)
}
if nc.Opts.TokenHandler != nil {
if token != _EMPTY_ {
return _EMPTY_, ErrTokenAlreadySet
}
token = nc.Opts.TokenHandler()
}
cinfo := connectInfo{o.Verbose, o.Pedantic, ujwt, nkey, sig, user, pass, token,
o.Secure, o.Name, LangString, Version, clientProtoInfo, !o.NoEcho}
b, err := json.Marshal(cinfo)
if err != nil {
return _EMPTY_, ErrJsonParse
}
// Check if NoEcho is set and we have a server that supports it.
if o.NoEcho && nc.info.Proto < 1 {
return _EMPTY_, ErrNoEchoNotSupported
}
return fmt.Sprintf(conProto, b), nil
}
// normalizeErr removes the -ERR prefix, trims spaces, and removes the quotes.
func normalizeErr(line string) string {
s := strings.TrimSpace(strings.TrimPrefix(line, _ERR_OP_))
s = strings.TrimLeft(strings.TrimRight(s, "'"), "'")
return s
}
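// For example:
//
//	normalizeErr("-ERR 'Authorization Violation'") // "Authorization Violation"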
// Send a connect protocol message to the server, issue user/password if
// applicable. Will wait for a flush to return from the server for error
// processing.
func (nc *Conn) sendConnect() error {
// Construct the CONNECT protocol string
cProto, err := nc.connectProto()
if err != nil {
return err
}
// Write the protocol into the buffer
_, err = nc.bw.WriteString(cProto)
if err != nil {
return err
}
// Add to the buffer the PING protocol
_, err = nc.bw.WriteString(pingProto)
if err != nil {
return err
}
// Flush the buffer
err = nc.bw.Flush()
if err != nil {
return err
}
// We don't want to read more than we need here, otherwise
// we would need to transfer the excess read data to the readLoop.
// Since in normal situations we just are looking for a PONG\r\n,
// reading byte-by-byte here is ok.
proto, err := nc.readProto()
if err != nil {
return err
}
// If opts.Verbose is set, handle +OK
if nc.Opts.Verbose && proto == okProto {
// Read the rest now...
proto, err = nc.readProto()
if err != nil {
return err
}
}
// We expect a PONG
if proto != pongProto {
// But it could be something else, like -ERR
// Since we no longer use ReadLine(), trim the trailing "\r\n"
proto = strings.TrimRight(proto, "\r\n")
// If it's a server error...
if strings.HasPrefix(proto, _ERR_OP_) {
// Remove -ERR, trim spaces and quotes, and convert to lower case.
proto = normalizeErr(proto)
// Check if this is an auth error
if authErr := checkAuthError(strings.ToLower(proto)); authErr != nil {
// This will schedule an async error if we are in reconnect,
// and keep track of the auth error for the current server.
// If we have got the same error twice, this sets nc.ar to true to
// indicate that the reconnect should be aborted (will be checked
// in doReconnect()).
nc.processAuthError(authErr)
}
return errors.New("nats: " + proto)
}
// Notify that we got an unexpected protocol.
return fmt.Errorf("nats: expected '%s', got '%s'", _PONG_OP_, proto)
}
// This is where we are truly connected.
nc.status = CONNECTED
return nil
}
// readProto reads a protocol line one byte at a time.
func (nc *Conn) readProto() (string, error) {
var (
_buf = [10]byte{}
buf = _buf[:0]
b = [1]byte{}
protoEnd = byte('\n')
)
for {
if _, err := nc.conn.Read(b[:1]); err != nil {
// Do not report EOF error
if err == io.EOF {
return string(buf), nil
}
return "", err
}
buf = append(buf, b[0])
if b[0] == protoEnd {
return string(buf), nil
}
}
}
// A control protocol line.
type control struct {
op, args string
}
// Read a control line and process the intended op.
func (nc *Conn) readOp(c *control) error {
br := bufio.NewReaderSize(nc.conn, defaultBufSize)
line, err := br.ReadString('\n')
if err != nil {
return err
}
parseControl(line, c)
return nil
}
// Parse a control line from the server.
func parseControl(line string, c *control) {
toks := strings.SplitN(line, _SPC_, 2)
if len(toks) == 1 {
c.op = strings.TrimSpace(toks[0])
c.args = _EMPTY_
} else if len(toks) == 2 {
c.op, c.args = strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
} else {
c.op = _EMPTY_
}
}
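// For example, an INFO line from the server parses as:
//
//	c := &control{}
//	parseControl("INFO {\"server_id\":\"abc\"}\r\n", c)
//	// c.op == "INFO", c.args == "{\"server_id\":\"abc\"}"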
// flushReconnectPendingItems will push the pending items that were
// gathered while we were in a RECONNECTING state to the socket.
func (nc *Conn) flushReconnectPendingItems() {
if nc.pending == nil {
return
}
if nc.pending.Len() > 0 {
nc.bw.Write(nc.pending.Bytes())
}
}
// Stops the ping timer if set.
// Connection lock is held on entry.
func (nc *Conn) stopPingTimer() {
if nc.ptmr != nil {
nc.ptmr.Stop()
}
}
// Try to reconnect using the option parameters.
// This function assumes we are allowed to reconnect.
func (nc *Conn) doReconnect(err error) {
// We want to make sure we have the other watchers shutdown properly
// here before we proceed past this point.
nc.waitForExits()
// FIXME(dlc) - We have an issue here if we have
// outstanding flush points (pongs) and they were not
// sent out, but are still in the pipe.
// Hold the lock manually and release where needed below,
// can't do defer here.
nc.mu.Lock()
// Clear any queued pongs, e.g. pending flush calls.
nc.clearPendingFlushCalls()
// Clear any errors.
nc.err = nil
// Perform appropriate callback if needed for a disconnect.
// DisconnectedErrCB has priority over deprecated DisconnectedCB
if nc.Opts.DisconnectedErrCB != nil {
nc.ach.push(func() { nc.Opts.DisconnectedErrCB(nc, err) })
} else if nc.Opts.DisconnectedCB != nil {
nc.ach.push(func() { nc.Opts.DisconnectedCB(nc) })
}
// This is used to wait on go routines exit if we start them in the loop
// but an error occurs after that.
waitForGoRoutines := false
for len(nc.srvPool) > 0 {
cur, err := nc.selectNextServer()
if err != nil {
nc.err = err
break
}
sleepTime := int64(0)
// Sleep an appropriate amount of time before the
// connection attempt if connecting to the same server
// we just got disconnected from.
if time.Since(cur.lastAttempt) < nc.Opts.ReconnectWait {
sleepTime = int64(nc.Opts.ReconnectWait - time.Since(cur.lastAttempt))
}
// On Windows, createConn() will take more than a second when no
// server is running at that address. So it could be that the
// time elapsed between reconnect attempts is always > than
// the set option. Release the lock to give a chance to a parallel
// nc.Close() to break the loop.
nc.mu.Unlock()
if sleepTime <= 0 {
runtime.Gosched()
} else {
time.Sleep(time.Duration(sleepTime))
}
// If the readLoop, etc.. go routines were started, wait for them to complete.
if waitForGoRoutines {
nc.waitForExits()
waitForGoRoutines = false
}
nc.mu.Lock()
// Check if we have been closed first.
if nc.isClosed() {
break
}
// Mark that we tried a reconnect
cur.reconnects++
// Try to create a new connection
err = nc.createConn()
// Not yet connected, retry...
// Continue to hold the lock
if err != nil {
nc.err = nil
continue
}
// We are reconnected
nc.Reconnects++
// Process connect logic
if nc.err = nc.processConnectInit(); nc.err != nil {
// Check if we should abort reconnect. If so, break out
// of the loop and connection will be closed.
if nc.ar {
break
}
nc.status = RECONNECTING
// Reset the buffered writer to the pending buffer
// (was set to a buffered writer on nc.conn in createConn)
nc.bw.Reset(nc.pending)
continue
}
// Clear possible lastErr under the connection lock after
// a successful processConnectInit().
nc.current.lastErr = nil
// Clear out server stats for the server we connected to.
cur.didConnect = true
cur.reconnects = 0
// Send existing subscription state
nc.resendSubscriptions()
// Now send off and clear pending buffer
nc.flushReconnectPendingItems()
// Flush the buffer
nc.err = nc.bw.Flush()
if nc.err != nil {
nc.status = RECONNECTING
// Reset the buffered writer to the pending buffer (bytes.Buffer).
nc.bw.Reset(nc.pending)
// Stop the ping timer (if set)
nc.stopPingTimer()
// Since processConnectInit() returned without error, the
// go routines were started, so wait for them to return
// on the next iteration (after releasing the lock).
waitForGoRoutines = true
continue
}
// Done with the pending buffer
nc.pending = nil
// This is where we are truly connected.
nc.status = CONNECTED
// Queue up the reconnect callback.
if nc.Opts.ReconnectedCB != nil {
nc.ach.push(func() { nc.Opts.ReconnectedCB(nc) })
}
// Release lock here, we will return below.
nc.mu.Unlock()
// Make sure to flush everything
nc.Flush()
return
}
// Call into close. We have no servers left.
if nc.err == nil {
nc.err = ErrNoServers
}
nc.mu.Unlock()
nc.close(CLOSED, true, nil)
}
// processOpErr handles errors from reading or parsing the protocol.
// The lock should not be held entering this function.
func (nc *Conn) processOpErr(err error) {
nc.mu.Lock()
if nc.isConnecting() || nc.isClosed() || nc.isReconnecting() {
nc.mu.Unlock()
return
}
if nc.Opts.AllowReconnect && nc.status == CONNECTED {
// Set our new status
nc.status = RECONNECTING
// Stop ping timer if set
nc.stopPingTimer()
if nc.conn != nil {
nc.bw.Flush()
nc.conn.Close()
nc.conn = nil
}
// Create pending buffer before reconnecting.
nc.pending = new(bytes.Buffer)
nc.bw.Reset(nc.pending)
go nc.doReconnect(err)
nc.mu.Unlock()
return
}
nc.status = DISCONNECTED
nc.err = err
nc.mu.Unlock()
nc.close(CLOSED, true, nil)
}
// asyncCBDispatcher is responsible for calling any async callbacks
func (ac *asyncCallbacksHandler) asyncCBDispatcher() {
for {
ac.mu.Lock()
// Protect for spurious wakeups. We should get out of the
// wait only if there is an element to pop from the list.
for ac.head == nil {
ac.cond.Wait()
}
cur := ac.head
ac.head = cur.next
if cur == ac.tail {
ac.tail = nil
}
ac.mu.Unlock()
// This signals that the dispatcher has been closed and all
// previous callbacks have been dispatched.
if cur.f == nil {
return
}
// Invoke callback outside of handler's lock
cur.f()
}
}
// push adds the given function to the tail of the list and
// signals the dispatcher.
func (ac *asyncCallbacksHandler) push(f func()) {
ac.pushOrClose(f, false)
}
// Signals that we are closing...
func (ac *asyncCallbacksHandler) close() {
ac.pushOrClose(nil, true)
}
// pushOrClose adds the given function to the tail of the list and signals
// the dispatcher, broadcasting instead when this is a close request.
func (ac *asyncCallbacksHandler) pushOrClose(f func(), close bool) {
ac.mu.Lock()
defer ac.mu.Unlock()
// Make sure that library is not calling push with nil function,
// since this is used to notify the dispatcher that it should stop.
if !close && f == nil {
panic("pushing a nil callback")
}
cb := &asyncCB{f: f}
if ac.tail != nil {
ac.tail.next = cb
} else {
ac.head = cb
}
ac.tail = cb
if close {
ac.cond.Broadcast()
} else {
ac.cond.Signal()
}
}
// readLoop() will sit on the socket reading and processing the
// protocol from the server. It will dispatch appropriately based
// on the op type.
func (nc *Conn) readLoop() {
// Release the wait group on exit
defer nc.wg.Done()
// Create a parseState if needed.
nc.mu.Lock()
if nc.ps == nil {
nc.ps = &parseState{}
}
conn := nc.conn
nc.mu.Unlock()
if conn == nil {
return
}
// Stack based buffer.
b := make([]byte, defaultBufSize)
for {
if n, err := conn.Read(b); err != nil {
nc.processOpErr(err)
break
} else if err = nc.parse(b[:n]); err != nil {
nc.processOpErr(err)
break
}
}
// Clear the parseState here.
nc.mu.Lock()
nc.ps = nil
nc.mu.Unlock()
}
// waitForMsgs waits on the conditional shared with readLoop and processMsg.
// It is used to deliver messages to asynchronous subscribers.
func (nc *Conn) waitForMsgs(s *Subscription) {
var closed bool
var delivered, max uint64
// Used to account for adjustments to sub.pBytes when we wrap back around.
msgLen := -1
for {
s.mu.Lock()
// Do accounting for last msg delivered here so we only lock once
// and drain state trips after callback has returned.
if msgLen >= 0 {
s.pMsgs--
s.pBytes -= msgLen
msgLen = -1
}
if s.pHead == nil && !s.closed {
s.pCond.Wait()
}
// Pop the msg off the list
m := s.pHead
if m != nil {
s.pHead = m.next
if s.pHead == nil {
s.pTail = nil
}
if m.barrier != nil {
s.mu.Unlock()
if atomic.AddInt64(&m.barrier.refs, -1) == 0 {
m.barrier.f()
}
continue
}
msgLen = len(m.Data)
}
mcb := s.mcb
max = s.max
closed = s.closed
if !s.closed {
s.delivered++
delivered = s.delivered
}
s.mu.Unlock()
if closed {
break
}
// Deliver the message.
if m != nil && (max == 0 || delivered <= max) {
mcb(m)
}
// If we have hit the max for delivered msgs, remove sub.
if max > 0 && delivered >= max {
nc.mu.Lock()
nc.removeSub(s)
nc.mu.Unlock()
break
}
}
// Check for barrier messages
s.mu.Lock()
for m := s.pHead; m != nil; m = s.pHead {
if m.barrier != nil {
s.mu.Unlock()
if atomic.AddInt64(&m.barrier.refs, -1) == 0 {
m.barrier.f()
}
s.mu.Lock()
}
s.pHead = m.next
}
s.mu.Unlock()
}
// processMsg is called by parse and will place the msg on the
// appropriate channel/pending queue for processing. If the channel is full,
// or the pending queue is over the pending limits, the connection is
// considered a slow consumer.
func (nc *Conn) processMsg(data []byte) {
// Don't lock the connection to avoid server cutting us off if the
// flusher is holding the connection lock, trying to send to the server
// that is itself trying to send data to us.
nc.subsMu.RLock()
// Stats
atomic.AddUint64(&nc.InMsgs, 1)
atomic.AddUint64(&nc.InBytes, uint64(len(data)))
sub := nc.subs[nc.ps.ma.sid]
if sub == nil {
nc.subsMu.RUnlock()
return
}
// Copy them into string
subj := string(nc.ps.ma.subject)
reply := string(nc.ps.ma.reply)
// Doing message create outside of the sub's lock to reduce contention.
// It's possible that we end-up not using the message, but that's ok.
// FIXME(dlc): Need to copy, should/can do COW?
msgPayload := make([]byte, len(data))
copy(msgPayload, data)
// FIXME(dlc): Should we recycle these containers?
m := &Msg{Data: msgPayload, Subject: subj, Reply: reply, Sub: sub}
sub.mu.Lock()
// Subscription internal stats (applicable only for non ChanSubscription's)
if sub.typ != ChanSubscription {
sub.pMsgs++
if sub.pMsgs > sub.pMsgsMax {
sub.pMsgsMax = sub.pMsgs
}
sub.pBytes += len(m.Data)
if sub.pBytes > sub.pBytesMax {
sub.pBytesMax = sub.pBytes
}
// Check for a Slow Consumer
if (sub.pMsgsLimit > 0 && sub.pMsgs > sub.pMsgsLimit) ||
(sub.pBytesLimit > 0 && sub.pBytes > sub.pBytesLimit) {
goto slowConsumer
}
}
// We have two modes of delivery. One is the channel, used by channel
// subscribers and syncSubscribers, the other is a linked list for async.
if sub.mch != nil {
select {
case sub.mch <- m:
default:
goto slowConsumer
}
} else {
// Push onto the async pList
if sub.pHead == nil {
sub.pHead = m
sub.pTail = m
sub.pCond.Signal()
} else {
sub.pTail.next = m
sub.pTail = m
}
}
// Clear SlowConsumer status.
sub.sc = false
sub.mu.Unlock()
nc.subsMu.RUnlock()
return
slowConsumer:
sub.dropped++
sc := !sub.sc
sub.sc = true
// Undo stats from above
if sub.typ != ChanSubscription {
sub.pMsgs--
sub.pBytes -= len(m.Data)
}
sub.mu.Unlock()
nc.subsMu.RUnlock()
if sc {
// Now we need connection's lock and we may end-up in the situation
// that we were trying to avoid, except that in this case, the client
// is already experiencing client-side slow consumer situation.
nc.mu.Lock()
nc.err = ErrSlowConsumer
if nc.Opts.AsyncErrorCB != nil {
nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrSlowConsumer) })
}
nc.mu.Unlock()
}
}
// processPermissionsViolation is called when the server signals a subject
// permissions violation on either publish or subscribe.
func (nc *Conn) processPermissionsViolation(err string) {
nc.mu.Lock()
// create error here so we can pass it as a closure to the async cb dispatcher.
e := errors.New("nats: " + err)
nc.err = e
if nc.Opts.AsyncErrorCB != nil {
nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, e) })
}
nc.mu.Unlock()
}
// processAuthError handles general processing of auth errors. We want to retry
// unless we get the same error again. This allows us, for instance, to swap
// credentials and have the app reconnect, but if nothing is changing we should bail.
// This function will return true if the connection should be closed, false otherwise.
// Connection lock is held on entry
func (nc *Conn) processAuthError(err error) bool {
nc.err = err
if !nc.initc && nc.Opts.AsyncErrorCB != nil {
nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
}
// We should give up if we tried twice on this server and got the
// same error.
if nc.current.lastErr == err {
nc.ar = true
} else {
nc.current.lastErr = err
}
return nc.ar
}
// flusher is a separate Go routine that will process flush requests for the write
// bufio. This allows coalescing of writes to the underlying socket.
func (nc *Conn) flusher() {
// Release the wait group
defer nc.wg.Done()
// snapshot the bw and conn since they can change from underneath of us.
nc.mu.Lock()
bw := nc.bw
conn := nc.conn
fch := nc.fch
nc.mu.Unlock()
if conn == nil || bw == nil {
return
}
for {
if _, ok := <-fch; !ok {
return
}
nc.mu.Lock()
// Check to see if we should bail out.
if !nc.isConnected() || nc.isConnecting() || bw != nc.bw || conn != nc.conn {
nc.mu.Unlock()
return
}
if bw.Buffered() > 0 {
if err := bw.Flush(); err != nil {
if nc.err == nil {
nc.err = err
}
}
}
nc.mu.Unlock()
}
}
// processPing will send an immediate pong protocol response to the
// server. The server uses this mechanism to detect dead clients.
func (nc *Conn) processPing() {
nc.sendProto(pongProto)
}
// processPong is used to process responses to the client's ping
// messages. We use pings for the flush mechanism as well.
func (nc *Conn) processPong() {
var ch chan struct{}
nc.mu.Lock()
if len(nc.pongs) > 0 {
ch = nc.pongs[0]
nc.pongs = nc.pongs[1:]
}
nc.pout = 0
nc.mu.Unlock()
if ch != nil {
ch <- struct{}{}
}
}
// processOK is a placeholder for processing OK messages.
func (nc *Conn) processOK() {
// do nothing
}
// processInfo is used to parse the info messages sent
// from the server.
// This function may update the server pool.
func (nc *Conn) processInfo(info string) error {
if info == _EMPTY_ {
return nil
}
ncInfo := serverInfo{}
if err := json.Unmarshal([]byte(info), &ncInfo); err != nil {
return err
}
// Copy content into connection's info structure.
nc.info = ncInfo
// The array could be empty/not present on initial connect,
// if advertise is disabled on that server, or on servers that
// did not include themselves in the async INFO protocol.
// If empty, do not remove the implicit servers from the pool.
if len(ncInfo.ConnectURLs) == 0 {
return nil
}
// Note about pool randomization: when the pool was first created,
// it was randomized (if allowed). We keep the order the same (removing
// implicit servers that are no longer sent to us). New URLs are sent
// to us in no specific order so don't need extra randomization.
hasNew := false
// This is what we got from the server we are connected to.
urls := nc.info.ConnectURLs
// Transform that to a map for easy lookups
tmp := make(map[string]struct{}, len(urls))
for _, curl := range urls {
tmp[curl] = struct{}{}
}
// Walk the pool and remove the implicit servers that are no longer in the
// given array/map
sp := nc.srvPool
for i := 0; i < len(sp); i++ {
srv := sp[i]
curl := srv.url.Host
// Check if this URL is in the INFO protocol
_, inInfo := tmp[curl]
// Remove from the temp map so that at the end we are left with only
// new (or restarted) servers that need to be added to the pool.
delete(tmp, curl)
// Keep servers that were set through Options, but also the one that
// we are currently connected to (even if it is a discovered server).
if !srv.isImplicit || srv.url == nc.current.url {
continue
}
if !inInfo {
// Remove from server pool. Keep current order.
copy(sp[i:], sp[i+1:])
nc.srvPool = sp[:len(sp)-1]
sp = nc.srvPool
i--
}
}
// Figure out if we should save off the current non-IP hostname if we encounter a bare IP.
saveTLS := nc.current != nil && !hostIsIP(nc.current.url)
// If there are any left in the tmp map, these are new (or restarted) servers
// and need to be added to the pool.
for curl := range tmp {
// Before adding, check if this is a new (as in never seen) URL.
// This is used to figure out if we invoke the DiscoveredServersCB
if _, present := nc.urls[curl]; !present {
hasNew = true
}
nc.addURLToPool(fmt.Sprintf("%s://%s", nc.connScheme(), curl), true, saveTLS)
}
if hasNew && !nc.initc && nc.Opts.DiscoveredServersCB != nil {
nc.ach.push(func() { nc.Opts.DiscoveredServersCB(nc) })
}
return nil
}
// processAsyncInfo does the same as processInfo, but is called
// from the parser. Calls processInfo under the connection's lock
// protection.
func (nc *Conn) processAsyncInfo(info []byte) {
nc.mu.Lock()
// Ignore errors, we will simply not update the server pool...
nc.processInfo(string(info))
nc.mu.Unlock()
}
// LastError reports the last error encountered via the connection.
// It can be used reliably within ClosedCB, for example, in order to
// find out the reason why the connection was closed.
func (nc *Conn) LastError() error {
if nc == nil {
return ErrInvalidConnection
}
nc.mu.RLock()
err := nc.err
nc.mu.RUnlock()
return err
}
// Check if the given error string is an auth error, and if so returns
// the corresponding ErrXXX error, nil otherwise
func checkAuthError(e string) error {
if strings.HasPrefix(e, AUTHORIZATION_ERR) {
return ErrAuthorization
}
if strings.HasPrefix(e, AUTHENTICATION_EXPIRED_ERR) {
return ErrAuthExpired
}
return nil
}
// processErr processes any error messages from the server and
// sets the connection's lastError.
func (nc *Conn) processErr(ie string) {
// Trim, remove quotes
ne := normalizeErr(ie)
// convert to lower case.
e := strings.ToLower(ne)
close := false
// FIXME(dlc) - process Slow Consumer signals special.
if e == STALE_CONNECTION {
nc.processOpErr(ErrStaleConnection)
} else if strings.HasPrefix(e, PERMISSIONS_ERR) {
nc.processPermissionsViolation(ne)
} else if authErr := checkAuthError(e); authErr != nil {
nc.mu.Lock()
close = nc.processAuthError(authErr)
nc.mu.Unlock()
} else {
close = true
nc.mu.Lock()
nc.err = errors.New("nats: " + ne)
nc.mu.Unlock()
}
if close {
nc.close(CLOSED, true, nil)
}
}
// kickFlusher will send a signal on a channel to kick the
// flush Go routine to flush data to the server.
func (nc *Conn) kickFlusher() {
if nc.bw != nil {
select {
case nc.fch <- struct{}{}:
default:
}
}
}
// Publish publishes the data argument to the given subject. The data
// argument is left untouched and needs to be correctly interpreted on
// the receiver.
func (nc *Conn) Publish(subj string, data []byte) error {
return nc.publish(subj, _EMPTY_, data)
}
// PublishMsg publishes the Msg structure, which includes the
// Subject, an optional Reply and an optional Data field.
func (nc *Conn) PublishMsg(m *Msg) error {
if m == nil {
return ErrInvalidMsg
}
return nc.publish(m.Subject, m.Reply, m.Data)
}
// PublishRequest will perform a Publish() expecting a response on the
// reply subject. Use Request() for automatically waiting for a response
// inline.
func (nc *Conn) PublishRequest(subj, reply string, data []byte) error {
return nc.publish(subj, reply, data)
}
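// Illustrative usage (subjects and payloads are placeholders):
//
//	nc.Publish("updates", []byte("hello"))
//	nc.PublishRequest("svc.echo", "my.reply.inbox", []byte("ping"))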
// Used for handrolled itoa
const digits = "0123456789"
// publish is the internal function to publish messages to a nats-server.
// Sends a protocol data message by queuing into the bufio writer
// and kicking the flush go routine. These writes should be protected.
func (nc *Conn) publish(subj, reply string, data []byte) error {
if nc == nil {
return ErrInvalidConnection
}
if subj == "" {
return ErrBadSubject
}
nc.mu.Lock()
if nc.isClosed() {
nc.mu.Unlock()
return ErrConnectionClosed
}
if nc.isDrainingPubs() {
nc.mu.Unlock()
return ErrConnectionDraining
}
// Proactively reject payloads over the threshold set by server.
msgSize := int64(len(data))
if msgSize > nc.info.MaxPayload {
nc.mu.Unlock()
return ErrMaxPayload
}
// Check if we are reconnecting, and if so check if
// we have exceeded our reconnect outbound buffer limits.
if nc.isReconnecting() {
// Flush to underlying buffer.
nc.bw.Flush()
// Check if we are over
if nc.pending.Len() >= nc.Opts.ReconnectBufSize {
nc.mu.Unlock()
return ErrReconnectBufExceeded
}
}
msgh := nc.scratch[:len(_PUB_P_)]
msgh = append(msgh, subj...)
msgh = append(msgh, ' ')
if reply != "" {
msgh = append(msgh, reply...)
msgh = append(msgh, ' ')
}
// We could be smarter here, but simple loop is ok,
// just avoid strconv in fast path
// FIXME(dlc) - Find a better way here.
// msgh = strconv.AppendInt(msgh, int64(len(data)), 10)
var b [12]byte
var i = len(b)
if len(data) > 0 {
for l := len(data); l > 0; l /= 10 {
i -= 1
b[i] = digits[l%10]
}
} else {
i -= 1
b[i] = digits[0]
}
msgh = append(msgh, b[i:]...)
msgh = append(msgh, _CRLF_...)
_, err := nc.bw.Write(msgh)
if err == nil {
_, err = nc.bw.Write(data)
}
if err == nil {
_, err = nc.bw.WriteString(_CRLF_)
}
if err != nil {
nc.mu.Unlock()
return err
}
nc.OutMsgs++
nc.OutBytes += uint64(len(data))
if len(nc.fch) == 0 {
nc.kickFlusher()
}
nc.mu.Unlock()
return nil
}
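// As a concrete illustration of the framing built above: publishing
// []byte("hello") to subject "foo" with no reply writes the bytes
//
//	PUB foo 5\r\nhello\r\n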
// respHandler is the global response handler. It will look up
// the appropriate channel based on the last token and place
// the message on the channel if possible.
func (nc *Conn) respHandler(m *Msg) {
nc.mu.Lock()
// Just return if closed.
if nc.isClosed() {
nc.mu.Unlock()
return
}
var mch chan *Msg
// Grab mch
rt := nc.respToken(m.Subject)
if rt != _EMPTY_ {
mch = nc.respMap[rt]
// Delete the key regardless, one response only.
delete(nc.respMap, rt)
} else if len(nc.respMap) == 1 {
// If the server has rewritten the subject, the response token (rt)
// will not match (could be the case with JetStream). If that is the
// case and there is a single entry, use that.
for k, v := range nc.respMap {
mch = v
delete(nc.respMap, k)
break
}
}
nc.mu.Unlock()
// Don't block, let Request timeout instead, mch is
// buffered and we should delete the key before a
// second response is processed.
select {
case mch <- m:
default:
return
}
}
// Helper to set up and send new request style requests. Returns the chan to receive the response.
func (nc *Conn) createNewRequestAndSend(subj string, data []byte) (chan *Msg, string, error) {
// Do setup for the new style if needed.
if nc.respMap == nil {
nc.initNewResp()
}
// Create new literal Inbox and map to a chan msg.
mch := make(chan *Msg, RequestChanLen)
respInbox := nc.newRespInbox()
token := respInbox[respInboxPrefixLen:]
nc.respMap[token] = mch
if nc.respMux == nil {
// Create the response subscription we will use for all new style responses.
// This will be on an _INBOX with an additional terminal token. The subscription
// will be on a wildcard.
s, err := nc.subscribeLocked(nc.respSub, _EMPTY_, nc.respHandler, nil, false)
if err != nil {
nc.mu.Unlock()
return nil, token, err
}
nc.respScanf = strings.Replace(nc.respSub, "*", "%s", -1)
nc.respMux = s
}
nc.mu.Unlock()
if err := nc.PublishRequest(subj, respInbox, data); err != nil {
return nil, token, err
}
return mch, token, nil
}
// Request will send a request payload and deliver the response message,
// or an error, including a timeout if no message was received properly.
func (nc *Conn) Request(subj string, data []byte, timeout time.Duration) (*Msg, error) {
if nc == nil {
return nil, ErrInvalidConnection
}
nc.mu.Lock()
// If user wants the old style.
if nc.Opts.UseOldRequestStyle {
nc.mu.Unlock()
return nc.oldRequest(subj, data, timeout)
}
mch, token, err := nc.createNewRequestAndSend(subj, data)
if err != nil {
return nil, err
}
t := globalTimerPool.Get(timeout)
defer globalTimerPool.Put(t)
var ok bool
var msg *Msg
select {
case msg, ok = <-mch:
if !ok {
return nil, ErrConnectionClosed
}
case <-t.C:
nc.mu.Lock()
delete(nc.respMap, token)
nc.mu.Unlock()
return nil, ErrTimeout
}
return msg, nil
}
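// Illustrative usage (subject, payload, and timeout are placeholders):
//
//	msg, err := nc.Request("svc.echo", []byte("ping"), 2*time.Second)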
// oldRequest will create an Inbox and perform a Request() call
// with the Inbox reply and return the first reply received.
// This is optimized for the case of multiple responses.
func (nc *Conn) oldRequest(subj string, data []byte, timeout time.Duration) (*Msg, error) {
inbox := NewInbox()
ch := make(chan *Msg, RequestChanLen)
s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, false)
if err != nil {
return nil, err
}
s.AutoUnsubscribe(1)
defer s.Unsubscribe()
err = nc.PublishRequest(subj, inbox, data)
if err != nil {
return nil, err
}
return s.NextMsg(timeout)
}
// InboxPrefix is the prefix for all inbox subjects.
const (
InboxPrefix = "_INBOX."
inboxPrefixLen = len(InboxPrefix)
respInboxPrefixLen = inboxPrefixLen + nuidSize + 1
replySuffixLen = 8 // Gives us 62^8
rdigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
base = 62
)
// NewInbox will return an inbox string which can be used for directed replies from
// subscribers. These are guaranteed to be unique, but can be shared and subscribed
// to by others.
func NewInbox() string {
var b [inboxPrefixLen + nuidSize]byte
pres := b[:inboxPrefixLen]
copy(pres, InboxPrefix)
ns := b[inboxPrefixLen:]
copy(ns, nuid.Next())
return string(b[:])
}
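// The result has the form "_INBOX.<nuid>", for example (the suffix shown is
// a made-up placeholder for a generated NUID):
//
//	_INBOX.a4S5cmLxpkkWtLhkiSGNLe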
// Function to init new response structures.
func (nc *Conn) initNewResp() {
// _INBOX wildcard
nc.respSub = fmt.Sprintf("%s.*", NewInbox())
nc.respMap = make(map[string]chan *Msg)
nc.respRand = rand.New(rand.NewSource(time.Now().UnixNano()))
}
// newRespInbox creates a new literal response subject
// that will trigger the mux subscription handler.
// Lock should be held.
func (nc *Conn) newRespInbox() string {
if nc.respMap == nil {
nc.initNewResp()
}
var b [respInboxPrefixLen + replySuffixLen]byte
pres := b[:respInboxPrefixLen]
copy(pres, nc.respSub)
rn := nc.respRand.Int63()
for i, l := respInboxPrefixLen, rn; i < len(b); i++ {
b[i] = rdigits[l%base]
l /= base
}
return string(b[:])
}
// NewRespInbox returns a new inbox in the new response format used for _INBOX.
func (nc *Conn) NewRespInbox() string {
nc.mu.Lock()
s := nc.newRespInbox()
nc.mu.Unlock()
return s
}
// respToken will return the last token of a literal response inbox
// which we use for the message channel lookup. This needs to do a
// scan to protect itself against the server changing the subject.
// Lock should be held.
func (nc *Conn) respToken(respInbox string) string {
var token string
n, err := fmt.Sscanf(respInbox, nc.respScanf, &token)
if err != nil || n != 1 {
return ""
}
return token
}
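// For example, if respSub is "_INBOX.abc123.*" then respScanf is
// "_INBOX.abc123.%s" (set in createNewRequestAndSend), and:
//
//	respToken("_INBOX.abc123.XYZ") // "XYZ"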
// Subscribe will express interest in the given subject. The subject
// can have wildcards (partial:*, full:>). Messages will be delivered
// to the associated MsgHandler.
func (nc *Conn) Subscribe(subj string, cb MsgHandler) (*Subscription, error) {
return nc.subscribe(subj, _EMPTY_, cb, nil, false)
}
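// Illustrative usage (the subject is a placeholder):
//
//	sub, err := nc.Subscribe("updates", func(m *Msg) {
//		// process m.Data
//	})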
// ChanSubscribe will express interest in the given subject and place
// all messages received on the channel.
// You should not close the channel until sub.Unsubscribe() has been called.
func (nc *Conn) ChanSubscribe(subj string, ch chan *Msg) (*Subscription, error) {
return nc.subscribe(subj, _EMPTY_, nil, ch, false)
}
// ChanQueueSubscribe will express interest in the given subject.
// All subscribers with the same queue name will form the queue group
// and only one member of the group will be selected to receive any given message,
// which will be placed on the channel.
// You should not close the channel until sub.Unsubscribe() has been called.
// Note: This is the same as QueueSubscribeSyncWithChan.
func (nc *Conn) ChanQueueSubscribe(subj, group string, ch chan *Msg) (*Subscription, error) {
return nc.subscribe(subj, group, nil, ch, false)
}
// SubscribeSync will express interest in the given subject. Messages will
// be received synchronously using Subscription.NextMsg().
func (nc *Conn) SubscribeSync(subj string) (*Subscription, error) {
if nc == nil {
return nil, ErrInvalidConnection
}
mch := make(chan *Msg, nc.Opts.SubChanLen)
s, e := nc.subscribe(subj, _EMPTY_, nil, mch, true)
return s, e
}
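// Illustrative usage (subject and timeout are placeholders):
//
//	sub, _ := nc.SubscribeSync("updates")
//	msg, err := sub.NextMsg(2 * time.Second)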
// QueueSubscribe creates an asynchronous queue subscriber on the given subject.
// All subscribers with the same queue name will form the queue group and
// only one member of the group will be selected to receive any given
// message asynchronously.
func (nc *Conn) QueueSubscribe(subj, queue string, cb MsgHandler) (*Subscription, error) {
return nc.subscribe(subj, queue, cb, nil, false)
}
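// Illustrative usage: both subscribers below join queue group "workers",
// so each message on "jobs" is handled by only one of them (names are
// placeholders):
//
//	nc.QueueSubscribe("jobs", "workers", handler)
//	nc.QueueSubscribe("jobs", "workers", handler)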
// QueueSubscribeSync creates a synchronous queue subscriber on the given
// subject. All subscribers with the same queue name will form the queue
// group and only one member of the group will be selected to receive any
// given message synchronously using Subscription.NextMsg().
func (nc *Conn) QueueSubscribeSync(subj, queue string) (*Subscription, error) {
mch := make(chan *Msg, nc.Opts.SubChanLen)
s, e := nc.subscribe(subj, queue, nil, mch, true)
return s, e
}
// QueueSubscribeSyncWithChan will express interest in the given subject.
// All subscribers with the same queue name will form the queue group
// and only one member of the group will be selected to receive any given message,
// which will be placed on the channel.
// You should not close the channel until sub.Unsubscribe() has been called.
// Note: This is the same as ChanQueueSubscribe.
func (nc *Conn) QueueSubscribeSyncWithChan(subj, queue string, ch chan *Msg) (*Subscription, error) {
return nc.subscribe(subj, queue, nil, ch, false)
}
// badSubject will do a quick test on whether a subject is acceptable.
// Spaces are not allowed and all tokens should be > 0 in len.
func badSubject(subj string) bool {
if strings.ContainsAny(subj, " \t\r\n") {
return true
}
tokens := strings.Split(subj, ".")
for _, t := range tokens {
if len(t) == 0 {
return true
}
}
return false
}
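// For example:
//
//	badSubject("foo.bar")  // false
//	badSubject("foo..bar") // true (empty token)
//	badSubject("foo bar")  // true (contains a space)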
// badQueue will check a queue name for whitespace.
func badQueue(qname string) bool {
return strings.ContainsAny(qname, " \t\r\n")
}
// subscribe is the internal subscribe function that indicates interest in a subject.
func (nc *Conn) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool) (*Subscription, error) {
if nc == nil {
return nil, ErrInvalidConnection
}
nc.mu.Lock()
s, err := nc.subscribeLocked(subj, queue, cb, ch, isSync)
nc.mu.Unlock()
return s, err
}
func (nc *Conn) subscribeLocked(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool) (*Subscription, error) {
if nc == nil {
return nil, ErrInvalidConnection
}
if badSubject(subj) {
return nil, ErrBadSubject
}
if queue != "" && badQueue(queue) {
return nil, ErrBadQueueName
}
// Check for some error conditions.
if nc.isClosed() {
return nil, ErrConnectionClosed
}
if nc.isDraining() {
return nil, ErrConnectionDraining
}
if cb == nil && ch == nil {
return nil, ErrBadSubscription
}
sub := &Subscription{Subject: subj, Queue: queue, mcb: cb, conn: nc}
// Set pending limits.
sub.pMsgsLimit = DefaultSubPendingMsgsLimit
sub.pBytesLimit = DefaultSubPendingBytesLimit
// If we have an async callback, start up a sub specific
// Go routine to deliver the messages.
if cb != nil {
sub.typ = AsyncSubscription
sub.pCond = sync.NewCond(&sub.mu)
go nc.waitForMsgs(sub)
} else if !isSync {
sub.typ = ChanSubscription
sub.mch = ch
} else { // Sync Subscription
sub.typ = SyncSubscription
sub.mch = ch
}
nc.subsMu.Lock()
nc.ssid++
sub.sid = nc.ssid
nc.subs[sub.sid] = sub
nc.subsMu.Unlock()
// We will send these for all subs when we reconnect
// so that we can suppress here if reconnecting.
if !nc.isReconnecting() {
fmt.Fprintf(nc.bw, subProto, subj, queue, sub.sid)
// Kick flusher if needed.
if len(nc.fch) == 0 {
nc.kickFlusher()
}
}
return sub, nil
}
// NumSubscriptions returns the number of active subscriptions.
func (nc *Conn) NumSubscriptions() int {
nc.mu.RLock()
defer nc.mu.RUnlock()
return len(nc.subs)
}
// Lock for nc should be held here upon entry
func (nc *Conn) removeSub(s *Subscription) {
nc.subsMu.Lock()
delete(nc.subs, s.sid)
nc.subsMu.Unlock()
s.mu.Lock()
defer s.mu.Unlock()
// Release callers on NextMsg for SyncSubscription only
if s.mch != nil && s.typ == SyncSubscription {
close(s.mch)
}
s.mch = nil
// Mark as invalid
s.closed = true
if s.pCond != nil {
s.pCond.Broadcast()
}
}
// SubscriptionType is the type of the Subscription.
type SubscriptionType int
// The different types of subscription types.
const (
AsyncSubscription = SubscriptionType(iota)
SyncSubscription
ChanSubscription
NilSubscription
)
// Type returns the type of Subscription.
func (s *Subscription) Type() SubscriptionType {
if s == nil {
return NilSubscription
}
s.mu.Lock()
defer s.mu.Unlock()
return s.typ
}
// IsValid returns a boolean indicating whether the subscription
// is still active. This will return false if the subscription has
// already been closed.
func (s *Subscription) IsValid() bool {
if s == nil {
return false
}
s.mu.Lock()
defer s.mu.Unlock()
return s.conn != nil && !s.closed
}
// Drain will remove interest but continue callbacks until all messages
// have been processed.
func (s *Subscription) Drain() error {
if s == nil {
return ErrBadSubscription
}
s.mu.Lock()
conn := s.conn
s.mu.Unlock()
if conn == nil {
return ErrBadSubscription
}
return conn.unsubscribe(s, 0, true)
}
// Unsubscribe will remove interest in the given subject.
func (s *Subscription) Unsubscribe() error {
if s == nil {
return ErrBadSubscription
}
s.mu.Lock()
conn := s.conn
closed := s.closed
s.mu.Unlock()
if conn == nil || conn.IsClosed() {
return ErrConnectionClosed
}
if closed {
return ErrBadSubscription
}
if conn.IsDraining() {
return ErrConnectionDraining
}
return conn.unsubscribe(s, 0, false)
}
// checkDrained will watch for a subscription to be fully drained
// and then remove it.
func (nc *Conn) checkDrained(sub *Subscription) {
if nc == nil || sub == nil {
return
}
	// This flush allows us to know that whatever the client has pending
	// is complete and the server will not send additional messages.
nc.Flush()
// Once we are here we just wait for Pending to reach 0 or
// any other state to exit this go routine.
for {
// check connection is still valid.
if nc.IsClosed() {
return
}
// Check subscription state
sub.mu.Lock()
conn := sub.conn
closed := sub.closed
pMsgs := sub.pMsgs
sub.mu.Unlock()
if conn == nil || closed || pMsgs == 0 {
nc.mu.Lock()
nc.removeSub(sub)
nc.mu.Unlock()
return
}
time.Sleep(100 * time.Millisecond)
}
}
// AutoUnsubscribe will issue an automatic Unsubscribe that is
// processed by the server when max messages have been received.
// This can be useful when sending a request to an unknown number
// of subscribers.
func (s *Subscription) AutoUnsubscribe(max int) error {
if s == nil {
return ErrBadSubscription
}
s.mu.Lock()
conn := s.conn
closed := s.closed
s.mu.Unlock()
if conn == nil || closed {
return ErrBadSubscription
}
return conn.unsubscribe(s, max, false)
}
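// Illustrative usage (sketch, not from the original source; the reply subject
// is hypothetical): when exactly one reply is expected, auto-unsubscribe after
// the first delivered message:
//
//	sub, _ := nc.SubscribeSync("reply.subject")
//	sub.AutoUnsubscribe(1)
//	msg, err := sub.NextMsg(2 * time.Second)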
// unsubscribe performs the low level unsubscribe to the server.
// Use Subscription.Unsubscribe()
func (nc *Conn) unsubscribe(sub *Subscription, max int, drainMode bool) error {
nc.mu.Lock()
// ok here, but defer is expensive
defer nc.mu.Unlock()
defer nc.kickFlusher()
if nc.isClosed() {
return ErrConnectionClosed
}
nc.subsMu.RLock()
s := nc.subs[sub.sid]
nc.subsMu.RUnlock()
// Already unsubscribed
if s == nil {
return nil
}
maxStr := _EMPTY_
if max > 0 {
s.max = uint64(max)
maxStr = strconv.Itoa(max)
} else if !drainMode {
nc.removeSub(s)
}
if drainMode {
go nc.checkDrained(sub)
}
// We will send these for all subs when we reconnect
// so that we can suppress here.
if !nc.isReconnecting() {
fmt.Fprintf(nc.bw, unsubProto, s.sid, maxStr)
}
return nil
}
// NextMsg will return the next message available to a synchronous subscriber
// or block until one is available. An error is returned if the subscription is invalid (ErrBadSubscription),
// the connection is closed (ErrConnectionClosed), or the timeout is reached (ErrTimeout).
func (s *Subscription) NextMsg(timeout time.Duration) (*Msg, error) {
if s == nil {
return nil, ErrBadSubscription
}
s.mu.Lock()
err := s.validateNextMsgState()
if err != nil {
s.mu.Unlock()
return nil, err
}
// snapshot
mch := s.mch
s.mu.Unlock()
var ok bool
var msg *Msg
// If something is available right away, let's optimize that case.
select {
case msg, ok = <-mch:
if !ok {
return nil, s.getNextMsgErr()
}
		if err := s.processNextMsgDelivered(msg); err != nil {
			return nil, err
		}
		return msg, nil
default:
}
// If we are here a message was not immediately available, so lets loop
// with a timeout.
t := globalTimerPool.Get(timeout)
defer globalTimerPool.Put(t)
select {
case msg, ok = <-mch:
if !ok {
return nil, s.getNextMsgErr()
}
if err := s.processNextMsgDelivered(msg); err != nil {
return nil, err
}
case <-t.C:
return nil, ErrTimeout
}
return msg, nil
}
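// Illustrative usage (sketch, not from the original source; process is a
// hypothetical handler): draining a sync subscription until no message
// arrives within the timeout:
//
//	for {
//	    msg, err := sub.NextMsg(250 * time.Millisecond)
//	    if err != nil {
//	        break // typically ErrTimeout once the stream goes quiet
//	    }
//	    process(msg)
//	}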
// validateNextMsgState checks whether the subscription is in a valid
// state to call NextMsg and be delivered another message synchronously.
// This should be called while holding the lock.
func (s *Subscription) validateNextMsgState() error {
if s.connClosed {
return ErrConnectionClosed
}
if s.mch == nil {
if s.max > 0 && s.delivered >= s.max {
return ErrMaxMessages
} else if s.closed {
return ErrBadSubscription
}
}
if s.mcb != nil {
return ErrSyncSubRequired
}
if s.sc {
s.sc = false
return ErrSlowConsumer
}
return nil
}
// This is called when the sync channel has been closed.
// The error returned will be either connection or subscription
// closed depending on what caused NextMsg() to fail.
func (s *Subscription) getNextMsgErr() error {
s.mu.Lock()
defer s.mu.Unlock()
if s.connClosed {
return ErrConnectionClosed
}
return ErrBadSubscription
}
// processNextMsgDelivered takes a message and applies the needed
// accounting to the stats from the subscription, returning an
// error if the maximum number of messages has already been
// delivered. It should not be called while holding the lock.
func (s *Subscription) processNextMsgDelivered(msg *Msg) error {
s.mu.Lock()
nc := s.conn
max := s.max
// Update some stats.
s.delivered++
delivered := s.delivered
if s.typ == SyncSubscription {
s.pMsgs--
s.pBytes -= len(msg.Data)
}
s.mu.Unlock()
if max > 0 {
if delivered > max {
return ErrMaxMessages
}
// Remove subscription if we have reached max.
if delivered == max {
nc.mu.Lock()
nc.removeSub(s)
nc.mu.Unlock()
}
}
return nil
}
// QueuedMsgs returns the number of queued messages in the client for this subscription.
// DEPRECATED: Use Pending()
func (s *Subscription) QueuedMsgs() (int, error) {
m, _, err := s.Pending()
return int(m), err
}
// Pending returns the number of queued messages and queued bytes in the client for this subscription.
func (s *Subscription) Pending() (int, int, error) {
if s == nil {
return -1, -1, ErrBadSubscription
}
s.mu.Lock()
defer s.mu.Unlock()
if s.conn == nil || s.closed {
return -1, -1, ErrBadSubscription
}
if s.typ == ChanSubscription {
return -1, -1, ErrTypeSubscription
}
return s.pMsgs, s.pBytes, nil
}
// MaxPending returns the maximum number of queued messages and queued bytes seen so far.
func (s *Subscription) MaxPending() (int, int, error) {
if s == nil {
return -1, -1, ErrBadSubscription
}
s.mu.Lock()
defer s.mu.Unlock()
if s.conn == nil || s.closed {
return -1, -1, ErrBadSubscription
}
if s.typ == ChanSubscription {
return -1, -1, ErrTypeSubscription
}
return s.pMsgsMax, s.pBytesMax, nil
}
// ClearMaxPending resets the maximums seen so far.
func (s *Subscription) ClearMaxPending() error {
if s == nil {
return ErrBadSubscription
}
s.mu.Lock()
defer s.mu.Unlock()
if s.conn == nil || s.closed {
return ErrBadSubscription
}
if s.typ == ChanSubscription {
return ErrTypeSubscription
}
s.pMsgsMax, s.pBytesMax = 0, 0
return nil
}
// Pending Limits
const (
DefaultSubPendingMsgsLimit = 65536
DefaultSubPendingBytesLimit = 65536 * 1024
)
// PendingLimits returns the current limits for this subscription.
// If no error is returned, a negative value indicates that the
// given metric is not limited.
func (s *Subscription) PendingLimits() (int, int, error) {
if s == nil {
return -1, -1, ErrBadSubscription
}
s.mu.Lock()
defer s.mu.Unlock()
if s.conn == nil || s.closed {
return -1, -1, ErrBadSubscription
}
if s.typ == ChanSubscription {
return -1, -1, ErrTypeSubscription
}
return s.pMsgsLimit, s.pBytesLimit, nil
}
// SetPendingLimits sets the limits for pending msgs and bytes for this subscription.
// Zero is not allowed. Any negative value means that the given metric is not limited.
func (s *Subscription) SetPendingLimits(msgLimit, bytesLimit int) error {
if s == nil {
return ErrBadSubscription
}
s.mu.Lock()
defer s.mu.Unlock()
if s.conn == nil || s.closed {
return ErrBadSubscription
}
if s.typ == ChanSubscription {
return ErrTypeSubscription
}
if msgLimit == 0 || bytesLimit == 0 {
return ErrInvalidArg
}
s.pMsgsLimit, s.pBytesLimit = msgLimit, bytesLimit
return nil
}
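// Illustrative usage (sketch, not from the original source): cap pending
// messages at 1024 while leaving the pending byte count unlimited:
//
//	if err := sub.SetPendingLimits(1024, -1); err != nil {
//	    log.Println("could not set pending limits:", err)
//	}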
// Delivered returns the number of delivered messages for this subscription.
func (s *Subscription) Delivered() (int64, error) {
if s == nil {
return -1, ErrBadSubscription
}
s.mu.Lock()
defer s.mu.Unlock()
if s.conn == nil || s.closed {
return -1, ErrBadSubscription
}
return int64(s.delivered), nil
}
// Dropped returns the number of known dropped messages for this subscription.
// This will correspond to messages dropped by violations of PendingLimits. If
// the server declares the connection a SlowConsumer, this number may not be
// valid.
func (s *Subscription) Dropped() (int, error) {
if s == nil {
return -1, ErrBadSubscription
}
s.mu.Lock()
defer s.mu.Unlock()
if s.conn == nil || s.closed {
return -1, ErrBadSubscription
}
return s.dropped, nil
}
// Respond allows a convenient way to respond to requests in service based subscriptions.
func (m *Msg) Respond(data []byte) error {
if m == nil || m.Sub == nil {
return ErrMsgNotBound
}
if m.Reply == "" {
return ErrMsgNoReply
}
m.Sub.mu.Lock()
nc := m.Sub.conn
m.Sub.mu.Unlock()
// No need to check the connection here since the call to publish will do all the checking.
return nc.Publish(m.Reply, data)
}
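// Illustrative usage (sketch, not from the original source): a minimal
// request/reply service built on Respond:
//
//	nc.Subscribe("time.now", func(m *Msg) {
//	    m.Respond([]byte(time.Now().UTC().Format(time.RFC3339)))
//	})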
// FIXME: This is a hack
// removeFlushEntry is needed when we need to discard queued up responses
// for our pings as part of a flush call. This happens when we have a flush
// call outstanding and we call close.
func (nc *Conn) removeFlushEntry(ch chan struct{}) bool {
nc.mu.Lock()
defer nc.mu.Unlock()
if nc.pongs == nil {
return false
}
for i, c := range nc.pongs {
if c == ch {
nc.pongs[i] = nil
return true
}
}
return false
}
// The lock must be held entering this function.
func (nc *Conn) sendPing(ch chan struct{}) {
nc.pongs = append(nc.pongs, ch)
nc.bw.WriteString(pingProto)
// Flush in place.
nc.bw.Flush()
}
// This will fire periodically and send a client origin
// ping to the server. Will also check that we have received
// responses from the server.
func (nc *Conn) processPingTimer() {
nc.mu.Lock()
if nc.status != CONNECTED {
nc.mu.Unlock()
return
}
// Check for violation
nc.pout++
if nc.pout > nc.Opts.MaxPingsOut {
nc.mu.Unlock()
nc.processOpErr(ErrStaleConnection)
return
}
nc.sendPing(nil)
nc.ptmr.Reset(nc.Opts.PingInterval)
nc.mu.Unlock()
}
// FlushTimeout allows a Flush operation to have an associated timeout.
func (nc *Conn) FlushTimeout(timeout time.Duration) (err error) {
if nc == nil {
return ErrInvalidConnection
}
if timeout <= 0 {
return ErrBadTimeout
}
nc.mu.Lock()
if nc.isClosed() {
nc.mu.Unlock()
return ErrConnectionClosed
}
t := globalTimerPool.Get(timeout)
defer globalTimerPool.Put(t)
	// Create a buffered channel so the chan send in processPong()
	// does not block if this call times out just as the PONG
	// is received.
ch := make(chan struct{}, 1)
nc.sendPing(ch)
nc.mu.Unlock()
select {
case _, ok := <-ch:
if !ok {
err = ErrConnectionClosed
} else {
close(ch)
}
case <-t.C:
err = ErrTimeout
}
if err != nil {
nc.removeFlushEntry(ch)
}
return
}
// Flush will perform a round trip to the server and return when it
// receives the internal reply.
func (nc *Conn) Flush() error {
return nc.FlushTimeout(60 * time.Second)
}
// Buffered will return the number of bytes buffered to be sent to the server.
// FIXME(dlc) take into account disconnected state.
func (nc *Conn) Buffered() (int, error) {
nc.mu.RLock()
defer nc.mu.RUnlock()
if nc.isClosed() || nc.bw == nil {
return -1, ErrConnectionClosed
}
return nc.bw.Buffered(), nil
}
// resendSubscriptions will send our subscription state back to the
// server. Used in reconnects
func (nc *Conn) resendSubscriptions() {
// Since we are going to send protocols to the server, we don't want to
// be holding the subsMu lock (which is used in processMsg). So copy
// the subscriptions in a temporary array.
nc.subsMu.RLock()
subs := make([]*Subscription, 0, len(nc.subs))
for _, s := range nc.subs {
subs = append(subs, s)
}
nc.subsMu.RUnlock()
for _, s := range subs {
adjustedMax := uint64(0)
s.mu.Lock()
if s.max > 0 {
if s.delivered < s.max {
adjustedMax = s.max - s.delivered
}
// adjustedMax could be 0 here if the number of delivered msgs
// reached the max, if so unsubscribe.
if adjustedMax == 0 {
s.mu.Unlock()
fmt.Fprintf(nc.bw, unsubProto, s.sid, _EMPTY_)
continue
}
}
s.mu.Unlock()
fmt.Fprintf(nc.bw, subProto, s.Subject, s.Queue, s.sid)
if adjustedMax > 0 {
maxStr := strconv.Itoa(int(adjustedMax))
fmt.Fprintf(nc.bw, unsubProto, s.sid, maxStr)
}
}
}
// This will clear any pending flush calls and release the callers blocked on them.
// Lock is assumed to be held by the caller.
func (nc *Conn) clearPendingFlushCalls() {
// Clear any queued pongs, e.g. pending flush calls.
for _, ch := range nc.pongs {
if ch != nil {
close(ch)
}
}
nc.pongs = nil
}
// This will clear any pending Request calls.
// Lock is assumed to be held by the caller.
func (nc *Conn) clearPendingRequestCalls() {
if nc.respMap == nil {
return
}
for key, ch := range nc.respMap {
if ch != nil {
close(ch)
delete(nc.respMap, key)
}
}
}
// Low level close call that will do correct cleanup and set
// desired status. Also controls whether user defined callbacks
// will be triggered. The lock should not be held entering this
// function. This function will handle the locking manually.
func (nc *Conn) close(status Status, doCBs bool, err error) {
nc.mu.Lock()
if nc.isClosed() {
nc.status = status
nc.mu.Unlock()
return
}
nc.status = CLOSED
// Kick the Go routines so they fall out.
nc.kickFlusher()
nc.mu.Unlock()
nc.mu.Lock()
// Clear any queued pongs, e.g. pending flush calls.
nc.clearPendingFlushCalls()
// Clear any queued and blocking Requests.
nc.clearPendingRequestCalls()
// Stop ping timer if set.
nc.stopPingTimer()
nc.ptmr = nil
// Need to close and set tcp conn to nil if reconnect loop has stopped,
// otherwise we would incorrectly invoke Disconnect handler (if set)
// down below.
if nc.ar && nc.conn != nil {
nc.conn.Close()
nc.conn = nil
} else if nc.conn != nil {
// Go ahead and make sure we have flushed the outbound
nc.bw.Flush()
defer nc.conn.Close()
}
// Close sync subscriber channels and release any
// pending NextMsg() calls.
nc.subsMu.Lock()
for _, s := range nc.subs {
s.mu.Lock()
// Release callers on NextMsg for SyncSubscription only
if s.mch != nil && s.typ == SyncSubscription {
close(s.mch)
}
s.mch = nil
// Mark as invalid, for signaling to deliverMsgs
s.closed = true
// Mark connection closed in subscription
s.connClosed = true
// If we have an async subscription, signals it to exit
if s.typ == AsyncSubscription && s.pCond != nil {
s.pCond.Signal()
}
s.mu.Unlock()
}
nc.subs = nil
nc.subsMu.Unlock()
nc.status = status
// Perform appropriate callback if needed for a disconnect.
if doCBs {
if nc.conn != nil {
if nc.Opts.DisconnectedErrCB != nil {
nc.ach.push(func() { nc.Opts.DisconnectedErrCB(nc, err) })
} else if nc.Opts.DisconnectedCB != nil {
nc.ach.push(func() { nc.Opts.DisconnectedCB(nc) })
}
}
if nc.Opts.ClosedCB != nil {
nc.ach.push(func() { nc.Opts.ClosedCB(nc) })
}
}
// If this is terminal, then we have to notify the asyncCB handler that
// it can exit once all async cbs have been dispatched.
if status == CLOSED {
nc.ach.close()
}
nc.mu.Unlock()
}
// Close will close the connection to the server. This call will release
// all blocking calls, such as Flush() and NextMsg().
func (nc *Conn) Close() {
if nc != nil {
nc.close(CLOSED, !nc.Opts.NoCallbacksAfterClientClose, nil)
}
}
// IsClosed tests if a Conn has been closed.
func (nc *Conn) IsClosed() bool {
nc.mu.RLock()
defer nc.mu.RUnlock()
return nc.isClosed()
}
// IsReconnecting tests if a Conn is reconnecting.
func (nc *Conn) IsReconnecting() bool {
nc.mu.RLock()
defer nc.mu.RUnlock()
return nc.isReconnecting()
}
// IsConnected tests if a Conn is connected.
func (nc *Conn) IsConnected() bool {
nc.mu.RLock()
defer nc.mu.RUnlock()
return nc.isConnected()
}
// drainConnection will run in a separate Go routine and will
// flush all publishes and drain all active subscriptions.
func (nc *Conn) drainConnection() {
// Snapshot subs list.
nc.mu.Lock()
// Check again here if we are in a state to not process.
if nc.isClosed() {
nc.mu.Unlock()
return
}
if nc.isConnecting() || nc.isReconnecting() {
nc.mu.Unlock()
// Move to closed state.
nc.close(CLOSED, true, nil)
return
}
subs := make([]*Subscription, 0, len(nc.subs))
for _, s := range nc.subs {
subs = append(subs, s)
}
errCB := nc.Opts.AsyncErrorCB
drainWait := nc.Opts.DrainTimeout
nc.mu.Unlock()
// for pushing errors with context.
pushErr := func(err error) {
nc.mu.Lock()
nc.err = err
if errCB != nil {
nc.ach.push(func() { errCB(nc, nil, err) })
}
nc.mu.Unlock()
}
// Do subs first
for _, s := range subs {
if err := s.Drain(); err != nil {
// We will notify about these but continue.
pushErr(err)
}
}
// Wait for the subscriptions to drop to zero.
timeout := time.Now().Add(drainWait)
for time.Now().Before(timeout) {
if nc.NumSubscriptions() == 0 {
break
}
time.Sleep(10 * time.Millisecond)
}
// Check if we timed out.
if nc.NumSubscriptions() != 0 {
pushErr(ErrDrainTimeout)
}
// Flip State
nc.mu.Lock()
nc.status = DRAINING_PUBS
nc.mu.Unlock()
// Do publish drain via Flush() call.
err := nc.FlushTimeout(5 * time.Second)
if err != nil {
pushErr(err)
nc.close(CLOSED, true, nil)
return
}
// Move to closed state.
nc.close(CLOSED, true, nil)
}
// Drain will put a connection into a drain state. All subscriptions will
// immediately be put into a drain state. Upon completion, the publishers
// will be drained and cannot publish any additional messages. Upon draining
// of the publishers, the connection will be closed. Use the ClosedCB()
// option to know when the connection has moved from draining to closed.
func (nc *Conn) Drain() error {
nc.mu.Lock()
if nc.isClosed() {
nc.mu.Unlock()
return ErrConnectionClosed
}
if nc.isConnecting() || nc.isReconnecting() {
nc.mu.Unlock()
nc.close(CLOSED, true, nil)
return ErrConnectionReconnecting
}
if nc.isDraining() {
nc.mu.Unlock()
return nil
}
nc.status = DRAINING_SUBS
go nc.drainConnection()
nc.mu.Unlock()
return nil
}
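// Illustrative usage (sketch, not from the original source): pair Drain with
// the ClosedHandler option to learn when the drained connection has fully
// closed:
//
//	done := make(chan struct{})
//	nc, _ := Connect(DefaultURL, ClosedHandler(func(*Conn) { close(done) }))
//	// ... subscribe, publish ...
//	nc.Drain()
//	<-done // subscriptions drained, publishes flushed, connection closed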
// IsDraining tests if a Conn is in the draining state.
func (nc *Conn) IsDraining() bool {
nc.mu.RLock()
defer nc.mu.RUnlock()
return nc.isDraining()
}
// caller must lock
func (nc *Conn) getServers(implicitOnly bool) []string {
poolSize := len(nc.srvPool)
var servers = make([]string, 0)
for i := 0; i < poolSize; i++ {
if implicitOnly && !nc.srvPool[i].isImplicit {
continue
}
url := nc.srvPool[i].url
servers = append(servers, fmt.Sprintf("%s://%s", url.Scheme, url.Host))
}
return servers
}
// Servers returns the list of known server urls, including additional
// servers discovered after a connection has been established. If
// authentication is enabled, use UserInfo or Token when connecting with
// these urls.
func (nc *Conn) Servers() []string {
nc.mu.RLock()
defer nc.mu.RUnlock()
return nc.getServers(false)
}
// DiscoveredServers returns only the server urls that have been discovered
// after a connection has been established. If authentication is enabled,
// use UserInfo or Token when connecting with these urls.
func (nc *Conn) DiscoveredServers() []string {
nc.mu.RLock()
defer nc.mu.RUnlock()
return nc.getServers(true)
}
// Status returns the current state of the connection.
func (nc *Conn) Status() Status {
nc.mu.RLock()
defer nc.mu.RUnlock()
return nc.status
}
// Test if Conn has been closed. Lock is assumed held.
func (nc *Conn) isClosed() bool {
return nc.status == CLOSED
}
// Test if Conn is in the process of connecting
func (nc *Conn) isConnecting() bool {
return nc.status == CONNECTING
}
// Test if Conn is being reconnected.
func (nc *Conn) isReconnecting() bool {
return nc.status == RECONNECTING
}
// Test if Conn is connected or draining.
func (nc *Conn) isConnected() bool {
return nc.status == CONNECTED || nc.isDraining()
}
// Test if Conn is in the draining state.
func (nc *Conn) isDraining() bool {
return nc.status == DRAINING_SUBS || nc.status == DRAINING_PUBS
}
// Test if Conn is in the draining state for pubs.
func (nc *Conn) isDrainingPubs() bool {
return nc.status == DRAINING_PUBS
}
// Stats will return a race safe copy of the Statistics section for the connection.
func (nc *Conn) Stats() Statistics {
// Stats are updated either under connection's mu or with atomic operations
// for inbound stats in processMsg().
nc.mu.Lock()
stats := Statistics{
InMsgs: atomic.LoadUint64(&nc.InMsgs),
InBytes: atomic.LoadUint64(&nc.InBytes),
OutMsgs: nc.OutMsgs,
OutBytes: nc.OutBytes,
Reconnects: nc.Reconnects,
}
nc.mu.Unlock()
return stats
}
// MaxPayload returns the size limit that a message payload can have.
// This is set by the server configuration and delivered to the client
// upon connect.
func (nc *Conn) MaxPayload() int64 {
nc.mu.RLock()
defer nc.mu.RUnlock()
return nc.info.MaxPayload
}
// AuthRequired will return if the connected server requires authorization.
func (nc *Conn) AuthRequired() bool {
nc.mu.RLock()
defer nc.mu.RUnlock()
return nc.info.AuthRequired
}
// TLSRequired will return if the connected server requires TLS connections.
func (nc *Conn) TLSRequired() bool {
nc.mu.RLock()
defer nc.mu.RUnlock()
return nc.info.TLSRequired
}
// Barrier schedules the given function `f` to run after all messages
// currently pending on the registered asynchronous subscriptions have
// been delivered.
// Only the last subscription to see this barrier will invoke the function.
// If no subscription is registered at the time of this call, `f()` is invoked
// right away.
// ErrConnectionClosed is returned if the connection is closed prior to
// the call.
func (nc *Conn) Barrier(f func()) error {
nc.mu.Lock()
if nc.isClosed() {
nc.mu.Unlock()
return ErrConnectionClosed
}
nc.subsMu.Lock()
// Need to figure out how many non chan subscriptions there are
numSubs := 0
for _, sub := range nc.subs {
if sub.typ == AsyncSubscription {
numSubs++
}
}
if numSubs == 0 {
nc.subsMu.Unlock()
nc.mu.Unlock()
f()
return nil
}
barrier := &barrierInfo{refs: int64(numSubs), f: f}
for _, sub := range nc.subs {
sub.mu.Lock()
if sub.mch == nil {
msg := &Msg{barrier: barrier}
// Push onto the async pList
if sub.pTail != nil {
sub.pTail.next = msg
} else {
sub.pHead = msg
sub.pCond.Signal()
}
sub.pTail = msg
}
sub.mu.Unlock()
}
nc.subsMu.Unlock()
nc.mu.Unlock()
return nil
}
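// Illustrative usage (sketch, not from the original source): after a batch of
// publishes, Barrier can signal once every message already delivered to the
// async callbacks has been processed:
//
//	done := make(chan struct{})
//	nc.Barrier(func() { close(done) })
//	<-done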
// GetClientID returns the client ID assigned by the server to which
// the client is currently connected. Note that the value may change if
// the client reconnects.
// This function returns ErrClientIDNotSupported if the server is of a
// version prior to 1.2.0.
func (nc *Conn) GetClientID() (uint64, error) {
nc.mu.RLock()
defer nc.mu.RUnlock()
if nc.isClosed() {
return 0, ErrConnectionClosed
}
if nc.info.CID == 0 {
return 0, ErrClientIDNotSupported
}
return nc.info.CID, nil
}
// NkeyOptionFromSeed will load an nkey pair from a seed file.
// It will return the NKey Option and will handle
// signing of nonce challenges from the server. It will take
// care to not hold keys in memory and to wipe memory.
func NkeyOptionFromSeed(seedFile string) (Option, error) {
kp, err := nkeyPairFromSeedFile(seedFile)
if err != nil {
return nil, err
}
// Wipe our key on exit.
defer kp.Wipe()
pub, err := kp.PublicKey()
if err != nil {
return nil, err
}
if !nkeys.IsValidPublicUserKey(pub) {
return nil, fmt.Errorf("nats: Not a valid nkey user seed")
}
sigCB := func(nonce []byte) ([]byte, error) {
return sigHandler(nonce, seedFile)
}
return Nkey(string(pub), sigCB), nil
}
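// Illustrative usage (sketch, not from the original source; the seed file
// path is hypothetical): connect using an nkey seed kept on disk:
//
//	opt, err := NkeyOptionFromSeed("./user.nk")
//	if err != nil {
//	    log.Fatal(err)
//	}
//	nc, err := Connect(DefaultURL, opt)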
// Just wipe the slice with 'x' to clear the contents of a creds or nkey seed file.
func wipeSlice(buf []byte) {
for i := range buf {
buf[i] = 'x'
}
}
func userFromFile(userFile string) (string, error) {
path, err := expandPath(userFile)
if err != nil {
return _EMPTY_, fmt.Errorf("nats: %v", err)
}
contents, err := ioutil.ReadFile(path)
if err != nil {
return _EMPTY_, fmt.Errorf("nats: %v", err)
}
defer wipeSlice(contents)
return jwt.ParseDecoratedJWT(contents)
}
func homeDir() (string, error) {
if runtime.GOOS == "windows" {
homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH")
userProfile := os.Getenv("USERPROFILE")
var home string
if homeDrive == "" || homePath == "" {
if userProfile == "" {
return _EMPTY_, errors.New("nats: failed to get home dir, require %HOMEDRIVE% and %HOMEPATH% or %USERPROFILE%")
}
home = userProfile
} else {
home = filepath.Join(homeDrive, homePath)
}
return home, nil
}
home := os.Getenv("HOME")
if home == "" {
return _EMPTY_, errors.New("nats: failed to get home dir, require $HOME")
}
return home, nil
}
func expandPath(p string) (string, error) {
p = os.ExpandEnv(p)
if !strings.HasPrefix(p, "~") {
return p, nil
}
home, err := homeDir()
if err != nil {
return _EMPTY_, err
}
return filepath.Join(home, p[1:]), nil
}
func nkeyPairFromSeedFile(seedFile string) (nkeys.KeyPair, error) {
contents, err := ioutil.ReadFile(seedFile)
if err != nil {
return nil, fmt.Errorf("nats: %v", err)
}
defer wipeSlice(contents)
return jwt.ParseDecoratedNKey(contents)
}
// Sign authentication challenges from the server.
// Do not keep private seed in memory.
func sigHandler(nonce []byte, seedFile string) ([]byte, error) {
kp, err := nkeyPairFromSeedFile(seedFile)
if err != nil {
return nil, err
}
// Wipe our key on exit.
defer kp.Wipe()
sig, _ := kp.Sign(nonce)
return sig, nil
}
type timeoutWriter struct {
timeout time.Duration
conn net.Conn
err error
}
// Write implements the io.Writer interface.
func (tw *timeoutWriter) Write(p []byte) (int, error) {
if tw.err != nil {
return 0, tw.err
}
var n int
tw.conn.SetWriteDeadline(time.Now().Add(tw.timeout))
n, tw.err = tw.conn.Write(p)
tw.conn.SetWriteDeadline(time.Time{})
return n, tw.err
}
| {
o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
} |
cachevalues.go | package cache
type DependenciesCache struct {
modulesPublished map[string]bool
successes int
failures int
total int
}
func (dc *DependenciesCache) GetMap() map[string]bool {
dc.initMap()
return dc.modulesPublished
}
func (dc *DependenciesCache) GetSuccesses() int {
return dc.successes
}
func (dc *DependenciesCache) GetFailures() int {
return dc.failures
}
func (dc *DependenciesCache) GetTotal() int {
return dc.total
}
func (dc *DependenciesCache) IncrementSuccess() {
	dc.successes++
}
func (dc *DependenciesCache) IncrementFailures() {
	dc.failures++
}
func (dc *DependenciesCache) IncrementTotal(sum int) {
dc.total += sum
}
func (dc *DependenciesCache) initMap() { | dc.modulesPublished = make(map[string]bool)
}
} | if dc.modulesPublished == nil { |
pair_test.go | package types_test
import (
"testing"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/stretchr/testify/require"
"github.com/cosmosquad-labs/squad/x/liquidity/types"
)
func | (t *testing.T) {
for _, tc := range []struct {
name string
malleate func(pair *types.Pair)
expectedErr string
}{
{
"happy case",
func(pair *types.Pair) {},
"",
},
{
"zero id",
func(pair *types.Pair) {
pair.Id = 0
},
"pair id must not be 0",
},
{
"invalid base coin denom",
func(pair *types.Pair) {
pair.BaseCoinDenom = "invalliddenom!"
},
"invalid base coin denom: invalid denom: invalliddenom!",
},
{
"invalid quote coin denom",
func(pair *types.Pair) {
pair.QuoteCoinDenom = "invaliddenom!"
},
"invalid quote coin denom: invalid denom: invaliddenom!",
},
{
"invalid escrow address",
func(pair *types.Pair) {
pair.EscrowAddress = "invalidaddr"
},
"invalid escrow address invalidaddr: decoding bech32 failed: invalid separator index -1",
},
{
"",
func(pair *types.Pair) {
p := sdk.NewDec(-1)
pair.LastPrice = &p
},
"last price must be positive: -1.000000000000000000",
},
{
"",
func(pair *types.Pair) {
pair.CurrentBatchId = 0
},
"current batch id must not be 0",
},
} {
t.Run(tc.name, func(t *testing.T) {
pair := types.NewPair(1, "denom1", "denom2")
tc.malleate(&pair)
err := pair.Validate()
if tc.expectedErr == "" {
require.NoError(t, err)
} else {
require.EqualError(t, err, tc.expectedErr)
}
})
}
}
func TestPairEscrowAddress(t *testing.T) {
for _, tc := range []struct {
pairId uint64
expected string
}{
{1, "cosmos17u9nx0h9cmhypp6cg9lf4q8ku9l3k8mz232su7m28m39lkz25dgqzkypxs"},
{2, "cosmos1dsm56ejte5wsvptgtlq8qy3qvw6vpgz8w3z77f7cyjkmayzq3fxsdtsn2d"},
} {
t.Run("", func(t *testing.T) {
require.Equal(t, tc.expected, types.PairEscrowAddress(tc.pairId).String())
})
}
}
| TestPair_Validate |
main.go | package main
import (
"fmt"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
_ "github.com/grpc-ecosystem/go-grpc-middleware/auth"
grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
"gome/api"
"gome/engine"
rpc "gome/grpc"
"gome/request"
"gome/utils"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
"log"
)
func main() | {
defer func() {
if err := recover(); err != nil {
fmt.Println(err)
}
}()
rpcListener := rpc.NewRpcListener()
listener := rpcListener.Listener
rpcServer := grpc.NewServer(
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
grpc_ctxtags.StreamServerInterceptor(),
grpc_opentracing.StreamServerInterceptor(),
//grpc_prometheus.StreamServerInterceptor,
//grpc_zap.StreamServerInterceptor(ZapInterceptor()),
grpc_zap.StreamServerInterceptor(utils.ZapFileInterceptor()),
//grpc_auth.StreamServerInterceptor(myAuthFunction),
grpc_recovery.StreamServerInterceptor(utils.RecoveryInterceptor()),
)),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
grpc_ctxtags.UnaryServerInterceptor(),
grpc_opentracing.UnaryServerInterceptor(),
//grpc_prometheus.UnaryServerInterceptor,
//grpc_zap.UnaryServerInterceptor(utils.ZapFileInterceptor(), request.GetOption()),
request.UnaryServerInterceptor(utils.ZapFileInterceptor()),
//grpc_auth.UnaryServerInterceptor(myAuthFunction),
grpc_recovery.UnaryServerInterceptor(utils.RecoveryInterceptor()),
)),
)
api.RegisterOrderServer(rpcServer, &engine.Order{})
api.RegisterPoolServer(rpcServer, &engine.Pool{})
reflection.Register(rpcServer)
if err := rpcServer.Serve(listener); err != nil {
log.Println("错误:", err)
log.Fatalln("服务启动失败")
}
}
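// Illustrative sketch (not part of the original source; the address and the
// generated api.NewOrderClient constructor are assumptions): a client could
// dial this server during development with:
//
//	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
//	if err != nil {
//	    log.Fatalln("dial failed:", err)
//	}
//	defer conn.Close()
//	client := api.NewOrderClient(conn)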
|
|
kdtree.py | # -*- coding:utf-8 -*-
import copy
import numpy as np
class KDTree:
def __init__(self, bucket_size, dimensions, parent=None):
self.bucket_size = bucket_size
        self.parent = parent
self.left = None
self.right = None
self.split_dimension = None
self.split_value = None
self.index_locations = []
self.location_count = 0
        self.min_limit = [np.inf] * dimensions
        self.max_limit = [-np.inf] * dimensions
self.dimensions = dimensions
def get_leaf(self, location):
if not self.left and not self.right:
return self
elif location[self.split_dimension] <= self.split_value:
return self.left.get_leaf(location)
else:
return self.right.get_leaf(location)
def add_point(self, index_location_tuple):
self.index_locations.append(index_location_tuple)
self.location_count += 1
self.extendBounds(index_location_tuple[1])
self.min_boundary = copy.deepcopy(self.min_limit)
self.max_boundary = copy.deepcopy(self.max_limit)
def extendBounds(self, location):
        # Bounds not initialized yet: adopt the first location directly.
        if self.min_limit is None:
self.min_limit = copy.deepcopy(location)
self.max_limit = copy.deepcopy(location)
return
        for i in range(self.dimensions):
self.min_limit[i] = min(self.min_limit[i], location[i])
self.max_limit[i] = max(self.max_limit[i], location[i])
def findWidestAxis(self):
widths = [self.max_limit[i] - self.min_limit[i] for i in range(self.dimensions)]
widest_axis = np.argmax(widths)
return widest_axis
def getNodes(self):
nodes = []
self.getNodesHelper(nodes)
return nodes
def getNodesHelper(self, nodes):
nodes.append(self)
if self.left:
self.left.getNodesHelper(nodes)
if self.right:
self.right.getNodesHelper(nodes)
def getLeaves(self):
leaves = []
self.getLeavesHelper(leaves)
return leaves
def getLeavesHelper(self, leaves):
if not self.right and not self.left:
leaves.append(self)
else:
if self.left:
self.left.getLeavesHelper(leaves)
if self.right:
self.right.getLeavesHelper(leaves)
def balance(self):
self.nodeSplit(self)
def nodeSplit(self, cursor, empty_non_leaf=True):
if cursor.location_count > cursor.bucket_size:
cursor.split_dimension = cursor.findWidestAxis()
# the partition method is the median of all values in the widest dimension
cursor.split_value = np.median([cursor.index_locations[i][1][cursor.split_dimension] for i in range(cursor.location_count)])
# if width is 0 (all the values are the same) don't partition
if cursor.min_limit[cursor.split_dimension] == cursor.max_limit[cursor.split_dimension]:
return
# Don't let the split value be the same as the upper value as
# can happen due to rounding errors!
if cursor.split_value == cursor.max_limit[cursor.split_dimension]:
cursor.split_value = cursor.min_limit[cursor.split_dimension]
cursor.left = KDTree(bucket_size=cursor.bucket_size, dimensions=cursor.dimensions, parent=cursor)
cursor.right = KDTree(bucket_size=cursor.bucket_size, dimensions=cursor.dimensions, parent=cursor)
cursor.left.min_boundary = copy.deepcopy(cursor.min_boundary)
cursor.left.max_boundary = copy.deepcopy(cursor.max_boundary)
cursor.right.min_boundary = copy.deepcopy(cursor.min_boundary)
cursor.right.max_boundary = copy.deepcopy(cursor.max_boundary)
cursor.left.max_boundary[cursor.split_dimension] = cursor.split_value
cursor.right.min_boundary[cursor.split_dimension] = cursor.split_value
for index_loc in cursor.index_locations:
if index_loc[1][cursor.split_dimension] > cursor.split_value:
cursor.right.index_locations.append(index_loc)
cursor.right.location_count += 1
cursor.right.extendBounds(index_loc[1])
else:
cursor.left.index_locations.append(index_loc)
cursor.left.location_count += 1
cursor.left.extendBounds(index_loc[1])
if empty_non_leaf:
cursor.index_locations = []
cursor.nodeSplit(cursor.left)
cursor.nodeSplit(cursor.right)
class KDTreeClustering:
def __init__(self, bucket_size=10):
self.bucket_size = bucket_size
self.is_fitted = False
def | (self, X):
# X is an array
if hasattr(X, 'shape'):
n_samples = X.shape[0]
dimensions = X.shape[1]
else:
n_samples = len(X)
dimensions = len(X[0])
self.kdtree = KDTree(bucket_size=self.bucket_size, dimensions=dimensions, parent=None)
        for i in range(n_samples):
self.kdtree.add_point((i, X[i]))
self.kdtree.nodeSplit(cursor=self.kdtree, empty_non_leaf=True)
        # Each leaf bucket becomes one cluster.
        clusters = [leaf.index_locations for leaf in self.kdtree.getLeaves()]
results = np.zeros((n_samples,), dtype=int)
for i, id_locs in enumerate(clusters):
for id, l in id_locs:
results[id] = i
self.clusters = results
self.num_clusters = len(clusters)
self.is_fitted = True
    def get_clusters(self):
        if self.is_fitted:
            return self.clusters
        raise RuntimeError("fit() must be called before get_clusters()")
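# Illustrative usage (sketch, not part of the original module): clustering
# random 2-D points; each input point receives the id of the leaf bucket it
# fell into.
#
#   pts = np.random.rand(200, 2)
#   kc = KDTreeClustering(bucket_size=10)
#   kc.fit(pts)
#   labels = kc.get_clusters()  # ndarray of shape (200,), cluster ids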
if __name__ == '__main__':
# tree = KDTree(300, 2)
import params
import geolocate
geolocate.initialize(granularity=params.BUCKET_SIZE, write=False, readText=True, reload_init=False, regression=False)
locations = [geolocate.locationStr2Float(loc) for loc in params.trainUsers.values()]
clusterer = KDTreeClustering(bucket_size=params.BUCKET_SIZE)
clusterer.fit(locations)
clusters = clusterer.get_clusters()
| fit |