file_name (string, lengths 3–137) | prefix (string, lengths 0–918k) | suffix (string, lengths 0–962k) | middle (string, lengths 0–812k) |
---|---|---|---|
jmap.rs | use crate::{
errors::*,
objects::{AutoLocal, JMethodID, JObject},
signature::{JavaType, Primitive},
JNIEnv,
};
/// Wrapper for JObjects that implement `java/util/Map`. Provides methods to get
/// and set entries and a way to iterate over key/value pairs.
///
/// Looks up the class and method ids on creation rather than for every method
/// call.
pub struct JMap<'a: 'b, 'b> {
internal: JObject<'a>,
class: AutoLocal<'a, 'b>,
get: JMethodID<'a>,
put: JMethodID<'a>,
remove: JMethodID<'a>,
env: &'b JNIEnv<'a>,
}
impl<'a: 'b, 'b> ::std::ops::Deref for JMap<'a, 'b> {
type Target = JObject<'a>;
fn deref(&self) -> &Self::Target {
&self.internal
}
}
impl<'a: 'b, 'b> From<JMap<'a, 'b>> for JObject<'a> {
fn from(other: JMap<'a, 'b>) -> JObject<'a> {
other.internal
}
}
impl<'a: 'b, 'b> JMap<'a, 'b> {
/// Create a map from the environment and an object. This looks up the
/// necessary class and method ids to call all of the methods on it so that
/// extra work doesn't need to be done on every method call.
pub fn from_env(env: &'b JNIEnv<'a>, obj: JObject<'a>) -> Result<JMap<'a, 'b>> {
let class = env.auto_local(env.find_class("java/util/Map")?.into());
let get = env.get_method_id(&class, "get", "(Ljava/lang/Object;)Ljava/lang/Object;")?;
let put = env.get_method_id(
&class,
"put",
"(Ljava/lang/Object;Ljava/lang/Object;\
)Ljava/lang/Object;",
)?;
let remove =
env.get_method_id(&class, "remove", "(Ljava/lang/Object;)Ljava/lang/Object;")?;
Ok(JMap {
internal: obj,
class,
get,
put,
remove,
env,
})
}
/// Look up the value for a key. Returns `Some` if it's found and `None` if
/// a null pointer would be returned.
pub fn get(&self, key: JObject<'a>) -> Result<Option<JObject<'a>>> {
let result = self.env.call_method_unchecked(
self.internal,
self.get,
JavaType::Object("java/lang/Object".into()),
&[key.into()],
);
match result {
Ok(val) => Ok(Some(val.l()?)),
Err(e) => match *e.kind() {
ErrorKind::NullPtr(_) => Ok(None),
_ => Err(e),
},
}
}
/// Insert a key/value pair into the map. Returns `Some` with the old value
/// if the key already existed and `None` if it's a new key.
pub fn put(&self, key: JObject<'a>, value: JObject<'a>) -> Result<Option<JObject<'a>>> {
let result = self.env.call_method_unchecked(
self.internal,
self.put,
JavaType::Object("java/lang/Object".into()),
&[key.into(), value.into()],
);
match result {
Ok(val) => Ok(Some(val.l()?)),
Err(e) => match *e.kind() {
ErrorKind::NullPtr(_) => Ok(None),
_ => Err(e),
},
}
}
/// Remove a value from the map. Returns `Some` with the removed value and
/// `None` if there was no value for the key.
pub fn remove(&self, key: JObject<'a>) -> Result<Option<JObject<'a>>> {
let result = self.env.call_method_unchecked(
self.internal,
self.remove,
JavaType::Object("java/lang/Object".into()),
&[key.into()],
);
match result {
Ok(val) => Ok(Some(val.l()?)),
Err(e) => match *e.kind() {
ErrorKind::NullPtr(_) => Ok(None),
_ => Err(e),
},
}
}
/// Get a key/value iterator for the map. This is done by getting the
/// map's entry `Set` from Java and iterating over it.
pub fn iter(&self) -> Result<JMapIter<'a, 'b, '_>> {
let iter_class = self
.env
.auto_local(self.env.find_class("java/util/Iterator")?.into());
let has_next = self.env.get_method_id(&iter_class, "hasNext", "()Z")?;
let next = self
.env
.get_method_id(&iter_class, "next", "()Ljava/lang/Object;")?;
let entry_class = self
.env
.auto_local(self.env.find_class("java/util/Map$Entry")?.into());
let get_key = self
.env
.get_method_id(&entry_class, "getKey", "()Ljava/lang/Object;")?;
let get_value = self
.env
.get_method_id(&entry_class, "getValue", "()Ljava/lang/Object;")?;
// Get the iterator over Map entries.
// Use the local frame till #109 is resolved, so that implicitly looked-up
// classes are freed promptly.
let iter = self.env.with_local_frame(16, || {
let entry_set = self
.env
.call_method_unchecked(
self.internal,
(&self.class, "entrySet", "()Ljava/util/Set;"),
JavaType::Object("java/util/Set".into()),
&[],
)?
.l()?;
let iter = self
.env
.call_method_unchecked(
entry_set,
("java/util/Set", "iterator", "()Ljava/util/Iterator;"),
JavaType::Object("java/util/Iterator".into()),
&[],
)?
.l()?;
Ok(iter)
})?;
let iter = self.env.auto_local(iter);
Ok(JMapIter {
map: self,
has_next,
next,
get_key,
get_value,
iter,
})
}
}
/// An iterator over the keys and values in a map.
///
/// TODO: make the iterator implementation for java iterators its own thing
/// and generic enough to use elsewhere.
pub struct JMapIter<'a, 'b, 'c> {
map: &'c JMap<'a, 'b>,
has_next: JMethodID<'a>,
next: JMethodID<'a>,
get_key: JMethodID<'a>,
get_value: JMethodID<'a>,
iter: AutoLocal<'a, 'b>,
}
impl<'a: 'b, 'b: 'c, 'c> JMapIter<'a, 'b, 'c> {
fn | (&self) -> Result<Option<(JObject<'a>, JObject<'a>)>> {
let iter = self.iter.as_obj();
let has_next = self
.map
.env
.call_method_unchecked(
iter,
self.has_next,
JavaType::Primitive(Primitive::Boolean),
&[],
)?
.z()?;
if !has_next {
return Ok(None);
}
let next = self
.map
.env
.call_method_unchecked(
iter,
self.next,
JavaType::Object("java/util/Map$Entry".into()),
&[],
)?
.l()?;
let key = self
.map
.env
.call_method_unchecked(
next,
self.get_key,
JavaType::Object("java/lang/Object".into()),
&[],
)?
.l()?;
let value = self
.map
.env
.call_method_unchecked(
next,
self.get_value,
JavaType::Object("java/lang/Object".into()),
&[],
)?
.l()?;
Ok(Some((key, value)))
}
}
impl<'a: 'b, 'b: 'c, 'c> Iterator for JMapIter<'a, 'b, 'c> {
type Item = (JObject<'a>, JObject<'a>);
fn next(&mut self) -> Option<Self::Item> {
match self.get_next() {
Ok(Some(n)) => Some(n),
_ => None,
}
}
}
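// A minimal usage sketch (hypothetical: assumes a valid `env: &JNIEnv` and a
// `java/util/Map` object `obj` obtained elsewhere; names are illustrative):
//
//     let map = JMap::from_env(&env, obj)?;
//     map.put(key, value)?;
//     assert!(map.get(key)?.is_some());
//     for (k, v) in map.iter()? {
//         // use each key/value pair
//     }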
| get_next |
admin.py | from django.contrib import admin
from .models import Cart, CartItem
class CartItemInline(admin.TabularInline):
model = CartItem
| class CartAdmin(admin.ModelAdmin):
inlines = [CartItemInline] | @admin.register(Cart) |
renderContentTypeId.ts | import { upperFirst, camelCase } from "lodash"
export default function | (contentTypeId: string): string {
return "I" + upperFirst(camelCase(contentTypeId))
}
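// Illustrative results (hypothetical inputs):
//   renderContentTypeId("blog-post") // => "IBlogPost"
//   renderContentTypeId("blogPost")  // => "IBlogPost"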
| renderContentTypeId |
commitment_service.rs | use crate::{consensus::Stake, rpc_subscriptions::RpcSubscriptions};
use panoptis_measure::measure::Measure;
use solana_metrics::datapoint_info;
use solana_runtime::{
bank::Bank,
commitment::{BlockCommitment, BlockCommitmentCache, CommitmentSlots, VOTE_THRESHOLD_SIZE},
};
use solana_sdk::clock::Slot;
use solana_vote_program::vote_state::VoteState;
use std::{
cmp::max,
collections::HashMap,
sync::atomic::{AtomicBool, Ordering},
sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender},
sync::{Arc, RwLock},
thread::{self, Builder, JoinHandle},
time::Duration,
};
pub struct CommitmentAggregationData {
bank: Arc<Bank>,
root: Slot,
total_stake: Stake,
}
impl CommitmentAggregationData {
pub fn new(bank: Arc<Bank>, root: Slot, total_stake: Stake) -> Self {
Self {
bank,
root,
total_stake,
}
}
}
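// Scans roots from highest slot to lowest and returns the first slot where the
// accumulated stake exceeds the supermajority threshold (VOTE_THRESHOLD_SIZE).
// Worked example, mirroring the test below: with rooted_stake
// [(1, 5), (0, 10), (2, 5), (1, 4)] and total_stake 10, the descending scan
// accumulates 5 at slot 2 (half the stake, not enough), then reaches 10 at
// slot 1 (all of the stake), so slot 1 is returned.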
fn get_highest_confirmed_root(mut rooted_stake: Vec<(Slot, u64)>, total_stake: u64) -> Slot {
rooted_stake.sort_by(|a, b| a.0.cmp(&b.0).reverse());
let mut stake_sum = 0;
for (root, stake) in rooted_stake {
stake_sum += stake;
if (stake_sum as f64 / total_stake as f64) > VOTE_THRESHOLD_SIZE {
return root;
}
}
0
}
pub struct AggregateCommitmentService {
t_commitment: JoinHandle<()>,
}
impl AggregateCommitmentService {
pub fn new(
exit: &Arc<AtomicBool>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
subscriptions: Arc<RpcSubscriptions>,
) -> (Sender<CommitmentAggregationData>, Self) {
let (sender, receiver): (
Sender<CommitmentAggregationData>,
Receiver<CommitmentAggregationData>,
) = channel();
let exit_ = exit.clone();
(
sender,
Self {
t_commitment: Builder::new()
.name("solana-aggregate-stake-lockouts".to_string())
.spawn(move || loop {
if exit_.load(Ordering::Relaxed) {
break;
}
if let Err(RecvTimeoutError::Disconnected) =
Self::run(&receiver, &block_commitment_cache, &subscriptions, &exit_)
{
break;
}
})
.unwrap(),
},
)
}
fn run(
receiver: &Receiver<CommitmentAggregationData>,
block_commitment_cache: &RwLock<BlockCommitmentCache>,
subscriptions: &Arc<RpcSubscriptions>,
exit: &Arc<AtomicBool>,
) -> Result<(), RecvTimeoutError> {
loop {
if exit.load(Ordering::Relaxed) {
return Ok(());
}
let mut aggregation_data = receiver.recv_timeout(Duration::from_secs(1))?;
while let Ok(new_data) = receiver.try_recv() {
aggregation_data = new_data;
}
let ancestors = aggregation_data.bank.status_cache_ancestors();
if ancestors.is_empty() {
continue;
}
let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
let update_commitment_slots =
Self::update_commitment_cache(block_commitment_cache, aggregation_data, ancestors);
aggregate_commitment_time.stop();
datapoint_info!(
"block-commitment-cache",
(
"aggregate-commitment-ms",
aggregate_commitment_time.as_ms() as i64,
i64
),
(
"highest-confirmed-root",
update_commitment_slots.highest_confirmed_root as i64,
i64
),
(
"highest-confirmed-slot",
update_commitment_slots.highest_confirmed_slot as i64,
i64
),
);
// Triggers rpc_subscription notifications as soon as new commitment data is available,
// sending just the commitment cache slot information that the notifications thread
// needs
subscriptions.notify_subscribers(update_commitment_slots);
}
}
fn update_commitment_cache(
block_commitment_cache: &RwLock<BlockCommitmentCache>,
aggregation_data: CommitmentAggregationData,
ancestors: Vec<u64>,
) -> CommitmentSlots {
let (block_commitment, rooted_stake) =
Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
let highest_confirmed_root =
get_highest_confirmed_root(rooted_stake, aggregation_data.total_stake);
let mut new_block_commitment = BlockCommitmentCache::new(
block_commitment,
aggregation_data.total_stake,
CommitmentSlots {
slot: aggregation_data.bank.slot(),
root: aggregation_data.root,
highest_confirmed_slot: aggregation_data.root,
highest_confirmed_root,
},
);
let highest_confirmed_slot = new_block_commitment.calculate_highest_confirmed_slot();
new_block_commitment.set_highest_confirmed_slot(highest_confirmed_slot);
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
let highest_confirmed_root = max(
new_block_commitment.highest_confirmed_root(),
w_block_commitment_cache.highest_confirmed_root(),
);
new_block_commitment.set_highest_confirmed_root(highest_confirmed_root);
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
w_block_commitment_cache.commitment_slots()
}
pub fn aggregate_commitment(
ancestors: &[Slot],
bank: &Bank,
) -> (HashMap<Slot, BlockCommitment>, Vec<(Slot, u64)>) {
assert!(!ancestors.is_empty());
// Check ancestors is sorted
for a in ancestors.windows(2) {
assert!(a[0] < a[1]);
}
let mut commitment = HashMap::new();
let mut rooted_stake: Vec<(Slot, u64)> = Vec::new();
for (_, (lamports, account)) in bank.vote_accounts().into_iter() {
if lamports == 0 {
continue;
}
if let Ok(vote_state) = account.vote_state().as_ref() {
Self::aggregate_commitment_for_vote_account(
&mut commitment,
&mut rooted_stake,
vote_state,
ancestors,
lamports,
);
}
}
(commitment, rooted_stake)
}
fn aggregate_commitment_for_vote_account(
commitment: &mut HashMap<Slot, BlockCommitment>,
rooted_stake: &mut Vec<(Slot, u64)>,
vote_state: &VoteState,
ancestors: &[Slot],
lamports: u64,
) {
assert!(!ancestors.is_empty());
let mut ancestors_index = 0;
if let Some(root) = vote_state.root_slot {
for (i, a) in ancestors.iter().enumerate() {
if *a <= root {
commitment
.entry(*a)
.or_insert_with(BlockCommitment::default)
.increase_rooted_stake(lamports);
} else {
ancestors_index = i;
break;
}
}
rooted_stake.push((root, lamports));
}
for vote in &vote_state.votes {
while ancestors[ancestors_index] <= vote.slot {
commitment
.entry(ancestors[ancestors_index])
.or_insert_with(BlockCommitment::default)
.increase_confirmation_stake(vote.confirmation_count as usize, lamports);
ancestors_index += 1;
if ancestors_index == ancestors.len() {
return;
}
}
}
}
pub fn join(self) -> thread::Result<()> {
self.t_commitment.join()
}
}
#[cfg(test)]
mod tests {
use super::*;
use panoptis_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_runtime::{
accounts_background_service::AbsRequestSender,
bank_forks::BankForks,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
};
use solana_sdk::{account::Account, pubkey::Pubkey, signature::Signer};
use solana_stake_program::stake_state;
use solana_vote_program::{
vote_state::{self, VoteStateVersions},
vote_transaction,
};
#[test]
fn test_get_highest_confirmed_root() {
assert_eq!(get_highest_confirmed_root(vec![], 10), 0);
let rooted_stake = vec![(0, 5), (1, 5)];
assert_eq!(get_highest_confirmed_root(rooted_stake, 10), 0);
let rooted_stake = vec![(1, 5), (0, 10), (2, 5), (1, 4)];
assert_eq!(get_highest_confirmed_root(rooted_stake, 10), 1);
}
#[test]
fn test_aggregate_commitment_for_vote_account_1() {
let ancestors = vec![3, 4, 5, 7, 9, 11];
let mut commitment = HashMap::new();
let mut rooted_stake = vec![];
let lamports = 5;
let mut vote_state = VoteState::default();
let root = *ancestors.last().unwrap();
vote_state.root_slot = Some(root);
AggregateCommitmentService::aggregate_commitment_for_vote_account(
&mut commitment,
&mut rooted_stake,
&vote_state,
&ancestors,
lamports,
);
for a in ancestors {
let mut expected = BlockCommitment::default();
expected.increase_rooted_stake(lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
}
assert_eq!(rooted_stake[0], (root, lamports));
}
#[test]
fn test_aggregate_commitment_for_vote_account_2() {
let ancestors = vec![3, 4, 5, 7, 9, 11];
let mut commitment = HashMap::new();
let mut rooted_stake = vec![];
let lamports = 5;
let mut vote_state = VoteState::default();
let root = ancestors[2];
vote_state.root_slot = Some(root);
vote_state.process_slot_vote_unchecked(*ancestors.last().unwrap());
AggregateCommitmentService::aggregate_commitment_for_vote_account(
&mut commitment,
&mut rooted_stake,
&vote_state,
&ancestors,
lamports,
);
for a in ancestors {
let mut expected = BlockCommitment::default();
if a <= root {
expected.increase_rooted_stake(lamports);
} else {
expected.increase_confirmation_stake(1, lamports);
}
assert_eq!(*commitment.get(&a).unwrap(), expected);
}
assert_eq!(rooted_stake[0], (root, lamports));
}
#[test]
fn test_aggregate_commitment_for_vote_account_3() {
let ancestors = vec![3, 4, 5, 7, 9, 10, 11];
let mut commitment = HashMap::new();
let mut rooted_stake = vec![];
let lamports = 5;
let mut vote_state = VoteState::default();
let root = ancestors[2];
vote_state.root_slot = Some(root);
assert!(ancestors[4] + 2 >= ancestors[6]);
vote_state.process_slot_vote_unchecked(ancestors[4]);
vote_state.process_slot_vote_unchecked(ancestors[6]);
AggregateCommitmentService::aggregate_commitment_for_vote_account(
&mut commitment,
&mut rooted_stake,
&vote_state,
&ancestors,
lamports,
);
for (i, a) in ancestors.iter().enumerate() {
if *a <= root | else if i <= 4 {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(2, lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
} else if i <= 6 {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(1, lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
}
}
assert_eq!(rooted_stake[0], (root, lamports));
}
#[test]
fn test_aggregate_commitment_validity() {
let ancestors = vec![3, 4, 5, 7, 9, 10, 11];
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config(10_000);
let rooted_stake_amount = 40;
let sk1 = solana_sdk::pubkey::new_rand();
let pk1 = solana_sdk::pubkey::new_rand();
let mut vote_account1 =
vote_state::create_account(&pk1, &solana_sdk::pubkey::new_rand(), 0, 100);
let stake_account1 =
stake_state::create_account(&sk1, &pk1, &vote_account1, &genesis_config.rent, 100);
let sk2 = solana_sdk::pubkey::new_rand();
let pk2 = solana_sdk::pubkey::new_rand();
let mut vote_account2 =
vote_state::create_account(&pk2, &solana_sdk::pubkey::new_rand(), 0, 50);
let stake_account2 =
stake_state::create_account(&sk2, &pk2, &vote_account2, &genesis_config.rent, 50);
let sk3 = solana_sdk::pubkey::new_rand();
let pk3 = solana_sdk::pubkey::new_rand();
let mut vote_account3 =
vote_state::create_account(&pk3, &solana_sdk::pubkey::new_rand(), 0, 1);
let stake_account3 = stake_state::create_account(
&sk3,
&pk3,
&vote_account3,
&genesis_config.rent,
rooted_stake_amount,
);
let sk4 = solana_sdk::pubkey::new_rand();
let pk4 = solana_sdk::pubkey::new_rand();
let mut vote_account4 =
vote_state::create_account(&pk4, &solana_sdk::pubkey::new_rand(), 0, 1);
let stake_account4 = stake_state::create_account(
&sk4,
&pk4,
&vote_account4,
&genesis_config.rent,
rooted_stake_amount,
);
genesis_config.accounts.extend(
vec![
(pk1, vote_account1.clone()),
(sk1, stake_account1),
(pk2, vote_account2.clone()),
(sk2, stake_account2),
(pk3, vote_account3.clone()),
(sk3, stake_account3),
(pk4, vote_account4.clone()),
(sk4, stake_account4),
]
.into_iter()
.map(|(key, account)| (key, Account::from(account))),
);
// Create bank
let bank = Arc::new(Bank::new(&genesis_config));
let mut vote_state1 = VoteState::from(&vote_account1).unwrap();
vote_state1.process_slot_vote_unchecked(3);
vote_state1.process_slot_vote_unchecked(5);
let versioned = VoteStateVersions::new_current(vote_state1);
VoteState::to(&versioned, &mut vote_account1).unwrap();
bank.store_account(&pk1, &vote_account1);
let mut vote_state2 = VoteState::from(&vote_account2).unwrap();
vote_state2.process_slot_vote_unchecked(9);
vote_state2.process_slot_vote_unchecked(10);
let versioned = VoteStateVersions::new_current(vote_state2);
VoteState::to(&versioned, &mut vote_account2).unwrap();
bank.store_account(&pk2, &vote_account2);
let mut vote_state3 = VoteState::from(&vote_account3).unwrap();
vote_state3.root_slot = Some(1);
let versioned = VoteStateVersions::new_current(vote_state3);
VoteState::to(&versioned, &mut vote_account3).unwrap();
bank.store_account(&pk3, &vote_account3);
let mut vote_state4 = VoteState::from(&vote_account4).unwrap();
vote_state4.root_slot = Some(2);
let versioned = VoteStateVersions::new_current(vote_state4);
VoteState::to(&versioned, &mut vote_account4).unwrap();
bank.store_account(&pk4, &vote_account4);
let (commitment, rooted_stake) =
AggregateCommitmentService::aggregate_commitment(&ancestors, &bank);
for a in ancestors {
if a <= 3 {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(2, 150);
assert_eq!(*commitment.get(&a).unwrap(), expected);
} else if a <= 5 {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(1, 100);
expected.increase_confirmation_stake(2, 50);
assert_eq!(*commitment.get(&a).unwrap(), expected);
} else if a <= 9 {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(2, 50);
assert_eq!(*commitment.get(&a).unwrap(), expected);
} else if a <= 10 {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(1, 50);
assert_eq!(*commitment.get(&a).unwrap(), expected);
} else {
assert!(commitment.get(&a).is_none());
}
}
assert_eq!(rooted_stake.len(), 2);
assert_eq!(get_highest_confirmed_root(rooted_stake, 100), 1)
}
#[test]
fn test_highest_confirmed_root_advance() {
fn get_vote_account_root_slot(vote_pubkey: Pubkey, bank: &Arc<Bank>) -> Slot {
let (_stake, vote_account) = bank.get_vote_account(&vote_pubkey).unwrap();
let slot = vote_account
.vote_state()
.as_ref()
.unwrap()
.root_slot
.unwrap();
slot
}
let block_commitment_cache = RwLock::new(BlockCommitmentCache::new_for_tests());
let validator_vote_keypairs = ValidatorVoteKeypairs::new_rand();
let validator_keypairs = vec![&validator_vote_keypairs];
let GenesisConfigInfo {
genesis_config,
mint_keypair: _,
voting_keypair: _,
} = create_genesis_config_with_vote_accounts(
1_000_000_000,
&validator_keypairs,
vec![100; 1],
);
let bank0 = Bank::new(&genesis_config);
let mut bank_forks = BankForks::new(bank0);
// Fill bank_forks with banks with votes landing in the next slot
// Create enough banks such that vote account will root slots 0 and 1
for x in 0..33 {
let previous_bank = bank_forks.get(x).unwrap();
let bank = Bank::new_from_parent(previous_bank, &Pubkey::default(), x + 1);
let vote = vote_transaction::new_vote_transaction(
vec![x],
previous_bank.hash(),
previous_bank.last_blockhash(),
&validator_vote_keypairs.node_keypair,
&validator_vote_keypairs.vote_keypair,
&validator_vote_keypairs.vote_keypair,
None,
);
bank.process_transaction(&vote).unwrap();
bank_forks.insert(bank);
}
let working_bank = bank_forks.working_bank();
let root = get_vote_account_root_slot(
validator_vote_keypairs.vote_keypair.pubkey(),
&working_bank,
);
for x in 0..root {
bank_forks.set_root(x, &AbsRequestSender::default(), None);
}
// Add an additional bank/vote that will root slot 2
let bank33 = bank_forks.get(33).unwrap();
let bank34 = Bank::new_from_parent(bank33, &Pubkey::default(), 34);
let vote33 = vote_transaction::new_vote_transaction(
vec![33],
bank33.hash(),
bank33.last_blockhash(),
&validator_vote_keypairs.node_keypair,
&validator_vote_keypairs.vote_keypair,
&validator_vote_keypairs.vote_keypair,
None,
);
bank34.process_transaction(&vote33).unwrap();
bank_forks.insert(bank34);
let working_bank = bank_forks.working_bank();
let root = get_vote_account_root_slot(
validator_vote_keypairs.vote_keypair.pubkey(),
&working_bank,
);
let ancestors = working_bank.status_cache_ancestors();
let _ = AggregateCommitmentService::update_commitment_cache(
&block_commitment_cache,
CommitmentAggregationData {
bank: working_bank,
root: 0,
total_stake: 100,
},
ancestors,
);
let highest_confirmed_root = block_commitment_cache
.read()
.unwrap()
.highest_confirmed_root();
bank_forks.set_root(
root,
&AbsRequestSender::default(),
Some(highest_confirmed_root),
);
let highest_confirmed_root_bank = bank_forks.get(highest_confirmed_root);
assert!(highest_confirmed_root_bank.is_some());
// Add a forked bank. Because the vote for bank 33 landed in the non-ancestor, the vote
// account's root (and thus the highest_confirmed_root) rolls back to slot 1
let bank33 = bank_forks.get(33).unwrap();
let bank35 = Bank::new_from_parent(bank33, &Pubkey::default(), 35);
bank_forks.insert(bank35);
let working_bank = bank_forks.working_bank();
let ancestors = working_bank.status_cache_ancestors();
let _ = AggregateCommitmentService::update_commitment_cache(
&block_commitment_cache,
CommitmentAggregationData {
bank: working_bank,
root: 1,
total_stake: 100,
},
ancestors,
);
let highest_confirmed_root = block_commitment_cache
.read()
.unwrap()
.highest_confirmed_root();
let highest_confirmed_root_bank = bank_forks.get(highest_confirmed_root);
assert!(highest_confirmed_root_bank.is_some());
// Add additional banks beyond lockout built on the new fork to ensure that behavior
// continues normally
for x in 35..=37 {
let previous_bank = bank_forks.get(x).unwrap();
let bank = Bank::new_from_parent(previous_bank, &Pubkey::default(), x + 1);
let vote = vote_transaction::new_vote_transaction(
vec![x],
previous_bank.hash(),
previous_bank.last_blockhash(),
&validator_vote_keypairs.node_keypair,
&validator_vote_keypairs.vote_keypair,
&validator_vote_keypairs.vote_keypair,
None,
);
bank.process_transaction(&vote).unwrap();
bank_forks.insert(bank);
}
let working_bank = bank_forks.working_bank();
let root = get_vote_account_root_slot(
validator_vote_keypairs.vote_keypair.pubkey(),
&working_bank,
);
let ancestors = working_bank.status_cache_ancestors();
let _ = AggregateCommitmentService::update_commitment_cache(
&block_commitment_cache,
CommitmentAggregationData {
bank: working_bank,
root: 0,
total_stake: 100,
},
ancestors,
);
let highest_confirmed_root = block_commitment_cache
.read()
.unwrap()
.highest_confirmed_root();
bank_forks.set_root(
root,
&AbsRequestSender::default(),
Some(highest_confirmed_root),
);
let highest_confirmed_root_bank = bank_forks.get(highest_confirmed_root);
assert!(highest_confirmed_root_bank.is_some());
}
}
| {
let mut expected = BlockCommitment::default();
expected.increase_rooted_stake(lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
} |
provider.go | package providers
import (
"context"
"github.com/rancher/types/apis/management.cattle.io/v3"
"github.com/rancher/types/config"
"github.com/rancher/rancher/pkg/auth/providers/github"
"github.com/rancher/rancher/pkg/auth/providers/local"
)
//Providers map
var providers map[string]PrincipalProvider
var providerOrderList []string
func | () {
providerOrderList = []string{"github", "local"}
providers = make(map[string]PrincipalProvider)
}
//PrincipalProvider interface defines what methods an identity provider should implement
type PrincipalProvider interface {
GetName() string
AuthenticateUser(jsonInput v3.LoginInput) (v3.Principal, []v3.Principal, map[string]string, int, error)
SearchPrincipals(name string, myToken v3.Token) ([]v3.Principal, int, error)
}
func Configure(ctx context.Context, mgmtCtx *config.ManagementContext) {
for _, providerName := range providerOrderList {
if _, exists := providers[providerName]; !exists {
switch providerName {
case "local":
providers[providerName] = local.Configure(ctx, mgmtCtx)
case "github":
providers[providerName] = github.Configure(ctx, mgmtCtx)
}
}
}
}
func AuthenticateUser(jsonInput v3.LoginInput) (v3.Principal, []v3.Principal, map[string]string, int, error) {
var groupPrincipals []v3.Principal
var userPrincipal v3.Principal
var providerInfo = make(map[string]string)
var status int
var err error
var providerName string
if jsonInput.GithubCredential.Code != "" {
providerName = "github"
} else if jsonInput.LocalCredential.Username != "" {
providerName = "local"
}
userPrincipal, groupPrincipals, providerInfo, status, err = providers[providerName].AuthenticateUser(jsonInput)
return userPrincipal, groupPrincipals, providerInfo, status, err
}
func SearchPrincipals(name string, myToken v3.Token) ([]v3.Principal, int, error) {
principals := make([]v3.Principal, 0)
var status int
var err error
principals, status, err = providers[myToken.AuthProvider].SearchPrincipals(name, myToken)
return principals, status, err
}
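// A hypothetical call sequence (assumes an initialized management context and
// a populated v3.LoginInput; names are illustrative):
//
//     Configure(ctx, mgmtCtx)
//     principal, groups, info, status, err := AuthenticateUser(input)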
| init |
coercion.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from .validators import is_item_iterable
def coerce_sequence_of_tuple(sequence):
"""Make sure all items of a sequence are of type tuple.
Parameters
----------
sequence : sequence
A sequence of items.
Returns
-------
list[tuple]
A list containing the items of the original sequence,
with each iterable item converted to a tuple,
and non-iterable items wrapped in a tuple.
Examples
--------
>>> items = coerce_sequence_of_tuple(['a', 1, (None, ), [2.0, 3.0]])
>>> is_sequence_of_tuple(items)
True
"""
items = []
for item in sequence:
if not isinstance(item, tuple):
if not is_item_iterable(item):
|
else:
item = tuple(item)
items.append(item)
return items
def coerce_sequence_of_list(sequence):
"""Make sure all items of a sequence are of type list.
Parameters
----------
sequence : sequence
A sequence of items.
Returns
-------
list[list]
A list containing the items of the original sequence,
with each iterable item converted to a list,
and non-iterable items wrapped in a list.
Examples
--------
>>> items = coerce_sequence_of_list(['a', 1, (None, ), [2.0, 3.0]])
>>> is_sequence_of_list(items)
True
"""
items = []
for item in sequence:
if not isinstance(item, list):
if not is_item_iterable(item):
item = [item]
else:
item = list(item)
items.append(item)
return items
| item = (item, ) |
wireless_settings.py | class WirelessSettings(object):
def __init__(self, session):
super(WirelessSettings, self).__init__()
self._session = session
def getNetworkWirelessSettings(self, networkId: str):
|
def updateNetworkWirelessSettings(self, networkId: str, **kwargs):
"""
**Update the wireless settings for a network**
https://developer.cisco.com/docs/meraki-api-v0/#!update-network-wireless-settings
- networkId (string)
- meshingEnabled (boolean): Toggle for enabling or disabling meshing in a network
- ipv6BridgeEnabled (boolean): Toggle for enabling or disabling IPv6 bridging in a network (Note: if enabled, SSIDs must also be configured to use bridge mode)
- locationAnalyticsEnabled (boolean): Toggle for enabling or disabling location analytics for your network
- ledLightsOn (boolean): Toggle for enabling or disabling LED lights on all APs in the network (making them run dark)
"""
kwargs.update(locals())
metadata = {
'tags': ['Wireless settings'],
'operation': 'updateNetworkWirelessSettings',
}
resource = f'/networks/{networkId}/wireless/settings'
body_params = ['meshingEnabled', 'ipv6BridgeEnabled', 'locationAnalyticsEnabled', 'ledLightsOn']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return self._session.put(metadata, resource, payload)
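# A hypothetical usage sketch (assumes a configured Meraki API `session` and a
# real network id; both values are illustrative):
#
#     settings = WirelessSettings(session)
#     current = settings.getNetworkWirelessSettings('N_123456')
#     settings.updateNetworkWirelessSettings('N_123456', meshingEnabled=True)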
| """
**Return the wireless settings for a network**
https://developer.cisco.com/docs/meraki-api-v0/#!get-network-wireless-settings
- networkId (string)
"""
metadata = {
'tags': ['Wireless settings'],
'operation': 'getNetworkWirelessSettings',
}
resource = f'/networks/{networkId}/wireless/settings'
return self._session.get(metadata, resource) |
role.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internalversion
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
api "k8s.io/kubernetes/pkg/api"
rbac "k8s.io/kubernetes/pkg/apis/rbac"
)
// RolesGetter has a method to return a RoleInterface.
// A group's client should implement this interface.
type RolesGetter interface {
Roles(namespace string) RoleInterface
}
// RoleInterface has methods to work with Role resources.
type RoleInterface interface {
Create(*rbac.Role) (*rbac.Role, error)
Update(*rbac.Role) (*rbac.Role, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
Get(name string, options v1.GetOptions) (*rbac.Role, error)
List(opts v1.ListOptions) (*rbac.RoleList, error)
Watch(opts v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbac.Role, err error)
RoleExpansion
}
// roles implements RoleInterface
type roles struct {
client rest.Interface
ns string
}
// newRoles returns a Roles
func | (c *RbacClient, namespace string) *roles {
return &roles{
client: c.RESTClient(),
ns: namespace,
}
}
// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any.
func (c *roles) Create(role *rbac.Role) (result *rbac.Role, err error) {
result = &rbac.Role{}
err = c.client.Post().
Namespace(c.ns).
Resource("roles").
Body(role).
Do().
Into(result)
return
}
// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
func (c *roles) Update(role *rbac.Role) (result *rbac.Role, err error) {
result = &rbac.Role{}
err = c.client.Put().
Namespace(c.ns).
Resource("roles").
Name(role.Name).
Body(role).
Do().
Into(result)
return
}
// Delete takes name of the role and deletes it. Returns an error if one occurs.
func (c *roles) Delete(name string, options *v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("roles").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("roles").
VersionedParams(&listOptions, api.ParameterCodec).
Body(options).
Do().
Error()
}
// Get takes name of the role, and returns the corresponding role object, and an error if there is any.
func (c *roles) Get(name string, options v1.GetOptions) (result *rbac.Role, err error) {
result = &rbac.Role{}
err = c.client.Get().
Namespace(c.ns).
Resource("roles").
Name(name).
VersionedParams(&options, api.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of Roles that match those selectors.
func (c *roles) List(opts v1.ListOptions) (result *rbac.RoleList, err error) {
result = &rbac.RoleList{}
err = c.client.Get().
Namespace(c.ns).
Resource("roles").
VersionedParams(&opts, api.ParameterCodec).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested roles.
func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) {
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("roles").
VersionedParams(&opts, api.ParameterCodec).
Watch()
}
// Patch applies the patch and returns the patched role.
func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbac.Role, err error) {
result = &rbac.Role{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("roles").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}
| newRoles |
getVirtualNetworkGatewayLearnedRoutes.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20200801
import (
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func GetVirtualNetworkGatewayLearnedRoutes(ctx *pulumi.Context, args *GetVirtualNetworkGatewayLearnedRoutesArgs, opts ...pulumi.InvokeOption) (*GetVirtualNetworkGatewayLearnedRoutesResult, error) {
var rv GetVirtualNetworkGatewayLearnedRoutesResult
err := ctx.Invoke("azure-native:network/v20200801:getVirtualNetworkGatewayLearnedRoutes", args, &rv, opts...)
if err != nil |
return &rv, nil
}
type GetVirtualNetworkGatewayLearnedRoutesArgs struct {
ResourceGroupName string `pulumi:"resourceGroupName"`
VirtualNetworkGatewayName string `pulumi:"virtualNetworkGatewayName"`
}
// List of virtual network gateway routes.
type GetVirtualNetworkGatewayLearnedRoutesResult struct {
Value []GatewayRouteResponse `pulumi:"value"`
}
| {
return nil, err
} |
data_commands.py | import logging
import sys
import pandas as pd
import yfinance as yf
import os
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Any, Dict, List
from finrl.config import TimeRange, setup_utils_configuration
from finrl.data.converter import convert_ohlcv_format, convert_trades_format
from finrl.data.history import (convert_trades_to_ohlcv, refresh_backtest_ohlcv_data,
refresh_backtest_trades_data)
from finrl.exceptions import OperationalException
from finrl.exchange import timeframe_to_minutes
from finrl.resolvers import ExchangeResolver
from finrl.state import RunMode
logger = logging.getLogger(__name__)
def start_download_cryptodata(args: Dict[str, Any]) -> None:
"""
Parameters:
ARGS_DOWNLOAD_DATA = {'config': ['config.json'], 'datadir': None,
'user_data_dir': None, 'pairs': None, 'pairs_file': None,
'days': 160, 'timerange': None,
'download_trades': False, 'exchange': 'binance',
'timeframes': ['1d'], 'erase': False,
'dataformat_ohlcv': None, 'dataformat_trades': None}
Returns:
Json files in user_data/data/exchange/*.json
"""
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
if 'days' in config and 'timerange' in config:
raise OperationalException("--days and --timerange are mutually exclusive. "
"You can only specify one or the other.")
timerange = TimeRange()
if 'days' in config:
time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
if 'timerange' in config:
timerange = timerange.parse_timerange(config['timerange'])
# Remove stake-currency to skip checks which are not relevant for datadownload
config['stake_currency'] = ''
if 'pairs' not in config:
raise OperationalException(
"Downloading data requires a list of pairs. "
"Please check the documentation on how to configure this.")
logger.info(f"About to download pairs: {config['pairs']}, "
f"intervals: {config['timeframes']} to {config['datadir']}")
pairs_not_available: List[str] = []
# Init exchange
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
# Manual validations of relevant settings
exchange.validate_pairs(config['pairs'])
for timeframe in config['timeframes']:
exchange.validate_timeframes(timeframe)
try:
if config.get('download_trades'):
pairs_not_available = refresh_backtest_trades_data(
exchange, pairs=config['pairs'], datadir=config['datadir'],
timerange=timerange, erase=bool(config.get('erase')),
data_format=config['dataformat_trades'])
# Convert downloaded trade data to different timeframes
convert_trades_to_ohlcv(
pairs=config['pairs'], timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format_ohlcv=config['dataformat_ohlcv'],
data_format_trades=config['dataformat_trades'],
)
else:
pairs_not_available = refresh_backtest_ohlcv_data(
exchange, pairs=config['pairs'], timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format=config['dataformat_ohlcv'])
except KeyboardInterrupt:
sys.exit("Interrupt received, aborting ...")
finally:
if pairs_not_available:
logger.info(f"Pairs [{','.join(pairs_not_available)}] not available "
f"on exchange {exchange.name}.")
def start_download_stockdata(args: Dict[str, Any]) -> None:
"""Fetches data from Yahoo API
Parameters
----------
ticker_list, timerange,
Returns
-------
Json of data
"""
args["exchange"] = "yahoo"
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
if 'days' in config and 'timerange' in config:
raise OperationalException("--days and --timerange are mutually exclusive. "
"You can only specify one or the other.")
config["datadir"] = "user_data/data/yahoo"
timerange = TimeRange()
if 'days' in config:
time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d")
timerange = TimeRange.parse_timerange(f'{time_since}-')
start = datetime.fromtimestamp(timerange.startts).strftime("%Y-%m-%d")
end = datetime.now().strftime("%Y-%m-%d")
if 'timerange' in config:
timerange = timerange.parse_timerange(config['timerange'])
start = datetime.fromtimestamp(timerange.startts).strftime("%Y-%m-%d")
end = datetime.fromtimestamp(timerange.stopts).strftime("%Y-%m-%d")
try:
data_df = pd.DataFrame()
for tic in config['ticker_list']:
temp_df = yf.download(tic, start=start, end=end)
temp_df.columns = [
"open",
"high",
"low",
"close",
"adjcp",
"volume",
]
temp_df["close"] = temp_df["adjcp"]
temp_df = temp_df.drop(["adjcp"], axis=1)
temp_df.to_json(f'{os.getcwd()}/{config["datadir"]}/{tic}.json')
except KeyboardInterrupt:
sys.exit("Interrupt received, aborting ...")
| """
Convert data from one format to another
"""
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if ohlcv:
convert_ohlcv_format(config,
convert_from=args['format_from'], convert_to=args['format_to'],
erase=args['erase'])
else:
convert_trades_format(config,
convert_from=args['format_from'], convert_to=args['format_to'],
erase=args['erase'])
def start_list_data(args: Dict[str, Any]) -> None:
"""
List available backtest data
"""
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
from tabulate import tabulate
from finrl.data.history.idatahandler import get_datahandler
dhc = get_datahandler(config['datadir'], config['dataformat_ohlcv'])
paircombs = dhc.ohlcv_get_available_data(config['datadir'])
if args['pairs']:
paircombs = [comb for comb in paircombs if comb[0] in args['pairs']]
print(f"Found {len(paircombs)} pair / timeframe combinations.")
groupedpair = defaultdict(list)
for pair, timeframe in sorted(paircombs, key=lambda x: (x[0], timeframe_to_minutes(x[1]))):
groupedpair[pair].append(timeframe)
if groupedpair:
print(tabulate([(pair, ', '.join(timeframes)) for pair, timeframes in groupedpair.items()],
headers=("Pair", "Timeframe"),
tablefmt='psql', stralign='right')) |
def start_convert_data(args: Dict[str, Any], ohlcv: bool = True) -> None: |
options.py | # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=redefined-builtin, too-many-arguments
import os
from collections import OrderedDict, namedtuple
import click
ConfigOptionClass = namedtuple(
"ConfigOption",
[
"scope",
"name",
"type",
"multiple",
"sysenvvar",
"buildenvvar",
"oldnames",
"default",
],
)
def ConfigOption(
scope,
name,
type=str,
multiple=False,
sysenvvar=None,
buildenvvar=None,
oldnames=None,
default=None,
):
return ConfigOptionClass(
scope, name, type, multiple, sysenvvar, buildenvvar, oldnames, default
)
def ConfigPlatformioOption(*args, **kwargs):
return ConfigOption("platformio", *args, **kwargs)
def ConfigEnvOption(*args, **kwargs):
|
ProjectOptions = OrderedDict(
[
("%s.%s" % (option.scope, option.name), option)
for option in [
#
# [platformio]
#
ConfigPlatformioOption(name="description"),
ConfigPlatformioOption(
name="default_envs",
oldnames=["env_default"],
multiple=True,
sysenvvar="PLATFORMIO_DEFAULT_ENVS",
),
ConfigPlatformioOption(name="extra_configs", multiple=True),
# Dirs
ConfigPlatformioOption(
name="core_dir",
oldnames=["home_dir"],
sysenvvar="PLATFORMIO_CORE_DIR",
default=os.path.join(os.path.expanduser("~"), ".platformio"),
),
ConfigPlatformioOption(
name="globallib_dir",
sysenvvar="PLATFORMIO_GLOBALLIB_DIR",
default=os.path.join("$PROJECT_CORE_DIR", "lib"),
),
ConfigPlatformioOption(
name="platforms_dir",
sysenvvar="PLATFORMIO_PLATFORMS_DIR",
default=os.path.join("$PROJECT_CORE_DIR", "platforms"),
),
ConfigPlatformioOption(
name="packages_dir",
sysenvvar="PLATFORMIO_PACKAGES_DIR",
default=os.path.join("$PROJECT_CORE_DIR", "packages"),
),
ConfigPlatformioOption(
name="cache_dir",
sysenvvar="PLATFORMIO_CACHE_DIR",
default=os.path.join("$PROJECT_CORE_DIR", ".cache"),
),
ConfigPlatformioOption(
name="build_cache_dir", sysenvvar="PLATFORMIO_BUILD_CACHE_DIR"
),
ConfigPlatformioOption(
name="workspace_dir",
sysenvvar="PLATFORMIO_WORKSPACE_DIR",
default=os.path.join("$PROJECT_DIR", ".pio"),
),
ConfigPlatformioOption(
name="build_dir",
sysenvvar="PLATFORMIO_BUILD_DIR",
default=os.path.join("$PROJECT_WORKSPACE_DIR", "build"),
),
ConfigPlatformioOption(
name="libdeps_dir",
sysenvvar="PLATFORMIO_LIBDEPS_DIR",
default=os.path.join("$PROJECT_WORKSPACE_DIR", "libdeps"),
),
ConfigPlatformioOption(
name="lib_dir",
sysenvvar="PLATFORMIO_LIB_DIR",
default=os.path.join("$PROJECT_DIR", "lib"),
),
ConfigPlatformioOption(
name="include_dir",
sysenvvar="PLATFORMIO_INCLUDE_DIR",
default=os.path.join("$PROJECT_DIR", "include"),
),
ConfigPlatformioOption(
name="src_dir",
sysenvvar="PLATFORMIO_SRC_DIR",
default=os.path.join("$PROJECT_DIR", "src"),
),
ConfigPlatformioOption(
name="test_dir",
sysenvvar="PLATFORMIO_TEST_DIR",
default=os.path.join("$PROJECT_DIR", "test"),
),
ConfigPlatformioOption(
name="boards_dir",
sysenvvar="PLATFORMIO_BOARDS_DIR",
default=os.path.join("$PROJECT_DIR", "boards"),
),
ConfigPlatformioOption(
name="data_dir",
sysenvvar="PLATFORMIO_DATA_DIR",
default=os.path.join("$PROJECT_DIR", "data"),
),
ConfigPlatformioOption(
name="shared_dir",
sysenvvar="PLATFORMIO_SHARED_DIR",
default=os.path.join("$PROJECT_DIR", "shared"),
),
#
# [env]
#
ConfigEnvOption(name="extends", multiple=True),
# Generic
ConfigEnvOption(name="platform", buildenvvar="PIOPLATFORM"),
ConfigEnvOption(name="platform_packages", multiple=True),
ConfigEnvOption(
name="framework", multiple=True, buildenvvar="PIOFRAMEWORK"
),
# Board
ConfigEnvOption(name="board", buildenvvar="BOARD"),
ConfigEnvOption(
name="board_build.mcu", oldnames=["board_mcu"], buildenvvar="BOARD_MCU"
),
ConfigEnvOption(
name="board_build.f_cpu",
oldnames=["board_f_cpu"],
buildenvvar="BOARD_F_CPU",
),
ConfigEnvOption(
name="board_build.f_flash",
oldnames=["board_f_flash"],
buildenvvar="BOARD_F_FLASH",
),
ConfigEnvOption(
name="board_build.flash_mode",
oldnames=["board_flash_mode"],
buildenvvar="BOARD_FLASH_MODE",
),
# Build
ConfigEnvOption(name="build_type", type=click.Choice(["release", "debug"])),
ConfigEnvOption(
name="build_flags",
multiple=True,
sysenvvar="PLATFORMIO_BUILD_FLAGS",
buildenvvar="BUILD_FLAGS",
),
ConfigEnvOption(
name="src_build_flags",
multiple=True,
sysenvvar="PLATFORMIO_SRC_BUILD_FLAGS",
buildenvvar="SRC_BUILD_FLAGS",
),
ConfigEnvOption(
name="build_unflags",
multiple=True,
sysenvvar="PLATFORMIO_BUILD_UNFLAGS",
buildenvvar="BUILD_UNFLAGS",
),
ConfigEnvOption(
name="src_filter",
multiple=True,
sysenvvar="PLATFORMIO_SRC_FILTER",
buildenvvar="SRC_FILTER",
),
ConfigEnvOption(name="targets", multiple=True),
# Upload
ConfigEnvOption(
name="upload_port",
sysenvvar="PLATFORMIO_UPLOAD_PORT",
buildenvvar="UPLOAD_PORT",
),
ConfigEnvOption(name="upload_protocol", buildenvvar="UPLOAD_PROTOCOL"),
ConfigEnvOption(
name="upload_speed", type=click.INT, buildenvvar="UPLOAD_SPEED"
),
ConfigEnvOption(
name="upload_flags",
multiple=True,
sysenvvar="PLATFORMIO_UPLOAD_FLAGS",
buildenvvar="UPLOAD_FLAGS",
),
ConfigEnvOption(
name="upload_resetmethod", buildenvvar="UPLOAD_RESETMETHOD"
),
ConfigEnvOption(name="upload_command", buildenvvar="UPLOADCMD"),
# Monitor
ConfigEnvOption(name="monitor_port"),
ConfigEnvOption(name="monitor_speed", oldnames=["monitor_baud"]),
ConfigEnvOption(name="monitor_rts", type=click.IntRange(0, 1)),
ConfigEnvOption(name="monitor_dtr", type=click.IntRange(0, 1)),
ConfigEnvOption(name="monitor_flags", multiple=True),
# Library
ConfigEnvOption(
name="lib_deps",
oldnames=["lib_use", "lib_force", "lib_install"],
multiple=True,
),
ConfigEnvOption(name="lib_ignore", multiple=True),
ConfigEnvOption(
name="lib_extra_dirs",
multiple=True,
sysenvvar="PLATFORMIO_LIB_EXTRA_DIRS",
),
ConfigEnvOption(
name="lib_ldf_mode",
type=click.Choice(["off", "chain", "deep", "chain+", "deep+"]),
),
ConfigEnvOption(
name="lib_compat_mode", type=click.Choice(["off", "soft", "strict"])
),
ConfigEnvOption(name="lib_archive", type=click.BOOL),
# Test
ConfigEnvOption(name="test_filter", multiple=True),
ConfigEnvOption(name="test_ignore", multiple=True),
ConfigEnvOption(name="test_port"),
ConfigEnvOption(name="test_speed", type=click.INT),
ConfigEnvOption(name="test_transport"),
ConfigEnvOption(name="test_build_project_src", type=click.BOOL),
# Debug
ConfigEnvOption(name="debug_tool"),
ConfigEnvOption(name="debug_init_break"),
ConfigEnvOption(name="debug_init_cmds", multiple=True),
ConfigEnvOption(name="debug_extra_cmds", multiple=True),
ConfigEnvOption(
name="debug_load_cmds", oldnames=["debug_load_cmd"], multiple=True
),
ConfigEnvOption(
name="debug_load_mode",
type=click.Choice(["always", "modified", "manual"]),
),
ConfigEnvOption(name="debug_server", multiple=True),
ConfigEnvOption(name="debug_port"),
ConfigEnvOption(
name="debug_svd_path",
type=click.Path(exists=True, file_okay=True, dir_okay=False),
),
# Check
ConfigEnvOption(name="check_tool", multiple=True),
ConfigEnvOption(name="check_filter", multiple=True),
ConfigEnvOption(name="check_flags", multiple=True),
ConfigEnvOption(
name="check_severity",
multiple=True,
type=click.Choice(["low", "medium", "high"]),
),
# Other
ConfigEnvOption(
name="extra_scripts",
oldnames=["extra_script"],
multiple=True,
sysenvvar="PLATFORMIO_EXTRA_SCRIPTS",
),
]
]
)
| return ConfigOption("env", *args, **kwargs) |
annotate_txt.py | import argparse
from xml.etree import ElementTree as ET
import os
from pickle import dump
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("dir")
parser.add_argument("save")
args = parser.parse_args()
path = os.path.join(args.dir)
classes_nums = {"cat" : 0 , "dog" : 1}
keys = list(classes_nums.keys())
try:
os.mkdir(args.save)
except FileExistsError:
print("Folder already exists!")
def ToMidPoint(x1 , y1 , x2 , y2 , size):
dw = 1.0 / size[0]
dh = 1.0 / size[1]
h = y2 - y1
w = x2 - x1
x = (x1 + (w/2))
y = (y1 + (h/2))
return x * dw , y * dh , w * dw , h * dh
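# Worked example (hypothetical numbers): for a 100x100 image and the corner box
# (x1, y1, x2, y2) = (10, 20, 50, 60), the box has w=40, h=40 and center
# (30, 40), so ToMidPoint returns the normalized (0.3, 0.4, 0.4, 0.4).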
for File in tqdm(os.listdir(path)):
obj_list = 0
xml_path = os.path.join(path , File)
file_name = "{}/{}".format(args.save , File.replace("xml" , "txt"))
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
w_img = int(size.find('width').text)
h_img = int(size.find('height').text)
with open(file_name , "w") as F :
for obj in root.iter("object"):
class_name = obj.find("name").text
if class_name not in keys:
continue | xml_box = obj.find("bndbox")
nedded = ["xmin" , "ymin" , "xmax" , "ymax"]
x1 , y1 = float(xml_box.find(nedded[0]).text) , float(xml_box.find(nedded[1]).text)
x2 , y2 = float(xml_box.find(nedded[2]).text) , float(xml_box.find(nedded[3]).text)
x , y , w , h = ToMidPoint(x1 , y1 , x2 , y2 , (w_img , h_img))
F.write("{} {} {} {} {}\n".format(class_id , x , y , w , h))
if obj_list == 0:
os.remove(file_name) | obj_list += 1
class_id = classes_nums[class_name] |
graphlaplace.rs | //! Graph Laplacian stuff
use std::collections::HashMap;
use ndarray::{Array1, Array2, Array, Axis};
use sprs::{CsMat, TriMatBase};
use lax::{layout::MatrixLayout, UVTFlag, SVDDC_};
use crate::tools::svdapprox::*;
use crate::nodeparam::*;
const FULL_MAT_REPR : usize = 5000;
const FULL_SVD_SIZE_LIMIT : usize = 5000;
/// We use a normalized symmetric laplacian to go to the svd.
/// But we want the left eigenvectors of the normalized R(andom)W(alk) laplacian so we must keep track
/// of degrees (row L1 norms)
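///
/// Background sketch (standard spectral-graph identities, stated for context,
/// not code from this crate): with weights W and degrees D,
/// L_sym = I - D^{-1/2} W D^{-1/2} and L_rw = I - D^{-1} W satisfy
/// L_rw = D^{-1/2} L_sym D^{1/2}, so an eigenvector u of the symmetric form
/// corresponds to the eigenvector D^{-1/2} u of the random-walk form; keeping
/// `degrees` around lets us convert after the svd.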
pub(crate) struct GraphLaplacian {
// symmetrized graph. Exactly D^{-1/2} * G * D^{-1/2}
sym_laplacian: MatRepr<f32>,
// the vector giving D of the symmetrized graph
pub(crate) degrees: Array1<f32>,
//
_s : Option<Array1<f32>>,
//
_u : Option<Array2<f32>>
}
impl GraphLaplacian {
pub fn new(sym_laplacian: MatRepr<f32>, degrees: Array1<f32>) -> Self | // end of new for GraphLaplacian
#[inline]
fn is_csr(&self) -> bool {
self.sym_laplacian.is_csr()
} // end is_csr
fn get_nbrow(&self) -> usize {
self.degrees.len()
}
fn do_full_svd(&mut self) -> Result<SvdResult<f32>, String> {
//
log::info!("GraphLaplacian doing full svd");
let b = self.sym_laplacian.get_full_mut().unwrap();
log::trace!("GraphLaplacian ... size nbrow {} nbcol {} ", b.shape()[0], b.shape()[1]);
let layout = MatrixLayout::C { row: b.shape()[0] as i32, lda: b.shape()[1] as i32 };
let slice_for_svd_opt = b.as_slice_mut();
if slice_for_svd_opt.is_none() {
println!("direct_svd Matrix cannot be transformed into a slice : not contiguous or not in standard order");
return Err(String::from("not contiguous or not in standard order"));
}
// use divide conquer (calls lapack gesdd), faster but could use svd (lapack gesvd)
log::trace!("direct_svd calling svddc driver");
let res_svd_b = f32::svddc(layout, UVTFlag::Some, slice_for_svd_opt.unwrap());
if res_svd_b.is_err() {
log::info!("GraphLaplacian do_full_svd svddc failed");
return Err(String::from("GraphLaplacian svddc failed"));
};
// we have to decode res and fill in SvdApprox fields.
// lax does encapsulate dgesvd (double) and sgesvd (single) which return U and Vt as vectors.
// We must reconstruct Array2 from slices.
// now we must match results
// u is (m,r) , vt must be (r, n) with m = self.data.shape()[0] and n = self.data.shape()[1]
let res_svd_b = res_svd_b.unwrap();
let r = res_svd_b.s.len();
let m = b.shape()[0];
// must truncate to asked dim
let s : Array1<f32> = res_svd_b.s.iter().map(|x| *x).collect::<Array1<f32>>();
//
let s_u : Option<Array2<f32>>;
if let Some(u_vec) = res_svd_b.u {
s_u = Some(Array::from_shape_vec((m, r), u_vec).unwrap());
}
else {
s_u = None;
}
Ok(SvdResult{s : Some(s), u: s_u, vt : None})
} // end of do_full_svd
/// do a partial approximated svd
fn do_approx_svd(&mut self, asked_dim : usize) -> Result<SvdResult<f32>, String> {
assert!(asked_dim >= 2);
// get eigen values of normalized symmetric laplacian
//
// switch to full or partial svd depending on csr representation and size
// csr implies approx svd.
log::info!("got laplacian, going to approximated svd ... asked_dim : {}", asked_dim);
let mut svdapprox = SvdApprox::new(&self.sym_laplacian);
// TODO adjust epsil ?
// we need one dim more because we get rid of the first eigen vector as in dmap, and for a slowly decreasing spectrum RANK approx is
// better, see Halko-Tropp
let svdmode = RangeApproxMode::RANK(RangeRank::new(20, 5));
let svd_res = svdapprox.direct_svd(svdmode);
log::trace!("exited svd");
if svd_res.is_err() {
println!("svd approximation failed");
std::panic!();
}
return svd_res;
} // end if do_approx_svd
pub fn do_svd(&mut self, asked_dim : usize) -> Result<SvdResult<f32>, String> {
if !self.is_csr() && self.get_nbrow() <= FULL_SVD_SIZE_LIMIT { // try direct svd
self.do_full_svd()
}
else {
self.do_approx_svd(asked_dim)
}
} // end of init_from_sv_approx
} // end of impl GraphLaplacian
// the function computes a symmetric laplacian graph for svd with transition probabilities taken from NodeParams
// We will then need the lower non zero eigenvalues and eigen vectors.
// The best justification for this is in Diffusion Maps.
//
// Store in a symmetric matrix representation, dense or CsMat, for spectral embedding.
// Do the Svd to initialize embedding. After that we no longer need a full matrix.
// - Get maximal incoming degree and choose either a CsMat or a dense Array2.
//
// See also Veerman A Primer on Laplacian Dynamics in Directed Graphs 2020 arxiv https://arxiv.org/abs/2002.02605
pub(crate) fn get_laplacian(initial_space : &NodeParams) -> GraphLaplacian {
//
log::debug!("in get_laplacian");
//
let nbnodes = initial_space.get_nb_nodes();
// get stats
let max_nbng = initial_space.get_max_nbng();
let node_params = initial_space;
// TODO define a threshold for dense/sparse representation
if nbnodes <= FULL_MAT_REPR {
log::debug!("get_laplacian using full matrix");
let mut transition_proba = Array2::<f32>::zeros((nbnodes, nbnodes));
// we loop on all nodes, for each we want nearest neighbours, and get scale of distances around it
for i in 0..node_params.params.len() {
// remind to index each request
let node_param = node_params.get_node_param(i);
// CAVEAT diagonal transition 0. or 1. ? Choose 0. as in t-sne umap LargeVis
for j in 0..node_param.edges.len() {
let edge = node_param.edges[j];
transition_proba[[i, edge.node]] = edge.weight;
} // end of for j
} // end for i
log::trace!("full matrix initialized");
// now we symmetrize the graph by taking the mean
// The UMAP formula (p_i+p_j - p_i *p_j) implies taking the non null proba when one proba is null,
// so UMAP initialization is more packed.
let mut symgraph = (&transition_proba + &transition_proba.view().t()) * 0.5;
// now we go to the symmetric laplacian D^-1/2 * G * D^-1/2 but get rid of the I - ...
// cf Yan-Jordan Fast Approximate Spectral Clustering ACM-KDD 2009
// compute sum of row and renormalize. See Lafon-Keller-Coifman
// Diffusions Maps appendix B
// IEEE TRANSACTIONS ON PATTERN ANALYSIS AND MACHINE INTELLIGENCE,VOL. 28, NO. 11,NOVEMBER 2006
let diag = symgraph.sum_axis(Axis(1));
for i in 0..nbnodes {
let mut row = symgraph.row_mut(i);
for j in 0..nbnodes {
row[[j]] /= (diag[[i]] * diag[[j]]).sqrt();
}
}
//
log::trace!("\n allocating full matrix laplacian");
let laplacian = GraphLaplacian::new(MatRepr::from_array2(symgraph), diag);
laplacian
} else {
log::debug!("Embedder using csr matrix");
// now we must construct a CsrMat to store the symmetrized graph transition probability to go to svd.
// and initialize field initial_space with some NodeParams
let mut edge_list = HashMap::<(usize, usize), f32>::with_capacity(nbnodes * max_nbng);
for i in 0..node_params.params.len() {
let node_param = node_params.get_node_param(i);
for j in 0..node_param.edges.len() {
let edge = node_param.edges[j];
edge_list.insert((i, edge.node), node_param.edges[j].weight);
} // end of for j
}
// now we iter on the hashmap, symmetrize the graph, and insert the triplets into transition_proba
let mut diagonal = Array1::<f32>::zeros(nbnodes);
let mut rows = Vec::<usize>::with_capacity(nbnodes * 2 * max_nbng);
let mut cols = Vec::<usize>::with_capacity(nbnodes * 2 * max_nbng);
let mut values = Vec::<f32>::with_capacity(nbnodes * 2 * max_nbng);
for ((i, j), val) in edge_list.iter() {
assert!(i!=j);
            let sym_val = if let Some(t_val) = edge_list.get(&(*j, *i)) {
                (val + t_val) * 0.5
            } else {
                *val
            };
rows.push(*i);
cols.push(*j);
values.push(sym_val);
diagonal[*i] += sym_val;
//
rows.push(*j);
cols.push(*i);
values.push(sym_val);
diagonal[*j] += sym_val;
}
// as in the FULL representation we avoided the I diagonal term, which cancels anyway
// Now we reset non diagonal terms to D^-1/2 G D^-1/2 i.e val[i,j]/(D[i]*D[j])^1/2
for i in 0..rows.len() {
let row = rows[i];
let col = cols[i];
if row != col {
values[i] /= (diagonal[row] * diagonal[col]).sqrt();
}
}
//
log::trace!("allocating csr laplacian");
let laplacian = TriMatBase::<Vec<usize>, Vec<f32>>::from_triplets(
(nbnodes, nbnodes),
rows,
cols,
values,
);
let csr_mat: CsMat<f32> = laplacian.to_csr();
let laplacian = GraphLaplacian::new(MatRepr::from_csrmat(csr_mat),diagonal);
laplacian
} // end case CsMat
//
} // end of get_laplacian
| {
GraphLaplacian{sym_laplacian, degrees, _s : None, _u: None}
} |
revel_test.go | package revel
import (
"bytes"
"fmt"
stdlog "log"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"github.com/revel/revel"
"gopkg.in/birkirb/loggers.v1"
"gopkg.in/birkirb/loggers.v1/log"
)
func TestRevelInterface(t *testing.T) {
var _ loggers.Contextual = NewLogger()
}
func TestRevelLevelOutputWithColor(t *testing.T) {
l, b := newBufferedRevelLog()
l.Debugln("\x1b[30mThis text will have black color\x1b[0m")
l.Debugln("This text will have default color")
var expectedMatch = []string{
"TRACE.*This text will have black color.+$",
"TRACE.*This text will have default color",
}
actual := b.String()
lines := strings.Split(actual, "\n")
k := 1 // Offset for lines before expected
for i, expected := range expectedMatch {
if ok, _ := regexp.Match(expected, []byte(lines[i+k])); !ok {
t.Errorf("Log output mismatch `%s` (actual) != `%s` (expected)", lines[i+k], expected)
}
}
}
func TestRevelLevelOutput(t *testing.T) {
l, b := newBufferedRevelLog()
l.Info("This is a test")
expectedMatch := "INFO.*This is a test\n"
actual := b.String()
if ok, _ := regexp.Match(expectedMatch, []byte(actual)); !ok {
t.Errorf("Log output mismatch %s (actual) != %s (expected)", actual, expectedMatch)
}
}
func TestRevelLevelfOutput(t *testing.T) {
l, b := newBufferedRevelLog()
l.Errorf("This is %s test", "a")
expectedMatch := "ERROR.*This is a test\n"
actual := b.String()
if ok, _ := regexp.Match(expectedMatch, []byte(actual)); !ok {
t.Errorf("Log output mismatch %s (actual) != %s (expected)", actual, expectedMatch)
}
}
func TestRevelLevellnOutput(t *testing.T) {
l, b := newBufferedRevelLog()
l.Debugln("This is a test.", "So is this.")
expectedMatch := "TRACE.*This is a test. So is this.\n"
actual := b.String()
if ok, _ := regexp.Match(expectedMatch, []byte(actual)); !ok {
t.Errorf("Log output mismatch %s (actual) != %s (expected)", actual, expectedMatch)
}
}
func TestRevelWithFieldsOutput(t *testing.T) {
l, b := newBufferedRevelLog()
l.WithFields("test", true).Warn("This is a message.")
expectedMatch := "WARN.*This is a message. test=true\n"
actual := b.String()
if ok, _ := regexp.Match(expectedMatch, []byte(actual)); !ok {
t.Errorf("Log output mismatch %s (actual) != %s (expected)", actual, expectedMatch)
}
}
func TestRevelWithFieldsfOutput(t *testing.T) {
l, b := newBufferedRevelLog()
l.WithFields("test", true, "Error", "serious").Errorf("This is a %s.", "message")
expectedMatch := "ERROR.*This is a message. test=true Error=serious\n"
actual := b.String()
if ok, _ := regexp.Match(expectedMatch, []byte(actual)); !ok {
t.Errorf("Log output mismatch %s (actual) != %s (expected)", actual, expectedMatch)
}
}
func newBufferedRevelLog() (loggers.Contextual, *bytes.Buffer) {
var b []byte
var bb = bytes.NewBuffer(b)
// Loggers
revel.TRACE = stdlog.New(bb, "TRACE ", stdlog.Ldate|stdlog.Ltime)
revel.INFO = stdlog.New(bb, "INFO ", stdlog.Ldate|stdlog.Ltime)
revel.WARN = stdlog.New(bb, "WARN ", stdlog.Ldate|stdlog.Ltime)
revel.ERROR = stdlog.New(bb, "ERROR ", stdlog.Ldate|stdlog.Ltime)
return NewLogger(), bb
}
func TestBackTrace(t *testing.T) {
l, b := newBufferedRevelLog()
log.Logger = l
log.Error("an error")
_, file, line, _ := runtime.Caller(0)
mustContain := fmt.Sprintf("%s:%d", filepath.Base(file), line-1)
actual := b.String()
if ok := strings.Contains(actual, mustContain); !ok {
t.Errorf("Log output mismatch %s (actual) != %s (expected)", actual, mustContain)
}
}
func TestBackTraceF(t *testing.T) {
l, b := newBufferedRevelLog()
log.Logger = l
log.Errorf("an error: %s", "value")
_, file, line, _ := runtime.Caller(0)
mustContain := fmt.Sprintf("%s:%d", filepath.Base(file), line-1)
actual := b.String()
if ok := strings.Contains(actual, mustContain); !ok {
t.Errorf("Log output mismatch %s (actual) != %s (expected)", actual, mustContain)
}
}
func TestBackTraceLn(t *testing.T) | {
l, b := newBufferedRevelLog()
log.Logger = l
log.Errorln("an error")
_, file, line, _ := runtime.Caller(0)
mustContain := fmt.Sprintf("%s:%d", filepath.Base(file), line-1)
actual := b.String()
if ok := strings.Contains(actual, mustContain); !ok {
t.Errorf("Log output mismatch %s (actual) != %s (expected)", actual, mustContain)
}
} |
|
BreadcrumbSection.js | import cx from 'classnames'
import _ from 'lodash'
import PropTypes from 'prop-types'
import React, { Component } from 'react'
import {
createShorthandFactory,
customPropTypes,
getUnhandledProps,
getElementType,
META,
useKeyOnly,
} from '../../lib'
/**
* A section sub-component for Breadcrumb component.
*/
export default class | extends Component {
static propTypes = {
/** An element type to render as (string or function). */
as: customPropTypes.as,
/** Style as the currently active section. */
active: PropTypes.bool,
/** Primary content. */
children: PropTypes.node,
/** Additional classes. */
className: PropTypes.string,
/** Shorthand for primary content. */
content: customPropTypes.contentShorthand,
/** Render as an `a` tag instead of a `div` and add the href attribute. */
href: customPropTypes.every([
customPropTypes.disallow(['link']),
PropTypes.string,
]),
/** Render as an `a` tag instead of a `div`. */
link: customPropTypes.every([
customPropTypes.disallow(['href']),
PropTypes.bool,
]),
/**
* Called on click. When passed, the component will render as an `a`
* tag by default instead of a `div`.
*
* @param {SyntheticEvent} event - React's original SyntheticEvent.
* @param {object} data - All props.
*/
onClick: PropTypes.func,
}
static _meta = {
name: 'BreadcrumbSection',
type: META.TYPES.COLLECTION,
parent: 'Breadcrumb',
}
handleClick = (e) => {
const { onClick } = this.props
if (onClick) onClick(e, this.props)
}
render() {
const {
active,
children,
className,
content,
href,
link,
onClick,
} = this.props
const classes = cx(
useKeyOnly(active, 'active'),
'section',
className,
)
const rest = getUnhandledProps(BreadcrumbSection, this.props)
const ElementType = getElementType(BreadcrumbSection, this.props, () => {
if (link || onClick) return 'a'
})
return (
<ElementType {...rest} className={classes} href={href} onClick={this.handleClick}>
{_.isNil(children) ? content : children}
</ElementType>
)
}
}
BreadcrumbSection.create = createShorthandFactory(BreadcrumbSection, content => ({ content, link: true }))
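// Usage sketch: `BreadcrumbSection.create('Home')` resolves to
// `<BreadcrumbSection link content='Home' />`, since the shorthand factory above
// maps a bare value to `{ content, link: true }`.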
| BreadcrumbSection |
index.js | import React from "react"
import { graphql, Link } from "gatsby"
import Layout from "../components/layout"
import SEO from "../components/seo"
import Posts from "../components/posts"
import {FaTags} from 'react-icons/fa'
class BlogIndex extends React.Component {
render() {
const { data } = this.props
const {title,companyName,companyUrl} = data.site.siteMetadata
return (
<Layout title={title} companyName={companyName} companyUrl={companyUrl}>
<SEO title="All posts" />
<Link
style={{
boxShadow: `none`,
textDecoration: `none`,
}}
to={`/tags`}
>
<FaTags/> Tags
</Link>
<Posts posts={data.posts.edges}/>
</Layout>
)
}
}
export default BlogIndex
export const pageQuery = graphql`
query {
site {
siteMetadata {
title
companyName
companyUrl
}
}
posts: allMarkdownRemark(
sort: { fields: [frontmatter___date], order: DESC }) {
edges {
node {
excerpt
fields {
slug
}
timeToRead
frontmatter { | }
}
}
}
}
` | date(formatString: "MMMM DD, YYYY")
title
description
author |
movie_to_image.py | import glob
import cv2
import os
def | (movie_files_dir, out_dir):
movie_files = glob.glob(movie_files_dir)
if not movie_files:
print('movie files are not found.')
return
for movie_file in movie_files:
ext = movie_file.split('.')[-1]
        if ext.lower() != 'mp4':
print(f"can't extract this movie file: {movie_file}")
continue
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
cap = cv2.VideoCapture(movie_file)
if not cap.isOpened():
print(f"can't extract this movie file: {movie_file}")
return
digit = len(str(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))))
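        # Zero-pad frame indices to the frame-count width so the output files sort in playback order.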
n = 0
while True:
ret, frame = cap.read()
if ret:
                cv2.imwrite(os.path.join(out_dir, f"{os.path.basename(movie_file)}_{str(n).zfill(digit)}.jpg"), frame)
n += 1
continue
            break
        cap.release()
print(f'{len(movie_files)} movie files extracted')
if __name__ == '__main__':
movie_files_dir = 'movies/*.mp4'
out_dir = 'out/'
extract_frame(movie_files_dir, out_dir)
| extract_frame |
ALS-Model-preparation.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and |
from __future__ import print_function
import sys
if sys.version >= '3':
long = int
from pyspark.sql import SparkSession
import numpy as np
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS
from pyspark.sql import Row
import pickle
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("ALSExample")\
.getOrCreate()
lines = spark.read.text("movielens_rating_100k").rdd
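    # Each line has the form "userId::movieId::rating::timestamp" (MovieLens "::"-delimited format).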
parts = lines.map(lambda row: row.value.split("::"))
ratingsRDD = parts.map(lambda p: Row(userId=int(p[0]), movieId=int(p[1]),
rating=float(p[2]), timestamp=long(p[3])))
ratings = spark.createDataFrame(ratingsRDD)
(training, test) = ratings.randomSplit([0.8, 0.2])
# Build the recommendation model using ALS on the training data
# Note we set cold start strategy to 'drop' to ensure we don't get NaN evaluation metrics
als = ALS(maxIter=10, regParam=0.1, userCol="userId", itemCol="movieId", ratingCol="rating",
coldStartStrategy="drop")
    als.setRank(4)  # use the explicit setter; assigning als.rank directly shadows the Param and is silently ignored
model = als.fit(training)
# Evaluate the model by computing the RMSE on the test data
predictions = model.transform(test)
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",
predictionCol="prediction")
rmse = evaluator.evaluate(predictions)
print("Root-mean-square error = " + str(rmse))
#print(np.asarray([x[1] for x in model.userFactors.collect()]))
userfactorinoinnp = np.asarray([x[1] for x in model.userFactors.collect()])
itemfactorinoinnp = np.asarray([x[1] for x in model.itemFactors.collect()])
filename = 'ALSItem'
outfile = open(filename, 'wb')
pickle.dump(itemfactorinoinnp, outfile)
outfile.close()
filename = 'ALSUser'
outfile = open(filename, 'wb')
    pickle.dump(userfactorinoinnp, outfile)
    outfile.close()
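    # To reload a pickled factor matrix later (sketch):
    #   with open('ALSUser', 'rb') as infile:
    #       user_factors = pickle.load(infile)  # ndarray of shape (num_users, rank)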
#model.save("ALSModel")
spark.stop() | # limitations under the License.
#
# NOTICE
#MODIFIED FROM ORIGINAL, TAKEN FROM APACHE DOCUMENTATION |
lib.rs | //! Recursive reference.
//!
//!This crate provides a way to traverse recursive structures easily and safely.
//!Rust's lifetime rules will usually force you to either only walk forward through the structure,
//!or use recursion, calling your method recursively every time you go down a node,
//!and returning every time you want to go back up, which leads to terrible code.
//!
//!Instead, you can use the [`RecRef`] type, to safely and dynamically walk up
//!and down your recursive structure.
//!
//!# Examples
//!
//! Say we have a recursive linked list structure
//! ----------------------------------------------
//!```rust
//!enum List<T> {
//! Root(Box<Node<T>>),
//! Empty,
//!}
//!struct Node<T> {
//! value: T,
//! next: List<T>,
//!}
//!```
//!
//!We can use a [`RecRef`] directly
//!----------------------------------------------
//!```rust
//!use recursive_reference::*;
//!
//! # enum List<T> {
//! # Root(Box<Node<T>>),
//! # Empty,
//! # }
//! # struct Node<T> {
//! # value: T,
//! # next: List<T>,
//! # }
//!
//!fn main() -> Result<(), ()> {
//! // create a list to test
//! let node1 = Node {
//! value: 5,
//! next: List::Empty,
//! };
//! let mut node2 = Node {
//! value: 2,
//! next: List::Root(Box::new(node1)),
//! };
//!
//! // create a `RecRef`
//! let mut rec_ref = RecRef::new(&mut node2);
//! // rec_ref is a smart pointer to the current node
//! assert_eq!(rec_ref.value, 2);
//!
//! // move forward through the list
//! RecRef::extend_result(&mut rec_ref, |node| match &mut node.next {
//! List::Root(next_node) => Ok(next_node),
//! List::Empty => Err(()),
//! })?;
//! assert_eq!(rec_ref.value, 5); // now we're at the second node
//!
//! // pop the `RecRef`, moving it back to the head
//! RecRef::pop(&mut rec_ref).ok_or(())?;
//! assert_eq!(rec_ref.value, 2);
//! Ok(())
//!}
//!```
//!
//!We can also wrap a [`RecRef`] in a walker struct
//!----------------------------------------------
//!Note: this walker wraps a `RecRef<Node<T>>`, so it always points at a node and cannot
//!point at the empty end of the list; `next` fails there instead of moving past the last node.
//!```rust
//!use recursive_reference::*;
//! # enum List<T> {
//! # Root(Box<Node<T>>),
//! # Empty,
//! # }
//! # struct Node<T> {
//! # value: T,
//! # next: List<T>,
//! # }
//!struct Walker<'a, T> {
//! rec_ref: RecRef<'a, Node<T>>,
//!}
//!impl<'a, T> Walker<'a, T> {
//! /// Creates a new Walker
//! pub fn new(node: &'a mut Node<T>) -> Self {
//! Walker {
//! rec_ref: RecRef::new(node),
//! }
//! }
//!
//! /// Returns `None` when at the tail end of the list.
//! /// Moves to the next node.
//! pub fn next(&mut self) -> Option<()> {
//! RecRef::extend_result(&mut self.rec_ref, |current| match &mut current.next {
//! List::Empty => Err(()),
//! List::Root(node) => Ok(node),
//! })
//! .ok()
//! }
//!
//! /// Returns `None` when at the head of the list.
//! /// Goes back to the previous node.
//! pub fn prev(&mut self) -> Option<()> {
//! RecRef::pop(&mut self.rec_ref)?;
//! Some(())
//! }
//!
//! /// Returns a mutable reference to the current value.
//! pub fn value_mut(&mut self) -> &mut T {
//! &mut self.rec_ref.value
//! }
//!}
//!
//!fn main() -> Result<(), ()> {
//! // create a list to test
//! let node1 = Node {
//! value: 5,
//! next: List::Empty,
//! };
//! let mut node2 = Node {
//! value: 2,
//! next: List::Root(Box::new(node1)),
//! };
//!
//! // create a walker for the list
//! let mut walker = Walker::new(&mut node2);
//! // walker has mutable access to the node value
//! assert_eq!(*walker.value_mut(), 2);
//! // move to the next node
//! walker.next().ok_or(())?;
//! assert_eq!(*walker.value_mut(), 5);
//! assert_eq!(walker.next(), None); // currently at the end of the list
//! // move back
//! walker.prev().ok_or(())?;
//! assert_eq!(*walker.value_mut(), 2);
//! Ok(())
//!}
//!```
//! With a [`RecRef`] you can
//! ----------------------------------------------
//! * Use the current reference (i.e, the top reference).
//! the [`RecRef`] is a smart pointer to it.
//! * Freeze the current reference
//! and extend the [`RecRef`] with a new reference derived from it, using [`extend`][RecRef::extend] and similar functions.
//! for example, push to the stack a reference to the child of the current node.
//! * Pop the stack to get back to the previous reference, unfreezing it.
//!
//! # Safety
//! The [`RecRef`] type is implemented using unsafe rust, but provides a safe interface.
//! The [`RecRef`] methods' types guarantee that the references will always have a legal lifetime
//! and will respect rust's borrow rules, even if that lifetime is not known in advance.
//!
//! The [`RecRef`] obeys rust's borrowing rules, by simulating freezing. Whenever
//! you extend a [`RecRef`] with a reference `child_ref` that is derived from the current
//! reference `parent_ref`, the [`RecRef`] freezes `parent_ref`, and no longer allows
//! `parent_ref` to be used.
//! When `child_ref` will be popped from the [`RecRef`],
//! `parent_ref` will be allowed to be used again.
//!
//! This is essentially the same as what would have happened if you wrote your functions recursively,
//! but it's decoupled from the actual call stack.
//!
//! Another important point to consider is the safety of
//! the actual call to [`extend`][RecRef::extend]: see its documentation.
#![no_std]
#![doc(html_root_url = "https://docs.rs/recursive_reference/0.3.0/recursive_reference/")]
extern crate alloc;
use alloc::vec::*;
use core::marker::PhantomData;
use core::ops::{Deref, DerefMut};
use core::ptr::NonNull;
use void::ResultVoidExt;
/// A Recursive reference.
/// This struct is used to allow recursively reborrowing mutable references in a dynamic
/// but safe way.
///
/// `RecRef<'a, T>` represents a reference to a value of type `T`, with lifetime `'a`,
/// which can move recursively into and out of its subfields of the same type `T`.
///
/// With a [`RecRef`] you can
/// ----------------------------------------------
/// * Use the current reference (i.e, the top reference).
/// The [`RecRef`] is a smart pointer to it.
/// * Freeze the current reference
/// and extend the [`RecRef`] with a new reference derived from it, using [`extend`][RecRef::extend] and similar functions.
/// For example, push to the stack a reference to the child of the current node.
/// * Pop the stack to get back to the previous reference, unfreezing it.
///
/// The methods' types guarantee that the references will always have a legal lifetime
/// and will respect rust's borrow rules, even if that lifetime is not known in advance.
///
/// Internally, the [`RecRef`] stores a [`Vec`] of pointers, that it extends and pops from.
// Safety invariants (please read `RecRef::extend`'s documentation before reading this):
// The values in `vec` are allowed to alias. However:
// For any index `i`, `vec[i]` can be safely used under these conditions:
// * all of the values in `vec[..i]` are considered frozen.
// * all of the values in `vec[i+1..]` are gone (e.g, if they are popped from the RecRef).
// In such a case `vec[i]` could be unfrozen, converted to a `&'x mut T` for any `'a : 'x` and used.
// Specifically, this happens when the values in `vec[i+1..]` are references produced
// through `vec[i]`.
// See `RecRef::extend`'s documentation for more details on how this is ensured.
//
// In particular, all values in `vec` have been produced from valid mutable references `&mut T`.
pub struct RecRef<'a, T: ?Sized> {
head: NonNull<T>,
vec: Vec<NonNull<T>>,
phantom: PhantomData<&'a mut T>,
}
impl<'a, T: ?Sized> RecRef<'a, T> {
/// Creates a new RecRef containing only a single reference.
pub fn new(r: &'a mut T) -> Self {
RecRef {
head: NonNull::from(r),
vec: Vec::new(),
phantom: PhantomData,
}
}
/// Returns the size of `rec_ref`, i.e, the amount of references in it.
/// It increases every time you extend `rec_ref`, and decreases every time you pop
/// `rec_ref`.
/// The size of a new [`RecRef`] is always `1`.
pub fn size(rec_ref: &Self) -> usize {
rec_ref.vec.len() + 1
}
/// This function extends `rec_ref` one time. If the current
/// reference is `current_ref: &mut T`, then this call extends `rec_ref`
/// with the new reference `ref2: &mut T = func(current_ref)`.
/// After this call, `rec_ref` will expose the new `ref2`, and `current_ref`
/// will be frozen (As it is borrowed by `ref2`), until `ref2` is
/// popped off, unfreezing `current_ref`.
///
/// # Safety:
/// Pay close attention to the type of `func`: we require that
/// `F: for<'b> FnOnce(&'b mut T) -> &'b mut T`. That is, for every lifetime `'b`,
/// we require that `F: FnOnce(&'b mut T) -> &'b mut T`.
///
/// Let's define `'freeze_time` to be the time `ref2` will be in the [`RecRef`].
/// That is, `'freeze_time`
/// is the time for which `ref2` will live, and the lifetime in which `current_ref`
/// will be frozen by `ref2`. Then, the type of `func` should have been
    /// `FnOnce(&'freeze_time mut T) -> &'freeze_time mut T`. If that had been the type
    /// of `func`, the code would have followed rust's borrowing rules correctly.
///
/// However, we can't know yet what that
/// lifetime is: it will be whatever amount of time passes until the programmer decides
/// to pop `ref2` out of the [`RecRef`]. And that hasn't even been decided at this point.
/// Whatever lifetime `'freeze_time` that turns out to be, we will know
/// after-the-fact that the type of `func` should have been
/// `FnOnce(&'freeze_time mut T) -> &'freeze_time mut T`.
///
/// Therefore, the solution is to require that `func` will be able to work with any value of
/// `'freeze_time`. Then we can be
/// sure that the code would've worked correctly if we put the correct lifetime there.
/// Therefore, we can always pick correct lifetimes after-the-fact, so the code must be safe.
///
/// Also note:
/// The type ensures that the current reference can't be leaked outside of `func`.
/// `func` can't guarantee that
/// `current_ref` will live for any length of time, so it can't store it outside anywhere
/// or give it to anything.
/// It can only use `current_ref` while still inside `func`,
/// and use it in order to return `ref2`, which is the
/// intended usage.
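    ///
    /// # Example
    /// A minimal sketch; slices are a convenient `?Sized` recursive type to extend into:
    /// ```rust
    /// use recursive_reference::RecRef;
    ///
    /// let mut data = [1, 2, 3];
    /// let mut rec_ref = RecRef::new(&mut data[..]);
    /// RecRef::extend(&mut rec_ref, |slice| &mut slice[1..]); // descend into the tail
    /// assert_eq!(rec_ref[0], 2);
    /// RecRef::pop(&mut rec_ref); // back to the full slice
    /// assert_eq!(rec_ref[0], 1);
    /// ```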
pub fn extend<F>(rec_ref: &mut Self, func: F)
where
F: for<'b> FnOnce(&'b mut T) -> &'b mut T,
{
Self::extend_result(rec_ref, |r| Ok(func(r))).void_unwrap()
}
/// Same as [`Self::extend`], but allows the function to return an error value.
pub fn extend_result<E, F>(rec_ref: &mut Self, func: F) -> Result<(), E>
where
F: for<'b> FnOnce(&'b mut T) -> Result<&'b mut T, E>,
{
Self::extend_result_precise(rec_ref, |r, _phantom| func(r))
}
/// Same as [`Self::extend`], but allows the function to return an error value,
/// and also tells the inner function that `'a : 'b` using a phantom argument.
pub fn extend_result_precise<E, F>(rec_ref: &mut Self, func: F) -> Result<(), E>
where
F: for<'b> FnOnce(&'b mut T, PhantomData<&'b &'a ()>) -> Result<&'b mut T, E>,
{
// Safety:
// `rec_ref.head` was produced from a `&mut T`, and by RecRef's
// safety invariant, it can be used. Furthermore,
// Pushing another reference derived from it into the `rec_ref` preserves
// the safety invariant.
//
// To understand how the invariant is ensured (and what it means)
// see `RecRef::extend`'s documentation.
//
// However, there's another assumption here.
// The lifetime of the reference here is indeterminate.
// It could be any value. Thus by default the compiler will choose it
// to be extremely short, that is, only until where `NonNull::from` is called on it.
//
// In fact, we want this lifetime to be a lifetime we haven't chosen yet.
// It could be anything from as short as `'_` to as long as `'a`.
// I arbitrarily chose it to be set to `'_`.
//
// Essentially, we assume here that calling `func` will have the same effect
// even if we give it the wrong lifetime. In other words, we assume some form
// of parametricity.
// Semantically, this is true: code can't ever access the lifetimes. All lifetime
// information is deleted at compile time.
// However, we also assume that rust's optimizations
// won't change the program's meaning because we used the wrong lifetime.
let head_ref: &'_ mut T = unsafe { rec_ref.head.as_mut() };
match func(head_ref, PhantomData) {
Ok(p) => {
Self::push(rec_ref, p);
Ok(())
}
Err(e) => Err(e),
}
}
/// This function maps the top of the [`RecRef`]. It's similar to [`Self::extend`], but
/// it replaces the current reference instead of keeping it. See [`Self::extend`] for more details.
pub fn map<F>(rec_ref: &mut Self, func: F)
where
F: for<'b> FnOnce(&'b mut T) -> &'b mut T,
{
Self::map_result(rec_ref, |r| Ok(func(r))).void_unwrap()
}
/// Same as [`Self::map`], but allows the function to return an error value.
pub fn map_result<E, F>(rec_ref: &mut Self, func: F) -> Result<(), E>
where
F: for<'b> FnOnce(&'b mut T) -> Result<&'b mut T, E>,
{
Self::map_result_precise(rec_ref, |r, _| func(r))
}
/// Same as [`Self::map`], but allows the function to return an error value,
/// and also tells the inner function that `'a : 'b` using a phantom argument.
pub fn map_result_precise<E, F>(rec_ref: &mut Self, func: F) -> Result<(), E>
where
F: for<'b> FnOnce(&'b mut T, PhantomData<&'b &'a ()>) -> Result<&'b mut T, E>,
{
// Safety:
// `rec_ref.head` was produced from a `&mut T`, and by RecRef's
// safety invariant, it can be used. Furthermore,
    // Replacing the head with another reference derived from it preserves
// the safety invariant.
//
// To understand how the invariant is ensured (and what it means)
// see `RecRef::extend`'s documentation.
//
// However, there's another assumption here.
// The lifetime of the reference here is indeterminate.
// It could be any value. Thus by default the compiler will choose it
// to be extremely short, that is, only until where `NonNull::from` is called on it.
//
// In fact, we want this lifetime to be a lifetime we haven't chosen yet.
// It could be anything from as short as `'_` to as long as `'a`.
// I arbitrarily chose it to be set to `'_`.
//
// Essentially, we assume here that calling `func` will have the same effect
// even if we give it the wrong lifetime. In other words, we assume some form
// of parametricity.
// Semantically, this is true: code can't ever access the lifetimes. All lifetime
// information is deleted at compile time.
// However, we also assume that rust's optimizations
// won't change the program's meaning because we used the wrong lifetime.
let head_ref: &'_ mut T = unsafe { rec_ref.head.as_mut() };
match func(head_ref, PhantomData) {
Ok(p) => { | }
}
/// Push another reference to the [`RecRef`], unrelated to the current one.
/// `rec_ref.push(new_ref)` is morally equivalent to `rec_ref.extend_result_precise(move |_, _| { Ok(new_ref) })`.
/// However, you might have some trouble making the anonymous function conform to the
/// right type.
pub fn push(rec_ref: &mut Self, r: &'a mut T) {
rec_ref.vec.push(rec_ref.head);
rec_ref.head = NonNull::from(r);
/* alternative definition using a call to `extend_result_precise`.
// in order to name 'x, replace the signature with:
// pub fn push<'x>(rec_ref: &'x mut Self, r : &'a mut T) {
// this is used in order to tell the closure to conform to the right type
fn helper<'a,'x, T : ?Sized, F> (f : F) -> F where
F : for<'b> FnOnce(&'b mut T, PhantomData<&'b &'a ()>)
-> Result<&'b mut T, void::Void> + 'x
{ f }
Self::extend_result_precise(rec_ref,
helper::<'a,'x>(move |_, _phantom| { Ok(r) })
).void_unwrap();
*/
}
/// Lets the user use the last reference for some time, and discards it completely.
/// After the user uses it, the next time they inspect the [`RecRef`], it won't be there.
/// If the [`RecRef`] has only one reference left, this returns `None`, because
/// the [`RecRef`] can't be empty.
pub fn pop(rec_ref: &mut Self) -> Option<&mut T> {
// Safety:
// This pointer was produced from a `&mut T`.
//
// By RecRef's safety invariant, this reference can be used.
// Whenever it's used, `rec_ref` is frozen, preventing further
// access until this reference is dropped.
let res = unsafe { rec_ref.head.as_mut() };
rec_ref.head = rec_ref.vec.pop()?; // We can't pop the original reference. In that case, Return None.
Some(res)
}
/// Discards the [`RecRef`] and returns the last reference.
/// The difference between this and using [`Self::pop`] are:
/// * This will consume the [`RecRef`]
/// * [`Self::pop`] will never pop the first original reference, because that would produce an
/// invalid [`RecRef`]. [`Self::into_ref`] will.
pub fn into_ref(mut rec_ref: Self) -> &'a mut T {
// Safety:
// This pointer was produced from a `&mut T`.
//
// By RecRef's safety invariant, this reference can be used with lifetime `'a`.
// `rec_ref` is consumed, preventing further
// access until this reference is dropped.
unsafe { rec_ref.head.as_mut() }
}
}
/// [`RecRef<T>`] represents a reference to a value of type `T`,
/// which can move recursively into and out of its subfields of the same type `T`.
/// Therefore, it implements `Deref` and `DerefMut` with `Item=T`.
impl<'a, T: ?Sized> Deref for RecRef<'a, T> {
type Target = T;
fn deref(&self) -> &T {
// Safety:
// This pointer was produced from a `&mut T`.
//
// By RecRef's safety invariant, this reference can be used.
// Whenever it's used, `rec_ref` is borrowed immutably, preventing mutable
// access until this reference is dropped.
unsafe { self.head.as_ref() }
}
}
/// [`RecRef<T>`] represents a reference to a value of type `T`,
/// which can move recursively into and out of its subfields of the same type `T`.
/// Therefore, it implements `Deref` and `DerefMut` with `Item=T`.
impl<'a, T: ?Sized> DerefMut for RecRef<'a, T> {
fn deref_mut(&mut self) -> &mut T {
// Safety:
// This pointer was produced from a `&mut T`.
//
// By RecRef's safety invariant, this reference can be used.
// Whenever it's used, `rec_ref` is frozen, preventing further
// access until this reference is dropped.
unsafe { self.head.as_mut() }
}
}
impl<'a, Q: ?Sized, T: ?Sized + AsRef<Q>> AsRef<Q> for RecRef<'a, T> {
fn as_ref(&self) -> &Q {
AsRef::as_ref(&**self)
}
}
impl<'a, Q: ?Sized, T: ?Sized + AsMut<Q>> AsMut<Q> for RecRef<'a, T> {
fn as_mut(&mut self) -> &mut Q {
AsMut::as_mut(&mut **self)
}
}
impl<'a, T: ?Sized> From<&'a mut T> for RecRef<'a, T> {
fn from(r: &'a mut T) -> Self {
Self::new(r)
}
}
/// # Safety:
/// Behaviorally, A [`RecRef`] is the same as `&'a mut T`, and
/// should be [`Send`] for the same reason. Additionally, it contains a [`Vec`].
/// The [`Send`] instance for [`Vec`] contains the bound `A: Send` for the allocator type `A`,
/// so we should require that as well. However, we don't have direct access to the
/// default allocator type. So instead we require `Vec<&'a mut T>: Send`.
unsafe impl<'a, T: ?Sized + Send> Send for RecRef<'a, T> where Vec<&'a mut T>: Send {}
/// # Safety:
/// Behaviorally, A [`RecRef`] is the same as `&'a mut T`, and
/// should be [`Sync`] for the same reason. Additionally, it contains a [`Vec`].
/// The [`Sync`] instance for [`Vec`] contains the bound `A: Sync` for the allocator type `A`,
/// so we should require that as well. However, we don't have direct access to the
/// default allocator type. So instead we require `Vec<&'a mut T>: Sync`.
unsafe impl<'a, T: ?Sized + Sync> Sync for RecRef<'a, T> where Vec<&'a mut T>: Sync {} | rec_ref.head = NonNull::from(p);
Ok(())
}
Err(e) => Err(e), |
feature_minchainwork.py | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import BontecoinTestFramework
from test_framework.util import connect_nodes, assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(BontecoinTestFramework):
def set_test_params(self):
|
def setup_network(self):
# This test relies on the chain setup being:
# node0 <- node1 <- node2
# Before leaving IBD, nodes prefer to download blocks from outbound
# peers, so ensure that we're mining on an outbound peer and testing
# block relay to inbound peers.
self.setup_nodes()
for i in range(self.num_nodes-1):
connect_nodes(self.nodes[i+1], i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
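        # With -minimumchainwork=0x65 (= 101) and 2 units of work per block, this is
        # 49 blocks: genesis work (2) + 49 * 2 = 100 < 101, so node1 remains just below its
        # threshold until the single extra block generated later pushes the chain work past it.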
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generate(num_blocks_to_generate)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generate(1)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
MinimumChainWorkTest().main()
| self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101] |
getScheduledTrigger.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20200901
import (
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func | (ctx *pulumi.Context, args *LookupScheduledTriggerArgs, opts ...pulumi.InvokeOption) (*LookupScheduledTriggerResult, error) {
var rv LookupScheduledTriggerResult
err := ctx.Invoke("azure-native:datashare/v20200901:getScheduledTrigger", args, &rv, opts...)
if err != nil {
return nil, err
}
return &rv, nil
}
type LookupScheduledTriggerArgs struct {
AccountName string `pulumi:"accountName"`
ResourceGroupName string `pulumi:"resourceGroupName"`
ShareSubscriptionName string `pulumi:"shareSubscriptionName"`
TriggerName string `pulumi:"triggerName"`
}
// A type of trigger based on schedule
type LookupScheduledTriggerResult struct {
CreatedAt string `pulumi:"createdAt"`
Id string `pulumi:"id"`
Kind string `pulumi:"kind"`
Name string `pulumi:"name"`
ProvisioningState string `pulumi:"provisioningState"`
RecurrenceInterval string `pulumi:"recurrenceInterval"`
SynchronizationMode *string `pulumi:"synchronizationMode"`
SynchronizationTime string `pulumi:"synchronizationTime"`
SystemData SystemDataResponse `pulumi:"systemData"`
TriggerStatus string `pulumi:"triggerStatus"`
Type string `pulumi:"type"`
UserName string `pulumi:"userName"`
}
| LookupScheduledTrigger |
__main__.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import base64
import binascii
import os
import struct
import traceback
import uuid
import xml.etree.ElementTree as ET
from . import kit, util
try:
# Try local import first.
from pydel import pydel
except ImportError:
import pydel
import rpp
def convert_notes_to_midi(channel, notes, clip_length, instance_length):
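    # Emits REAPER midi-pool event rows of the form
    # [command, pulse offset from the previous event, status byte, note, velocity],
    # where "E" marks an unselected event and "Em" an unselected muted one (hex fields).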
notes = pydel.notes_to_on_off_notes(notes)
# Sort by start pulse.
notes.sort(key=lambda note: note.start)
if pydel.condition_expansion_needed(notes):
notes = pydel.condition_expand_notes(notes, clip_length, instance_length)
# Update the clip length to reflect that it's been extended to the instance length.
clip_length = instance_length
midi_messages = []
last_start = 0
for note in notes:
if note.velocity == 0:
# Note off.
ctrl = (0b1000 << 4) | channel
else:
# Note on.
ctrl = (0b1001 << 4) | channel
offset = note.start - last_start
if note.muted:
# Unselected, muted.
command = "Em"
else:
# Unselected.
command = "E"
msg = [
command,
offset,
hex(ctrl)[2:],
hex(note.y)[2:],
hex(note.velocity)[2:],
]
last_start = note.start
midi_messages.append(msg)
# Append a final "note off" message to make the clip the correct length.
offset = clip_length - last_start
msg = [
"E", # Unselected.
offset,
hex(0xb0)[2:],
hex(0x7b)[2:],
hex(0x00)[2:],
]
midi_messages.append(msg)
return midi_messages
def clip_instance_to_reaper_item(clip_instance,
clip,
pretty_clip_idx,
reaper_source,
tempo,
additional_children=None):
if additional_children is None:
additional_children = []
color = util.color_to_reaper(pydel.section_to_color(clip.section))
volume = clip.params.volume
pan = clip.params.pan
start_in_seconds = pydel.pulses_to_seconds(clip_instance.start, tempo)
length_in_seconds = pydel.pulses_to_seconds(clip_instance.length, tempo)
return rpp.Element(
tag="ITEM",
attrib=[],
children=[
["POSITION", start_in_seconds],
["LENGTH", length_in_seconds],
["LOOP", 1],
# Item GUID.
["IGUID", util.generate_guid()],
# First take GUID.
["GUID", util.generate_guid()],
["NAME", "Clip {}".format(pretty_clip_idx)],
["VOLPAN", volume, pan],
["COLOR", color, "B"],
# I think this is a no-op?
["POOLCOLOR", color, "B"],
reaper_source,
] + additional_children)
def audio_clip_to_reaper_source(clip, path_prefix, tempo):
file_path = clip.file_path
# TODO. Hack to point to the right file (thanks to collect?).
#file_path = "/Users/dcower/Downloads/bug1/Bug1/" + file_path[8:].replace("/", "_")
file_path = os.path.join(path_prefix, file_path)
reaper_source_wave = rpp.Element(
tag="SOURCE", attrib=["WAVE"], children=[
["FILE", file_path],
])
# The length of the stored selected audio sample (the chunk between the start and end sample
# positions).
sample_length_seconds = (clip.end_sample_pos - clip.start_sample_pos) / float(
pydel.SAMPLE_RATE_HZ)
# The actual length of the audio *clip*.
clip_length_seconds = pydel.pulses_to_seconds(clip.length, tempo)
playback_rate = sample_length_seconds / clip_length_seconds
# TODO: Should we round playback rate and clip length to get nicer numbers?
# playback_rate = round(playback_rate, 4)
reaper_source_additional_children = [[
"PLAYRATE",
playback_rate,
# 1 = Preserve pitch when changing rate.
int(clip.pitch_speed_independent),
# float, pitch adjust, in semitones.cents
clip.transpose,
# Default pitch shift mode.
-1,
# ?
0,
# ?
0.0025,
]]
section_children = [
["STARTPOS", clip.start_sample_pos / float(pydel.SAMPLE_RATE_HZ)],
["LENGTH", sample_length_seconds],
]
if clip.reversed:
section_children.append(["MODE", 2])
section_children.append(reaper_source_wave)
reaper_source_section = rpp.Element(
tag="SOURCE", attrib=["SECTION"], children=section_children)
return reaper_source_section, reaper_source_additional_children
# TODO: Get rid of this...?
class ReaperMidiPool(object):
    pass
def midi_clip_to_reaper_source(clip, clip_idx, length,
midi_clip_idx_to_reaper_midi_pool):
midi_messages = convert_notes_to_midi(clip.channel, clip.notes, clip.length,
length)
# TODO: Move pooling into something else -- clip_idx shouldn't come into here.
# TODO for MIDI pooling:
# - If we have only condition codes >= 0x14, we can pool all instances and just create the largest
# instance.
# Right now, without this, if there are 2 instances *with condition codes* with different
# lengths, they will not be pooled.
if (clip_idx in midi_clip_idx_to_reaper_midi_pool and
midi_messages == midi_clip_idx_to_reaper_midi_pool[clip_idx].midi_messages
):
reaper_midi_pool = midi_clip_idx_to_reaper_midi_pool[clip_idx]
# Skip putting the MIDI data into the pool.
midi_messages = []
else:
reaper_midi_pool = ReaperMidiPool()
reaper_midi_pool.guid = util.generate_guid()
reaper_midi_pool.pooledevts = util.generate_guid()
reaper_midi_pool.midi_messages = midi_messages
midi_clip_idx_to_reaper_midi_pool[clip_idx] = reaper_midi_pool
return rpp.Element(
tag="SOURCE",
attrib=["MIDIPOOL"],
children=[
["HASDATA", [1, pydel.PPQN, "QN"]],
["POOLEDEVTS", reaper_midi_pool.pooledevts],
["GUID", reaper_midi_pool.guid],
] + midi_messages)
def project_to_reaper_tracks(project, path_prefix):
|
def convert(args):
print("Converting {}".format(args.input_file.name))
try:
tree = ET.parse(args.input_file)
root = tree.getroot()
        if root.tag != "song":
            raise ValueError("root tag is not 'song'")
    except Exception:
print(
"ERROR: Only songs from Deluge 3.x are supported. Try re-saving song using 3.x firmware."
)
print(traceback.format_exc())
return
project = pydel.Project.from_element(root)
# Prefix for file paths -- corresponds to root dir on Deluge SD card.
input_dir = os.path.dirname(args.input_file.name)
output_dir = os.path.dirname(args.output_file.name)
# relpath doesn't handle empty input paths.
if input_dir == "":
input_dir = "./"
path_prefix, songs = os.path.split(os.path.relpath(input_dir, output_dir))
if songs != "SONGS":
print(
"WARNING: Expected song to be in SONGS/ directory. Audio clip paths may be incorrect."
)
# TODO: Support collect songs.
reaper_project = rpp.Element(
tag="REAPER_PROJECT",
attrib=["0.1", "5.972/OSX64", "1372525904"],
children=[
["RIPPLE", "0"],
["GROUPOVERRIDE", "0", "0", "0"],
["AUTOXFADE", "1"],
["TEMPO", project.tempo],
["PLAYRATE", 1, 0, 0.25, 4],
["SAMPLERATE", pydel.util.SAMPLE_RATE_HZ, 1, 0],
] + project_to_reaper_tracks(project, path_prefix))
# TODO: Add markers + unused clips.
rpp.dump(reaper_project, args.output_file)
args.input_file.close()
args.output_file.close()
def main():
parser = argparse.ArgumentParser(
description="Converts Synthstrom Audible Deluge songs (XML) to Reaper projects (RPP)."
)
parser.add_argument(
"input_file",
type=argparse.FileType("r"),
help="input Deluge .XML song file")
parser.add_argument(
"output_file",
nargs="?",
type=argparse.FileType("w"),
default="out.rpp",
help="output Reaper .RPP project file")
args = parser.parse_args()
convert(args)
if __name__ == "__main__":
main()
| midi_clip_idx_to_reaper_midi_pool = {}
reaper_tracks = []
# Deluge instruments/tracks are stored bottom-to-top.
for instrument in reversed(project.instruments):
guid = util.generate_guid()
reaper_items = []
# TODO: Add unused clip instances to the end of the timeline.
# Add the clip ("item" in REAPER) instances.
for clip_instance in instrument.clip_instances:
# Clip index is encoded as an arrange-only clip.
# TODO: Deal with this as a clip-level abstraction?
if clip_instance.clip_idx & 0x80000000:
clip_idx = clip_instance.clip_idx - 0x80000000
clip = project.arrange_only_clips[clip_idx]
pretty_clip_idx = -clip_idx
else:
clip = project.clips[clip_instance.clip_idx]
pretty_clip_idx = clip_instance.clip_idx
reaper_item_additional_children = []
if clip.has_audio():
assert instrument.name == clip.track_name
reaper_source, reaper_item_additional_children = audio_clip_to_reaper_source(
clip, path_prefix, project.tempo)
elif clip.has_midi(): # MIDI, synths, and kits.
reaper_source = midi_clip_to_reaper_source(
clip, clip_instance.clip_idx, clip_instance.length,
midi_clip_idx_to_reaper_midi_pool)
else:
print("WARNING: Clip has neither audio nor MIDI")
continue
reaper_items.append(
clip_instance_to_reaper_item(clip_instance, clip, pretty_clip_idx,
reaper_source, project.tempo,
reaper_item_additional_children))
fx_chain = []
if type(instrument) is pydel.instrument.Kit:
fx_chain = [kit.generate_kit_fx_chain(instrument, path_prefix)]
reaper_tracks.append(
rpp.Element(
tag="TRACK",
attrib=[guid],
children=[["NAME", instrument.pretty_name()], ["TRACKID", guid],
["MUTESOLO", [int(instrument.muted), 0, 0]]] +
reaper_items + fx_chain))
return reaper_tracks |
vector2function.py | # Copyright (c) 2016, The University of Texas at Austin & University of
# California, Merced.
#
# All Rights reserved.
# See file COPYRIGHT for details.
#
# This file is part of the hIPPYlib library. For more information and source code
# availability see https://hippylib.github.io.
#
# hIPPYlib is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License (as published by the Free
# Software Foundation) version 2.0 dated June 1991.
from __future__ import absolute_import, division, print_function
from dolfin import Function
def | (x,Vh, **kwargs):
"""
Wrap a finite element vector x into a finite element function in the space Vh.
    kwargs are optional keyword arguments to be passed to the constructor of the dolfin Function
"""
fun = Function(Vh,**kwargs)
fun.vector().zero()
fun.vector().axpy(1., x)
return fun
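
# Example usage (sketch; assumes a dolfin FunctionSpace Vh and a compatible dolfin Vector x):
#   m_fun = vector2Function(x, Vh, name="parameter")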
| vector2Function |
stages_sort.js | // Test query stage sorting.
if (false) {
t = db.stages_sort;
t.drop();
var N = 50;
for (var i = 0; i < N; ++i) {
t.insert({foo: i, bar: N - i});
}
    t.ensureIndex({foo: 1});
// Foo <= 20, descending.
ixscan1 = {ixscan: {args:{name: "stages_sort", keyPattern:{foo: 1},
startKey: {"": 20},
endKey: {}, endKeyInclusive: true,
direction: -1}}};
// Sort with foo ascending.
sort1 = {sort: {args: {node: ixscan1, pattern: {foo: 1}}}};
res = db.runCommand({stageDebug: sort1});
assert(!db.getLastError()); | assert.eq(res.results[20].foo, 20);
// Sort with a limit.
//sort2 = {sort: {args: {node: ixscan1, pattern: {foo: 1}, limit: 2}}};
//res = db.runCommand({stageDebug: sort2});
//assert(!db.getLastError());
//assert.eq(res.ok, 1);
//assert.eq(res.results.length, 2);
//assert.eq(res.results[0].foo, 0);
//assert.eq(res.results[1].foo, 1);
} | assert.eq(res.ok, 1);
assert.eq(res.results.length, 21);
assert.eq(res.results[0].foo, 0); |
test_mle.py | import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.stats.lombscargle.implementations.mle import design_matrix, periodic_fit
@pytest.fixture
def t():
rand = np.random.RandomState(42)
return 10 * rand.rand(10)
@pytest.mark.parametrize('freq', [1.0, 2])
@pytest.mark.parametrize('dy', [None, 2.0])
@pytest.mark.parametrize('bias', [True, False])
def test_design_matrix(t, freq, dy, bias):
X = design_matrix(t, freq, dy, bias=bias)
assert X.shape == (t.shape[0], 2 + bool(bias))
if bias:
assert_allclose(X[:, 0], 1. / (dy or 1.0))
assert_allclose(X[:, -2], np.sin(2 * np.pi * freq * t) / (dy or 1.0))
assert_allclose(X[:, -1], np.cos(2 * np.pi * freq * t) / (dy or 1.0))
@pytest.mark.parametrize('nterms', range(4))
def test_multiterm_design_matrix(t, nterms):
dy = 2.0
freq = 1.5
X = design_matrix(t, freq, dy=dy, bias=True, nterms=nterms)
assert X.shape == (t.shape[0], 1 + 2 * nterms)
assert_allclose(X[:, 0], 1. / dy)
for i in range(1, nterms + 1):
assert_allclose(X[:, 2 * i - 1], np.sin(2 * np.pi * i * freq * t) / dy)
assert_allclose(X[:, 2 * i], np.cos(2 * np.pi * i * freq * t) / dy)
@pytest.mark.parametrize('nterms', range(1, 4))
@pytest.mark.parametrize('freq', [1, 2])
@pytest.mark.parametrize('fit_mean', [True, False])
def | (nterms, freq, fit_mean):
rand = np.random.RandomState(42)
t = 10 * rand.rand(30)
theta = -1 + rand.rand(2 * nterms + 1)
y = np.zeros(t.shape)
if fit_mean:
y = theta[0] * np.ones(t.shape)
for i in range(1, nterms + 1):
y += theta[2 * i - 1] * np.sin(2 * np.pi * i * freq * t)
y += theta[2 * i] * np.cos(2 * np.pi * i * freq * t)
y_fit = periodic_fit(t, y, dy=1, frequency=freq, t_fit=t, nterms=nterms,
center_data=False, fit_mean=fit_mean)
assert_allclose(y, y_fit)
| test_exact_mle_fit |
terraform_azure_vm_example_test.go | // +build azure azureslim,compute
// NOTE: We use build tags to differentiate azure testing because we currently do not have azure access setup for
// CircleCI.
package test
import (
"fmt"
"strings"
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
"github.com/gruntwork-io/terratest/modules/azure"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/gruntwork-io/terratest/modules/terraform"
"github.com/stretchr/testify/assert"
)
func TestTerraformAzureVmExample(t *testing.T) {
t.Parallel()
subscriptionID := ""
uniquePostfix := random.UniqueId()
// Configure Terraform setting up a path to Terraform code.
terraformOptions := &terraform.Options{
// The path to where our Terraform code is located.
TerraformDir: "../../examples/azure/terraform-azure-vm-example",
// Variables to pass to our Terraform code using -var options.
Vars: map[string]interface{}{
"postfix": uniquePostfix,
},
}
// At the end of the test, run `terraform destroy` to clean up any resources that were created.
defer terraform.Destroy(t, terraformOptions)
// Run `terraform init` and `terraform apply`. Fail the test if there are any errors.
terraform.InitAndApply(t, terraformOptions)
// Run tests for the Virtual Machine.
testStrategiesForVMs(t, terraformOptions, subscriptionID)
testMultipleVMs(t, terraformOptions, subscriptionID)
testInformationOfVM(t, terraformOptions, subscriptionID)
testDisksOfVM(t, terraformOptions, subscriptionID)
testNetworkOfVM(t, terraformOptions, subscriptionID)
}
// These 3 tests check for the same property but illustrate different testing strategies for
// retrieving the data. The first strategy is used in the other tests of this module while
// the other two can be extended by the user as needed.
func | (t *testing.T, terraformOptions *terraform.Options, subscriptionID string) {
// Run `terraform output` to get the values of output variables.
resourceGroupName := terraform.Output(t, terraformOptions, "resource_group_name")
virtualMachineName := terraform.Output(t, terraformOptions, "vm_name")
expectedVMSize := compute.VirtualMachineSizeTypes(terraform.Output(t, terraformOptions, "vm_size"))
// 1. Check the VM Size directly. This strategy gets one specific property of the VM per method.
actualVMSize := azure.GetSizeOfVirtualMachine(t, virtualMachineName, resourceGroupName, subscriptionID)
assert.Equal(t, expectedVMSize, actualVMSize)
// 2. Check the VM size by reference. This strategy is beneficial when checking multiple properties
// by using one VM reference. Optional parameters have to be checked first to avoid nil panics.
vmByRef := azure.GetVirtualMachine(t, virtualMachineName, resourceGroupName, subscriptionID)
actualVMSize = vmByRef.HardwareProfile.VMSize
assert.Equal(t, expectedVMSize, actualVMSize)
// 3. Check the VM size by instance. This strategy is beneficial when checking multiple properties
// by using one VM instance and making calls against it with the added benefit of property check abstraction.
vmInstance := azure.Instance{VirtualMachine: vmByRef}
actualVMSize = vmInstance.GetVirtualMachineInstanceSize()
assert.Equal(t, expectedVMSize, actualVMSize)
}
// These tests check for the multiple Virtual Machines in a Resource Group.
func testMultipleVMs(t *testing.T, terraformOptions *terraform.Options, subscriptionID string) {
// Run `terraform output` to get the values of output variables.
resourceGroupName := terraform.Output(t, terraformOptions, "resource_group_name")
expectedVMName := terraform.Output(t, terraformOptions, "vm_name")
expectedVMSize := compute.VirtualMachineSizeTypes(terraform.Output(t, terraformOptions, "vm_size"))
expectedAvsName := terraform.Output(t, terraformOptions, "availability_set_name")
// Check against all VM names in a Resource Group.
vmList := azure.ListVirtualMachinesForResourceGroup(t, resourceGroupName, subscriptionID)
expectedVMCount := 1
assert.Equal(t, expectedVMCount, len(vmList))
assert.Contains(t, vmList, expectedVMName)
// Check Availability Set for multiple VMs.
actualVMsInAvs := azure.GetAvailabilitySetVMNamesInCaps(t, expectedAvsName, resourceGroupName, subscriptionID)
assert.Contains(t, actualVMsInAvs, strings.ToUpper(expectedVMName))
// Get all VMs in a Resource Group, including their properties, therefore avoiding
// multiple SDK calls. The penalty for this approach is introducing direct references
// which need to be checked for nil for optional configurations.
vmsByRef := azure.GetVirtualMachinesForResourceGroup(t, resourceGroupName, subscriptionID)
thisVM := vmsByRef[expectedVMName]
assert.Equal(t, expectedVMSize, thisVM.HardwareProfile.VMSize)
// Check for the VM negative test.
fakeVM := fmt.Sprintf("vm-%s", random.UniqueId())
assert.Nil(t, vmsByRef[fakeVM].VMID)
}
// These tests check information directly related to the specified Azure Virtual Machine.
func testInformationOfVM(t *testing.T, terraformOptions *terraform.Options, subscriptionID string) {
// Run `terraform output` to get the values of output variables.
resourceGroupName := terraform.Output(t, terraformOptions, "resource_group_name")
virtualMachineName := terraform.Output(t, terraformOptions, "vm_name")
expectedVmAdminUser := terraform.OutputList(t, terraformOptions, "vm_admin_username")
expectedImageSKU := terraform.OutputList(t, terraformOptions, "vm_image_sku")
expectedImageVersion := terraform.OutputList(t, terraformOptions, "vm_image_version")
expectedAvsName := terraform.Output(t, terraformOptions, "availability_set_name")
// Check if the Virtual Machine exists.
assert.True(t, azure.VirtualMachineExists(t, virtualMachineName, resourceGroupName, subscriptionID))
// Check the Admin User of the VM.
actualVM := azure.GetVirtualMachine(t, virtualMachineName, resourceGroupName, subscriptionID)
actualVmAdminUser := *actualVM.OsProfile.AdminUsername
assert.Equal(t, expectedVmAdminUser[0], actualVmAdminUser)
// Check the Storage Image properties of the VM.
actualImage := azure.GetVirtualMachineImage(t, virtualMachineName, resourceGroupName, subscriptionID)
assert.Contains(t, expectedImageSKU[0], actualImage.SKU)
assert.Contains(t, expectedImageVersion[0], actualImage.Version)
// Check the Availability Set of the VM.
// The AVS ID returned from the VM is always CAPS so ignoring case in the assertion.
actualexpectedAvsName := azure.GetVirtualMachineAvailabilitySetID(t, virtualMachineName, resourceGroupName, subscriptionID)
assert.True(t, strings.EqualFold(expectedAvsName, actualexpectedAvsName))
}
// These tests check the OS Disk and Attached Managed Disks for the Azure Virtual Machine.
// The following Terratest Azure module is utilized in addition to the compute module:
// - disk
// See the terraform_azure_disk_example_test.go for other related tests.
func testDisksOfVM(t *testing.T, terraformOptions *terraform.Options, subscriptionID string) {
// Run `terraform output` to get the values of output variables.
resourceGroupName := terraform.Output(t, terraformOptions, "resource_group_name")
virtualMachineName := terraform.Output(t, terraformOptions, "vm_name")
expectedOSDiskName := terraform.Output(t, terraformOptions, "os_disk_name")
expectedDiskName := terraform.Output(t, terraformOptions, "managed_disk_name")
expectedDiskType := terraform.Output(t, terraformOptions, "managed_disk_type")
// Check the OS Disk name of the VM.
actualOSDiskName := azure.GetVirtualMachineOSDiskName(t, virtualMachineName, resourceGroupName, subscriptionID)
assert.Equal(t, expectedOSDiskName, actualOSDiskName)
// Check the VM Managed Disk exists in the list of all VM Managed Disks.
actualManagedDiskNames := azure.GetVirtualMachineManagedDisks(t, virtualMachineName, resourceGroupName, subscriptionID)
assert.Contains(t, actualManagedDiskNames, expectedDiskName)
// Check the Managed Disk count of the VM.
expectedManagedDiskCount := 1
assert.Equal(t, expectedManagedDiskCount, len(actualManagedDiskNames))
// Check the Disk Type of the Managed Disk of the VM.
// This does not apply to VHD disks saved under a storage account.
actualDisk := azure.GetDisk(t, expectedDiskName, resourceGroupName, subscriptionID)
actualDiskType := actualDisk.Sku.Name
assert.Equal(t, compute.DiskStorageAccountTypes(expectedDiskType), actualDiskType)
}
// These tests check the underlying Virtual Network, Network Interface and associated Public IP Address.
// The following Terratest Azure modules are utilized in addition to the compute module:
// - networkinterface
// - publicaddress
// - virtualnetwork
// See the terraform_azure_network_example_test.go for other related tests.
func testNetworkOfVM(t *testing.T, terraformOptions *terraform.Options, subscriptionID string) {
// Run `terraform output` to get the values of output variables.
resourceGroupName := terraform.Output(t, terraformOptions, "resource_group_name")
virtualMachineName := terraform.Output(t, terraformOptions, "vm_name")
expectedVNetName := terraform.Output(t, terraformOptions, "virtual_network_name")
expectedSubnetName := terraform.Output(t, terraformOptions, "subnet_name")
expectedPublicAddressName := terraform.Output(t, terraformOptions, "public_ip_name")
expectedNicName := terraform.Output(t, terraformOptions, "network_interface_name")
expectedPrivateIPAddress := terraform.Output(t, terraformOptions, "private_ip")
// VirtualNetwork and Subnet tests
// Check the Subnet exists in the Virtual Network.
actualVnetSubnets := azure.GetVirtualNetworkSubnets(t, expectedVNetName, resourceGroupName, subscriptionID)
	assert.NotNil(t, actualVnetSubnets[expectedSubnetName])
// Check the Private IP is in the Subnet Range.
actualVMNicIPInSubnet := azure.CheckSubnetContainsIP(t, expectedPrivateIPAddress, expectedSubnetName, expectedVNetName, resourceGroupName, subscriptionID)
assert.True(t, actualVMNicIPInSubnet)
// Network Interface Card tests
// Check the VM Network Interface exists in the list of all VM Network Interfaces.
actualNics := azure.GetVirtualMachineNics(t, virtualMachineName, resourceGroupName, subscriptionID)
assert.Contains(t, actualNics, expectedNicName)
// Check the Network Interface count of the VM.
expectedNICCount := 1
assert.Equal(t, expectedNICCount, len(actualNics))
// Check for the Private IP in the NICs IP list.
actualPrivateIPAddress := azure.GetNetworkInterfacePrivateIPs(t, expectedNicName, resourceGroupName, subscriptionID)
assert.Contains(t, actualPrivateIPAddress, expectedPrivateIPAddress)
// Public IP Address test
	// Check the Public IP of the NIC. There is no expected value since it is assigned at runtime.
actualPublicIP := azure.GetIPOfPublicIPAddressByName(t, expectedPublicAddressName, resourceGroupName, subscriptionID)
assert.NotNil(t, actualPublicIP)
}
qualif-indirect-mutation-fail.rs | // compile-flags: --crate-type=lib
#![feature(const_mut_refs)]
#![feature(const_precise_live_drops)]
#![feature(const_swap)]
#![feature(raw_ref_op)]
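// Each `//~ ERROR` annotation below marks a diagnostic the compiler is expected to emit for this UI test.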
// Mutable borrow of a field with drop impl.
pub const fn f() {
let mut a: (u32, Option<String>) = (0, None); //~ ERROR destructors cannot be evaluated
let _ = &mut a.1;
}
// Mutable borrow of a type with drop impl.
pub const A1: () = {
let mut x = None; //~ ERROR destructors cannot be evaluated
let mut y = Some(String::new());
let a = &mut x;
let b = &mut y;
std::mem::swap(a, b);
std::mem::forget(y);
};
// Mutable borrow of a type with drop impl.
pub const A2: () = {
let mut x = None;
let mut y = Some(String::new());
let a = &mut x;
let b = &mut y;
std::mem::swap(a, b);
std::mem::forget(y);
let _z = x; //~ ERROR destructors cannot be evaluated
};
// Shared borrow of a type that might be !Freeze and Drop.
pub const fn g1<T>() {
    let x: Option<T> = None; //~ ERROR destructors cannot be evaluated
    let _ = x.is_some();
}
// Shared borrow of a type that might be !Freeze and Drop.
pub const fn g2<T>() {
let x: Option<T> = None;
let _ = x.is_some();
let _y = x; //~ ERROR destructors cannot be evaluated
}
// Mutable raw reference to a Drop type.
pub const fn address_of_mut() {
let mut x: Option<String> = None; //~ ERROR destructors cannot be evaluated
&raw mut x;
let mut y: Option<String> = None; //~ ERROR destructors cannot be evaluated
std::ptr::addr_of_mut!(y);
}
// Const raw reference to a Drop type. Conservatively assumed to allow mutation
// until resolution of https://github.com/rust-lang/rust/issues/56604.
pub const fn address_of_const() {
let x: Option<String> = None; //~ ERROR destructors cannot be evaluated
&raw const x;
let y: Option<String> = None; //~ ERROR destructors cannot be evaluated
std::ptr::addr_of!(y);
}
PriceField.tsx | import { InputAdornment, TextField, TextFieldProps } from "@material-ui/core";
import { InputProps } from "@material-ui/core/Input";
import { FormChange } from "@saleor/hooks/useForm";
import { makeStyles } from "@saleor/macaw-ui";
import React, { useMemo } from "react";
import { FormattedMessage } from "react-intl";
import { SEPARATOR_CHARACTERS } from "./consts";
import { findPriceSeparator, getCurrencyDecimalPoints } from "./utils";
const useStyles = makeStyles(
theme => ({
currencySymbol: {
fontSize: "0.875rem"
},
inputContainer: {
display: "grid",
gridTemplateColumns: "1fr 2rem 1fr"
},
pullDown: {
marginTop: theme.spacing(2)
},
separator: {
marginTop: theme.spacing(3),
textAlign: "center",
width: "100%"
},
widgetContainer: {
marginTop: theme.spacing(2)
}
}),
{ name: "PriceField" }
);
interface PriceFieldProps {
className?: string;
currencySymbol?: string;
disabled?: boolean;
error?: boolean;
hint?: string;
label?: string;
name?: string;
value?: string | number;
InputProps?: InputProps;
inputProps?: InputProps["inputProps"];
required?: boolean;
onChange(event: any);
}
export const PriceField: React.FC<PriceFieldProps> = props => {
const {
className,
disabled,
error,
label,
hint = "",
currencySymbol,
name,
onChange,
required,
value,
InputProps,
inputProps
} = props;
const classes = useStyles(props);
const minValue = 0;
const maxDecimalLength = useMemo(
() => getCurrencyDecimalPoints(currencySymbol),
[currencySymbol]
);
const handleChange: FormChange = e => {
let value = e.target.value;
const splitCharacter = findPriceSeparator(value);
const [integerPart, decimalPart] = value.split(splitCharacter);
if (maxDecimalLength === 0 && decimalPart) {
// this shouldn't happen - decimal character should be ignored
value = integerPart;
}
if (decimalPart?.length > maxDecimalLength) {
const shortenedDecimalPart = decimalPart.slice(0, maxDecimalLength);
value = `${integerPart}${splitCharacter}${shortenedDecimalPart}`;
}
onChange({
target: {
name: e.target.name,
value
}
});
};
const handleKeyPress: TextFieldProps["onKeyDown"] = e => {
// disallow entering e (exponent)
if (e.key === "e" || e.key === "E") {
e.preventDefault();
}
// ignore separator input when currency doesn't support decimal values
if (
maxDecimalLength === 0 &&
SEPARATOR_CHARACTERS.some(separator => e.key === separator)
) {
e.preventDefault();
}
};
return (
<TextField
className={className}
error={error || value < minValue}
helperText={
hint ? (
hint
) : value < minValue ? (
<FormattedMessage
id="WHkx+F"
defaultMessage="Price cannot be lower than 0"
/>
) : (
""
)
}
label={label}
fullWidth
value={value}
InputProps={{
...InputProps,
endAdornment: currencySymbol ? (
<InputAdornment position="end" className={classes.currencySymbol}>
{currencySymbol}
</InputAdornment>
) : (
<span />
),
inputProps: {
min: 0,
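          // e.g. a currency with two decimal places yields a step of 0.01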
step: 1 / Math.pow(10, maxDecimalLength),
...InputProps?.inputProps
},
type: "number"
}}
inputProps={{
min: minValue,
type: "number",
...inputProps
}}
name={name}
disabled={disabled}
required={required}
onChange={handleChange}
onKeyDown={handleKeyPress}
/>
);
};
PriceField.defaultProps = {
name: "price"
};
PriceField.displayName = "PriceField";
export default PriceField;
app.module.ts | import { Module } from '@nestjs/common';
import { UsersModule } from './users/users.module';
import { ReportsModule } from './reports/reports.module';
import { PrismaModule } from './prisma/prisma.module';
@Module({
imports: [UsersModule, ReportsModule, PrismaModule],
  controllers: [],
  providers: [],
})
export class AppModule {}
server.go | package main
import (
"crypto/md5"
"crypto/tls"
"encoding/base64"
"encoding/hex"
"flag"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"strconv"
"strings"
discoverd "github.com/flynn/flynn/discoverd/client"
"github.com/flynn/flynn/pkg/keepalive"
"github.com/flynn/flynn/pkg/shutdown"
"github.com/inconshreveable/log15"
)
var logger = log15.New()
func init() {
if os.Getenv("DEBUG") == "" {
// filter debug log messages if DEBUG is not set
logger.SetHandler(log15.LvlFilterHandler(log15.LvlInfo, log15.StdoutHandler))
}
}
type Listener interface {
Start() error
Close() error
Watcher
}
type Router struct {
HTTP Listener
TCP Listener
}
func (s *Router) ListenerFor(typ string) Listener {
switch typ {
case "http":
return s.HTTP
case "tcp":
return s.TCP
default:
return nil
}
}
func (s *Router) Start() error {
log := logger.New("fn", "Start")
log.Info("starting HTTP listener")
if err := s.HTTP.Start(); err != nil {
log.Error("error starting HTTP listener", "err", err)
return err
}
log.Info("starting TCP listener")
if err := s.TCP.Start(); err != nil {
log.Error("error starting TCP listener", "err", err)
s.HTTP.Close()
return err
}
return nil
}
func (s *Router) Close() {
s.HTTP.Close()
s.TCP.Close()
}
var listenFunc = keepalive.ReusableListen
func main() {
defer shutdown.Exit()
log := logger.New("fn", "main")
var cookieKey *[32]byte
if key := os.Getenv("COOKIE_KEY"); key != "" {
res, err := base64.StdEncoding.DecodeString(key)
if err != nil {
shutdown.Fatalf("error decoding COOKIE_KEY: %s", err)
}
if len(res) != 32 {
shutdown.Fatalf("decoded %d bytes from COOKIE_KEY, expected 32", len(res))
}
var k [32]byte
copy(k[:], res)
cookieKey = &k
}
if cookieKey == nil {
shutdown.Fatal("Missing random 32 byte base64-encoded COOKIE_KEY")
}
proxyProtocol := os.Getenv("PROXY_PROTOCOL") == "true"
legacyTLS := os.Getenv("LEGACY_TLS") == "true"
if !legacyTLS {
// Enable TLS 1.3
os.Setenv("GODEBUG", os.Getenv("GODEBUG")+",tls13=1")
}
httpPort := flag.Int("http-port", 8080, "default http listen port")
httpsPort := flag.Int("https-port", 4433, "default https listen port")
tcpIP := flag.String("tcp-ip", os.Getenv("LISTEN_IP"), "tcp router listen ip")
tcpRangeStart := flag.Int("tcp-range-start", 3000, "tcp port range start")
tcpRangeEnd := flag.Int("tcp-range-end", 3500, "tcp port range end")
certFile := flag.String("tls-cert", "", "TLS (SSL) cert file in pem format")
keyFile := flag.String("tls-key", "", "TLS (SSL) key file in pem format")
apiPort := flag.String("api-port", "", "api listen port")
flag.Parse()
httpPorts := []int{*httpPort}
httpsPorts := []int{*httpsPort}
if portRaw := os.Getenv("DEFAULT_HTTP_PORT"); portRaw != "" {
if port, err := strconv.Atoi(portRaw); err != nil {
			shutdown.Fatalf("Invalid DEFAULT_HTTP_PORT: %s", err)
		} else if port == 0 {
			log.Warn("Disabling HTTP access (DEFAULT_HTTP_PORT=0)")
httpPorts = nil
} else {
httpPorts[0] = port
}
}
if portRaw := os.Getenv("DEFAULT_HTTPS_PORT"); portRaw != "" {
if port, err := strconv.Atoi(portRaw); err != nil {
			shutdown.Fatalf("Invalid DEFAULT_HTTPS_PORT: %s", err)
} else if port == 0 {
shutdown.Fatal("Cannot disable HTTPS access (DEFAULT_HTTPS_PORT=0)")
} else {
httpsPorts[0] = port
}
}
defaultPorts := append(httpPorts, httpsPorts...)
if added := os.Getenv("ADDITIONAL_HTTP_PORTS"); added != "" {
for _, raw := range strings.Split(added, ",") {
if port, err := strconv.Atoi(raw); err == nil {
httpPorts = append(httpPorts, port)
} else {
shutdown.Fatal(err)
}
}
}
if added := os.Getenv("ADDITIONAL_HTTPS_PORTS"); added != "" {
for _, raw := range strings.Split(added, ",") {
if port, err := strconv.Atoi(raw); err == nil {
httpsPorts = append(httpsPorts, port)
} else {
shutdown.Fatal(err)
}
}
}
if *apiPort == "" {
*apiPort = os.Getenv("PORT")
if *apiPort == "" {
*apiPort = "5000"
}
}
keypair := tls.Certificate{}
var err error
if *certFile != "" {
if keypair, err = tls.LoadX509KeyPair(*certFile, *keyFile); err != nil {
shutdown.Fatal(err)
}
} else if tlsCert := os.Getenv("TLSCERT"); tlsCert != "" {
if tlsKey := os.Getenv("TLSKEY"); tlsKey != "" {
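			// Replace TLSKEY in the environment with an md5 fingerprint so the
			// private key itself does not linger in the process environment.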
os.Setenv("TLSKEY", fmt.Sprintf("md5^(%s)", md5sum(tlsKey)))
if keypair, err = tls.X509KeyPair([]byte(tlsCert), []byte(tlsKey)); err != nil {
shutdown.Fatal(err)
}
}
}
var error503Page []byte
if error503PageURL := os.Getenv("ERROR_503_PAGE_URL"); error503PageURL != "" {
func() {
res, err := http.Get(error503PageURL)
if err != nil {
log.Error("error getting ERROR_503_PAGE_URL", "err", err)
return
}
defer res.Body.Close()
if res.StatusCode != 200 {
log.Error("unexpected status code getting ERROR_503_PAGE_URL", "status", res.StatusCode)
return
}
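			// Cap the page at 1MB so an oversized response body is not read into memory unbounded.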
error503Page, err = ioutil.ReadAll(&io.LimitedReader{R: res.Body, N: 1000000})
if err != nil {
log.Error("error reading ERROR_503_PAGE_URL", "err", err)
return
}
return
}()
}
log.Info("initializing the controller route store")
store, err := NewControllerStore()
if err != nil {
log.Error("error initializing the controller route store", "err", err)
shutdown.Fatal(err)
}
var httpAddrs []string
var httpsAddrs []string
var reservedPorts []int
for _, port := range httpPorts {
httpAddrs = append(httpAddrs, net.JoinHostPort(os.Getenv("LISTEN_IP"), strconv.Itoa(port)))
reservedPorts = append(reservedPorts, port)
}
for _, port := range httpsPorts {
httpsAddrs = append(httpsAddrs, net.JoinHostPort(os.Getenv("LISTEN_IP"), strconv.Itoa(port)))
reservedPorts = append(reservedPorts, port)
}
r := Router{
TCP: &TCPListener{
IP: *tcpIP,
startPort: *tcpRangeStart,
endPort: *tcpRangeEnd,
syncer: NewSyncer(store, "tcp"),
discoverd: discoverd.DefaultClient,
reservedPorts: reservedPorts,
},
HTTP: &HTTPListener{
Addrs: httpAddrs,
TLSAddrs: httpsAddrs,
LegacyTLSVersions: legacyTLS,
defaultPorts: defaultPorts,
cookieKey: cookieKey,
keypair: keypair,
syncer: NewSyncer(store, "http"),
discoverd: discoverd.DefaultClient,
proxyProtocol: proxyProtocol,
error503Page: error503Page,
},
}
if err := r.Start(); err != nil {
shutdown.Fatal(err)
}
shutdown.BeforeExit(r.Close)
apiAddr := net.JoinHostPort(os.Getenv("LISTEN_IP"), *apiPort)
log.Info("starting API listener")
listener, err := listenFunc("tcp4", apiAddr)
if err != nil {
log.Error("error starting API listener", "err", err)
shutdown.Fatal(listenErr{apiAddr, err})
}
httpAddr := net.JoinHostPort(os.Getenv("LISTEN_IP"), strconv.Itoa(httpPorts[0]))
services := map[string]string{
"router-api": apiAddr,
"router-http": httpAddr,
}
for service, addr := range services {
log.Info("registering service", "name", service, "addr", addr)
hb, err := discoverd.AddServiceAndRegister(service, addr)
if err != nil {
log.Error("error registering service", "name", service, "addr", addr, "err", err)
shutdown.Fatal(err)
}
shutdown.BeforeExit(func() { hb.Close() })
}
	log.Info("serving API requests")
	shutdown.Fatal(http.Serve(listener, apiHandler(&r)))
}
type listenErr struct {
Addr string
Err error
}
func (e listenErr) Error() string {
return fmt.Sprintf("error binding to port (check if another service is listening on %s): %s", e.Addr, e.Err)
}
func md5sum(data string) string {
digest := md5.Sum([]byte(data))
return hex.EncodeToString(digest[:])
}
utf8.rs | use super::*;
pub struct Utf8ChunkedBuilder {
pub builder: MutableUtf8Array<i64>,
pub capacity: usize,
field: Field,
}
impl Utf8ChunkedBuilder {
	/// Create a new Utf8ChunkedBuilder
///
/// # Arguments
///
/// * `capacity` - Number of string elements in the final array.
/// * `bytes_capacity` - Number of bytes needed to store the string values.
pub fn new(name: &str, capacity: usize, bytes_capacity: usize) -> Self {
Utf8ChunkedBuilder {
builder: MutableUtf8Array::<i64>::with_capacities(capacity, bytes_capacity),
capacity,
field: Field::new(name, DataType::Utf8),
}
}
	/// Appends a string value into the builder
#[inline]
pub fn append_value<S: AsRef<str>>(&mut self, v: S) {
self.builder.push(Some(v.as_ref()));
}
/// Appends a null slot into the builder
#[inline]
pub fn append_null(&mut self) {
self.builder.push::<&str>(None);
}
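	/// Appends an optional string value into the builder, pushing a null slot for `None`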
#[inline]
	pub fn append_option<S: AsRef<str>>(&mut self, opt: Option<S>) {
self.builder.push(opt);
}
pub fn finish(self) -> Utf8Chunked {
let arr = self.builder.into_arc();
ChunkedArray {
field: Arc::new(self.field),
chunks: vec![arr],
phantom: PhantomData,
categorical_map: None,
..Default::default()
}
}
fn shrink_to_fit(&mut self) {
self.builder.shrink_to_fit()
}
}
pub struct Utf8ChunkedBuilderCow {
builder: Utf8ChunkedBuilder,
}
impl Utf8ChunkedBuilderCow {
pub fn new(name: &str, capacity: usize) -> Self {
Utf8ChunkedBuilderCow {
builder: Utf8ChunkedBuilder::new(name, capacity, capacity),
}
}
}
impl ChunkedBuilder<Cow<'_, str>, Utf8Type> for Utf8ChunkedBuilderCow {
#[inline]
fn append_value(&mut self, val: Cow<'_, str>) {
self.builder.append_value(val.as_ref())
}
#[inline]
fn append_null(&mut self) {
self.builder.append_null()
}
fn finish(self) -> ChunkedArray<Utf8Type> {
self.builder.finish()
}
fn shrink_to_fit(&mut self) {
self.builder.shrink_to_fit()
}
}
modernizr-2.6.2.js | /* NUGET: BEGIN LICENSE TEXT
*
* Microsoft grants you the right to use these script files for the sole
* purpose of either: (i) interacting through your browser with the Microsoft
* website or online service, subject to the applicable licensing or use
* terms; or (ii) using the files as included with a Microsoft product subject
* to that product's license terms. Microsoft reserves all other rights to the
* files not expressly granted by Microsoft, whether by implication, estoppel
* or otherwise. Insofar as a script file is dual licensed under GPL,
* Microsoft neither took the code under GPL nor distributes it thereunder but
* under the terms set out in this paragraph. All notices and licenses
* below are for informational purposes only.
*
* Copyright (c) Faruk Ates, Paul Irish, Alex Sexton; http://www.modernizr.com/license/
*
* Includes matchMedia polyfill; Copyright (c) 2010 Filament Group, Inc; http://opensource.org/licenses/MIT
*
* Includes material adapted from ES5-shim https://github.com/kriskowal/es5-shim/blob/master/es5-shim.js; Copyright 2009-2012 by contributors; http://opensource.org/licenses/MIT
*
* Includes material from css-support; Copyright (c) 2005-2012 Diego Perini; https://github.com/dperini/css-support/blob/master/LICENSE
*
* NUGET: END LICENSE TEXT */
/*!
* Modernizr v2.6.2
* www.modernizr.com
*
* Copyright (c) Faruk Ates, Paul Irish, Alex Sexton
* Available under the BSD and MIT licenses: www.modernizr.com/license/
*/
/*
* Modernizr tests which native CSS3 and HTML5 features are available in
* the current UA and makes the results available to you in two ways:
* as properties on a global Modernizr object, and as classes on the
* <html> element. This information allows you to progressively enhance
* your pages with a granular level of control over the experience.
*
* Modernizr has an optional (not included) conditional resource loader
* called Modernizr.load(), based on Yepnope.js (yepnopejs.com).
* To get a build that includes Modernizr.load(), as well as choosing
* which tests to include, go to www.modernizr.com/download/
*
* Authors Faruk Ates, Paul Irish, Alex Sexton
* Contributors Ryan Seddon, Ben Alman
*/
window.Modernizr = (function( window, document, undefined ) {
var version = '2.6.2',
Modernizr = {},
/*>>cssclasses*/
// option for enabling the HTML classes to be added
enableClasses = true,
/*>>cssclasses*/
docElement = document.documentElement,
/**
* Create our "modernizr" element that we do most feature tests on.
*/
mod = 'modernizr',
modElem = document.createElement(mod),
mStyle = modElem.style,
/**
* Create the input element for various Web Forms feature tests.
*/
inputElem /*>>inputelem*/ = document.createElement('input') /*>>inputelem*/ ,
/*>>smile*/
smile = ':)',
/*>>smile*/
toString = {}.toString,
// TODO :: make the prefixes more granular
/*>>prefixes*/
// List of property values to set for css tests. See ticket #21
prefixes = ' -webkit- -moz- -o- -ms- '.split(' '),
/*>>prefixes*/
/*>>domprefixes*/
// Following spec is to expose vendor-specific style properties as:
// elem.style.WebkitBorderRadius
// and the following would be incorrect:
// elem.style.webkitBorderRadius
// Webkit ghosts their properties in lowercase but Opera & Moz do not.
// Microsoft uses a lowercase `ms` instead of the correct `Ms` in IE8+
// erik.eae.net/archives/2008/03/10/21.48.10/
// More here: github.com/Modernizr/Modernizr/issues/issue/21
omPrefixes = 'Webkit Moz O ms',
cssomPrefixes = omPrefixes.split(' '),
domPrefixes = omPrefixes.toLowerCase().split(' '),
/*>>domprefixes*/
/*>>ns*/
ns = {'svg': 'http://www.w3.org/2000/svg'},
/*>>ns*/
tests = {},
inputs = {},
attrs = {},
classes = [],
slice = classes.slice,
featureName, // used in testing loop
/*>>teststyles*/
// Inject element with style element and some CSS rules
injectElementWithStyles = function( rule, callback, nodes, testnames ) {
var style, ret, node, docOverflow,
div = document.createElement('div'),
// After page load injecting a fake body doesn't work so check if body exists
body = document.body,
// IE6 and 7 won't return offsetWidth or offsetHeight unless it's in the body element, so we fake it.
fakeBody = body || document.createElement('body');
if ( parseInt(nodes, 10) ) {
// In order not to give false positives we create a node for each test
// This also allows the method to scale for unspecified uses
while ( nodes-- ) {
node = document.createElement('div');
node.id = testnames ? testnames[nodes] : mod + (nodes + 1);
div.appendChild(node);
}
}
// <style> elements in IE6-9 are considered 'NoScope' elements and therefore will be removed
// when injected with innerHTML. To get around this you need to prepend the 'NoScope' element
// with a 'scoped' element, in our case the soft-hyphen entity as it won't mess with our measurements.
// msdn.microsoft.com/en-us/library/ms533897%28VS.85%29.aspx
		// Documents served as xml will throw if using &shy; so use xml friendly encoded version. See issue #277
		style = ['&#173;','<style id="s', mod, '">', rule, '</style>'].join('');
div.id = mod;
// IE6 will false positive on some tests due to the style element inside the test div somehow interfering offsetHeight, so insert it into body or fakebody.
// Opera will act all quirky when injecting elements in documentElement when page is served as xml, needs fakebody too. #270
(body ? div : fakeBody).innerHTML += style;
fakeBody.appendChild(div);
if ( !body ) {
//avoid crashing IE8, if background image is used
fakeBody.style.background = '';
//Safari 5.13/5.1.4 OSX stops loading if ::-webkit-scrollbar is used and scrollbars are visible
fakeBody.style.overflow = 'hidden';
docOverflow = docElement.style.overflow;
docElement.style.overflow = 'hidden';
docElement.appendChild(fakeBody);
}
ret = callback(div, rule);
// If this is done after page load we don't want to remove the body so check if body exists
if ( !body ) {
fakeBody.parentNode.removeChild(fakeBody);
docElement.style.overflow = docOverflow;
} else {
div.parentNode.removeChild(div);
}
return !!ret;
},
/*>>teststyles*/
/*>>mq*/
// adapted from matchMedia polyfill
// by Scott Jehl and Paul Irish
// gist.github.com/786768
testMediaQuery = function( mq ) {
var matchMedia = window.matchMedia || window.msMatchMedia;
if ( matchMedia ) {
return matchMedia(mq).matches;
}
var bool;
injectElementWithStyles('@media ' + mq + ' { #' + mod + ' { position: absolute; } }', function( node ) {
bool = (window.getComputedStyle ?
getComputedStyle(node, null) :
node.currentStyle)['position'] == 'absolute';
});
return bool;
},
/*>>mq*/
/*>>hasevent*/
//
// isEventSupported determines if a given element supports the given event
// kangax.github.com/iseventsupported/
//
	// The following results are known to be incorrect:
// Modernizr.hasEvent("webkitTransitionEnd", elem) // false negative
// Modernizr.hasEvent("textInput") // in Webkit. github.com/Modernizr/Modernizr/issues/333
// ...
isEventSupported = (function() {
var TAGNAMES = {
'select': 'input', 'change': 'input',
'submit': 'form', 'reset': 'form',
'error': 'img', 'load': 'img', 'abort': 'img'
};
function isEventSupported( eventName, element ) {
element = element || document.createElement(TAGNAMES[eventName] || 'div');
eventName = 'on' + eventName;
// When using `setAttribute`, IE skips "unload", WebKit skips "unload" and "resize", whereas `in` "catches" those
var isSupported = eventName in element;
if ( !isSupported ) {
// If it has no `setAttribute` (i.e. doesn't implement Node interface), try generic element
if ( !element.setAttribute ) {
element = document.createElement('div');
}
if ( element.setAttribute && element.removeAttribute ) {
element.setAttribute(eventName, '');
isSupported = is(element[eventName], 'function');
// If property was created, "remove it" (by setting value to `undefined`)
if ( !is(element[eventName], 'undefined') ) {
element[eventName] = undefined;
}
element.removeAttribute(eventName);
}
}
element = null;
return isSupported;
}
return isEventSupported;
})(),
/*>>hasevent*/
// TODO :: Add flag for hasownprop ? didn't last time
// hasOwnProperty shim by kangax needed for Safari 2.0 support
_hasOwnProperty = ({}).hasOwnProperty, hasOwnProp;
if ( !is(_hasOwnProperty, 'undefined') && !is(_hasOwnProperty.call, 'undefined') ) {
hasOwnProp = function (object, property) {
return _hasOwnProperty.call(object, property);
};
}
else {
hasOwnProp = function (object, property) { /* yes, this can give false positives/negatives, but most of the time we don't care about those */
return ((property in object) && is(object.constructor.prototype[property], 'undefined'));
};
}
// Adapted from ES5-shim https://github.com/kriskowal/es5-shim/blob/master/es5-shim.js
// es5.github.com/#x15.3.4.5
if (!Function.prototype.bind) {
Function.prototype.bind = function bind(that) {
var target = this;
if (typeof target != "function") {
throw new TypeError();
}
var args = slice.call(arguments, 1),
bound = function () {
if (this instanceof bound) {
var F = function(){};
F.prototype = target.prototype;
var self = new F();
var result = target.apply(
self,
args.concat(slice.call(arguments))
);
if (Object(result) === result) {
return result;
}
return self;
} else {
return target.apply(
that,
args.concat(slice.call(arguments))
);
}
};
return bound;
};
}
/**
* setCss applies given styles to the Modernizr DOM node.
*/
function setCss( str ) {
mStyle.cssText = str;
}
/**
* setCssAll extrapolates all vendor-specific css strings.
*/
function setCssAll( str1, str2 ) {
return setCss(prefixes.join(str1 + ';') + ( str2 || '' ));
}
/**
* is returns a boolean for if typeof obj is exactly type.
*/
	function is( obj, type ) {
return typeof obj === type;
}
/**
* contains returns a boolean for if substr is found within str.
*/
function contains( str, substr ) {
return !!~('' + str).indexOf(substr);
}
/*>>testprop*/
// testProps is a generic CSS / DOM property test.
// In testing support for a given CSS property, it's legit to test:
// `elem.style[styleName] !== undefined`
// If the property is supported it will return an empty string,
// if unsupported it will return undefined.
// We'll take advantage of this quick test and skip setting a style
// on our modernizr element, but instead just testing undefined vs
// empty string.
// Because the testing of the CSS property names (with "-", as
// opposed to the camelCase DOM properties) is non-portable and
// non-standard but works in WebKit and IE (but not Gecko or Opera),
// we explicitly reject properties with dashes so that authors
// developing in WebKit or IE first don't end up with
// browser-specific content by accident.
function testProps( props, prefixed ) {
for ( var i in props ) {
var prop = props[i];
if ( !contains(prop, "-") && mStyle[prop] !== undefined ) {
return prefixed == 'pfx' ? prop : true;
}
}
return false;
}
/*>>testprop*/
// TODO :: add testDOMProps
/**
* testDOMProps is a generic DOM property test; if a browser supports
* a certain property, it won't return undefined for it.
*/
function testDOMProps( props, obj, elem ) {
for ( var i in props ) {
var item = obj[props[i]];
if ( item !== undefined) {
// return the property name as a string
if (elem === false) return props[i];
// let's bind a function
if (is(item, 'function')){
// default to autobind unless override
return item.bind(elem || obj);
}
// return the unbound function or obj or value
return item;
}
}
return false;
}
/*>>testallprops*/
/**
* testPropsAll tests a list of DOM properties we want to check against.
* We specify literally ALL possible (known and/or likely) properties on
* the element including the non-vendor prefixed one, for forward-
* compatibility.
*/
function testPropsAll( prop, prefixed, elem ) {
var ucProp = prop.charAt(0).toUpperCase() + prop.slice(1),
props = (prop + ' ' + cssomPrefixes.join(ucProp + ' ') + ucProp).split(' ');
// did they call .prefixed('boxSizing') or are we just testing a prop?
if(is(prefixed, "string") || is(prefixed, "undefined")) {
return testProps(props, prefixed);
// otherwise, they called .prefixed('requestAnimationFrame', window[, elem])
} else {
props = (prop + ' ' + (domPrefixes).join(ucProp + ' ') + ucProp).split(' ');
return testDOMProps(props, prefixed, elem);
}
}
/*>>testallprops*/
/**
* Tests
* -----
*/
// The *new* flexbox
// dev.w3.org/csswg/css3-flexbox
tests['flexbox'] = function() {
return testPropsAll('flexWrap');
};
// The *old* flexbox
// www.w3.org/TR/2009/WD-css3-flexbox-20090723/
tests['flexboxlegacy'] = function() {
return testPropsAll('boxDirection');
};
// On the S60 and BB Storm, getContext exists, but always returns undefined
// so we actually have to call getContext() to verify
// github.com/Modernizr/Modernizr/issues/issue/97/
tests['canvas'] = function() {
var elem = document.createElement('canvas');
return !!(elem.getContext && elem.getContext('2d'));
};
tests['canvastext'] = function() {
return !!(Modernizr['canvas'] && is(document.createElement('canvas').getContext('2d').fillText, 'function'));
};
// webk.it/70117 is tracking a legit WebGL feature detect proposal
// We do a soft detect which may false positive in order to avoid
// an expensive context creation: bugzil.la/732441
tests['webgl'] = function() {
return !!window.WebGLRenderingContext;
};
/*
* The Modernizr.touch test only indicates if the browser supports
* touch events, which does not necessarily reflect a touchscreen
* device, as evidenced by tablets running Windows 7 or, alas,
* the Palm Pre / WebOS (touch) phones.
*
* Additionally, Chrome (desktop) used to lie about its support on this,
* but that has since been rectified: crbug.com/36415
*
* We also test for Firefox 4 Multitouch Support.
*
* For more info, see: modernizr.github.com/Modernizr/touch.html
*/
tests['touch'] = function() {
var bool;
if(('ontouchstart' in window) || window.DocumentTouch && document instanceof DocumentTouch) {
bool = true;
} else {
injectElementWithStyles(['@media (',prefixes.join('touch-enabled),('),mod,')','{#modernizr{top:9px;position:absolute}}'].join(''), function( node ) {
bool = node.offsetTop === 9;
});
}
return bool;
};
// geolocation is often considered a trivial feature detect...
// Turns out, it's quite tricky to get right:
//
// Using !!navigator.geolocation does two things we don't want. It:
// 1. Leaks memory in IE9: github.com/Modernizr/Modernizr/issues/513
// 2. Disables page caching in WebKit: webk.it/43956
//
// Meanwhile, in Firefox < 8, an about:config setting could expose
// a false positive that would throw an exception: bugzil.la/688158
tests['geolocation'] = function() {
return 'geolocation' in navigator;
};
tests['postmessage'] = function() {
return !!window.postMessage;
};
// Chrome incognito mode used to throw an exception when using openDatabase
// It doesn't anymore.
tests['websqldatabase'] = function() {
return !!window.openDatabase;
};
// Vendors had inconsistent prefixing with the experimental Indexed DB:
// - Webkit's implementation is accessible through webkitIndexedDB
// - Firefox shipped moz_indexedDB before FF4b9, but since then has been mozIndexedDB
// For speed, we don't test the legacy (and beta-only) indexedDB
tests['indexedDB'] = function() {
return !!testPropsAll("indexedDB", window);
};
// documentMode logic from YUI to filter out IE8 Compat Mode
// which false positives.
tests['hashchange'] = function() {
return isEventSupported('hashchange', window) && (document.documentMode === undefined || document.documentMode > 7);
};
// Per 1.6:
// This used to be Modernizr.historymanagement but the longer
// name has been deprecated in favor of a shorter and property-matching one.
// The old API is still available in 1.6, but as of 2.0 will throw a warning,
// and in the first release thereafter disappear entirely.
tests['history'] = function() {
return !!(window.history && history.pushState);
};
tests['draganddrop'] = function() {
var div = document.createElement('div');
return ('draggable' in div) || ('ondragstart' in div && 'ondrop' in div);
};
// FF3.6 was EOL'ed on 4/24/12, but the ESR version of FF10
// will be supported until FF19 (2/12/13), at which time, ESR becomes FF17.
// FF10 still uses prefixes, so check for it until then.
// for more ESR info, see: mozilla.org/en-US/firefox/organizations/faq/
tests['websockets'] = function() {
return 'WebSocket' in window || 'MozWebSocket' in window;
};
// css-tricks.com/rgba-browser-support/
tests['rgba'] = function() {
// Set an rgba() color and check the returned value
setCss('background-color:rgba(150,255,150,.5)');
return contains(mStyle.backgroundColor, 'rgba');
};
tests['hsla'] = function() {
// Same as rgba(), in fact, browsers re-map hsla() to rgba() internally,
// except IE9 who retains it as hsla
setCss('background-color:hsla(120,40%,100%,.5)');
return contains(mStyle.backgroundColor, 'rgba') || contains(mStyle.backgroundColor, 'hsla');
};
tests['multiplebgs'] = function() {
// Setting multiple images AND a color on the background shorthand property
// and then querying the style.background property value for the number of
// occurrences of "url(" is a reliable method for detecting ACTUAL support for this!
setCss('background:url(https://),url(https://),red url(https://)');
// If the UA supports multiple backgrounds, there should be three occurrences
// of the string "url(" in the return value for elemStyle.background
return (/(url\s*\(.*?){3}/).test(mStyle.background);
};
// this will false positive in Opera Mini
// github.com/Modernizr/Modernizr/issues/396
tests['backgroundsize'] = function() {
return testPropsAll('backgroundSize');
};
tests['borderimage'] = function() {
return testPropsAll('borderImage');
};
// Super comprehensive table about all the unique implementations of
// border-radius: muddledramblings.com/table-of-css3-border-radius-compliance
tests['borderradius'] = function() {
return testPropsAll('borderRadius');
};
// WebOS unfortunately false positives on this test.
tests['boxshadow'] = function() {
return testPropsAll('boxShadow');
};
// FF3.0 will false positive on this test
tests['textshadow'] = function() {
return document.createElement('div').style.textShadow === '';
};
tests['opacity'] = function() {
// Browsers that actually have CSS Opacity implemented have done so
// according to spec, which means their return values are within the
// range of [0.0,1.0] - including the leading zero.
setCssAll('opacity:.55');
// The non-literal . in this regex is intentional:
// German Chrome returns this value as 0,55
// github.com/Modernizr/Modernizr/issues/#issue/59/comment/516632
return (/^0.55$/).test(mStyle.opacity);
};
// Note, Android < 4 will pass this test, but can only animate
// a single property at a time
// daneden.me/2011/12/putting-up-with-androids-bullshit/
tests['cssanimations'] = function() {
return testPropsAll('animationName');
};
tests['csscolumns'] = function() {
return testPropsAll('columnCount');
};
tests['cssgradients'] = function() {
/**
* For CSS Gradients syntax, please see:
* webkit.org/blog/175/introducing-css-gradients/
* developer.mozilla.org/en/CSS/-moz-linear-gradient
* developer.mozilla.org/en/CSS/-moz-radial-gradient
* dev.w3.org/csswg/css3-images/#gradients-
*/
var str1 = 'background-image:',
str2 = 'gradient(linear,left top,right bottom,from(#9f9),to(white));',
str3 = 'linear-gradient(left top,#9f9, white);';
setCss(
// legacy webkit syntax (FIXME: remove when syntax not in use anymore)
(str1 + '-webkit- '.split(' ').join(str2 + str1) +
// standard syntax // trailing 'background-image:'
prefixes.join(str3 + str1)).slice(0, -str1.length)
);
return contains(mStyle.backgroundImage, 'gradient');
};
tests['cssreflections'] = function() {
return testPropsAll('boxReflect');
};
tests['csstransforms'] = function() {
return !!testPropsAll('transform');
};
tests['csstransforms3d'] = function() {
var ret = !!testPropsAll('perspective');
// Webkit's 3D transforms are passed off to the browser's own graphics renderer.
// It works fine in Safari on Leopard and Snow Leopard, but not in Chrome in
// some conditions. As a result, Webkit typically recognizes the syntax but
// will sometimes throw a false positive, thus we must do a more thorough check:
if ( ret && 'webkitPerspective' in docElement.style ) {
// Webkit allows this media query to succeed only if the feature is enabled.
// `@media (transform-3d),(-webkit-transform-3d){ ... }`
injectElementWithStyles('@media (transform-3d),(-webkit-transform-3d){#modernizr{left:9px;position:absolute;height:3px;}}', function( node, rule ) {
ret = node.offsetLeft === 9 && node.offsetHeight === 3;
});
}
return ret;
};
tests['csstransitions'] = function() {
return testPropsAll('transition');
};
/*>>fontface*/
// @font-face detection routine by Diego Perini
// javascript.nwbox.com/CSSSupport/
// false positives:
// WebOS github.com/Modernizr/Modernizr/issues/342
// WP7 github.com/Modernizr/Modernizr/issues/538
tests['fontface'] = function() {
var bool;
injectElementWithStyles('@font-face {font-family:"font";src:url("https://")}', function( node, rule ) {
var style = document.getElementById('smodernizr'),
sheet = style.sheet || style.styleSheet,
cssText = sheet ? (sheet.cssRules && sheet.cssRules[0] ? sheet.cssRules[0].cssText : sheet.cssText || '') : '';
bool = /src/i.test(cssText) && cssText.indexOf(rule.split(' ')[0]) === 0;
});
return bool;
};
/*>>fontface*/
// CSS generated content detection
tests['generatedcontent'] = function() {
var bool;
injectElementWithStyles(['#',mod,'{font:0/0 a}#',mod,':after{content:"',smile,'";visibility:hidden;font:3px/1 a}'].join(''), function( node ) {
bool = node.offsetHeight >= 3;
});
return bool;
};
// These tests evaluate support of the video/audio elements, as well as
// testing what types of content they support.
//
// We're using the Boolean constructor here, so that we can extend the value
// e.g. Modernizr.video // true
// Modernizr.video.ogg // 'probably'
//
// Codec values from : github.com/NielsLeenheer/html5test/blob/9106a8/index.html#L845
// thx to NielsLeenheer and zcorpan
// Note: in some older browsers, "no" was a return value instead of empty string.
// It was live in FF3.5.0 and 3.5.1, but fixed in 3.5.2
// It was also live in Safari 4.0.0 - 4.0.4, but fixed in 4.0.5
tests['video'] = function() {
var elem = document.createElement('video'),
bool = false;
// IE9 Running on Windows Server SKU can cause an exception to be thrown, bug #224
try {
if ( bool = !!elem.canPlayType ) {
bool = new Boolean(bool);
bool.ogg = elem.canPlayType('video/ogg; codecs="theora"') .replace(/^no$/,'');
// Without QuickTime, this value will be `undefined`. github.com/Modernizr/Modernizr/issues/546
bool.h264 = elem.canPlayType('video/mp4; codecs="avc1.42E01E"') .replace(/^no$/,'');
bool.webm = elem.canPlayType('video/webm; codecs="vp8, vorbis"').replace(/^no$/,'');
}
} catch(e) { }
return bool;
};
tests['audio'] = function() {
var elem = document.createElement('audio'),
bool = false;
try {
if ( bool = !!elem.canPlayType ) {
bool = new Boolean(bool);
bool.ogg = elem.canPlayType('audio/ogg; codecs="vorbis"').replace(/^no$/,'');
bool.mp3 = elem.canPlayType('audio/mpeg;') .replace(/^no$/,'');
// Mimetypes accepted:
// developer.mozilla.org/En/Media_formats_supported_by_the_audio_and_video_elements
// bit.ly/iphoneoscodecs
bool.wav = elem.canPlayType('audio/wav; codecs="1"') .replace(/^no$/,'');
bool.m4a = ( elem.canPlayType('audio/x-m4a;') ||
elem.canPlayType('audio/aac;')) .replace(/^no$/,'');
}
} catch(e) { }
return bool;
};
// In FF4, if disabled, window.localStorage should === null.
// Normally, we could not test that directly and need to do a
// `('localStorage' in window) && ` test first because otherwise Firefox will
// throw bugzil.la/365772 if cookies are disabled
// Also in iOS5 Private Browsing mode, attempting to use localStorage.setItem
// will throw the exception:
// QUOTA_EXCEEDED_ERRROR DOM Exception 22.
// Peculiarly, getItem and removeItem calls do not throw.
// Because we are forced to try/catch this, we'll go aggressive.
// Just FWIW: IE8 Compat mode supports these features completely:
// www.quirksmode.org/dom/html5.html
// But IE8 doesn't support either with local files
tests['localstorage'] = function() {
try {
localStorage.setItem(mod, mod);
localStorage.removeItem(mod);
return true;
} catch(e) {
return false;
}
};
tests['sessionstorage'] = function() {
try {
sessionStorage.setItem(mod, mod);
sessionStorage.removeItem(mod);
return true;
} catch(e) {
return false;
}
};
tests['webworkers'] = function() {
return !!window.Worker;
};
tests['applicationcache'] = function() {
return !!window.applicationCache;
};
// Thanks to Erik Dahlstrom
tests['svg'] = function() {
return !!document.createElementNS && !!document.createElementNS(ns.svg, 'svg').createSVGRect;
};
// specifically for SVG inline in HTML, not within XHTML
// test page: paulirish.com/demo/inline-svg
tests['inlinesvg'] = function() {
var div = document.createElement('div');
div.innerHTML = '<svg/>';
return (div.firstChild && div.firstChild.namespaceURI) == ns.svg;
};
// SVG SMIL animation
tests['smil'] = function() {
return !!document.createElementNS && /SVGAnimate/.test(toString.call(document.createElementNS(ns.svg, 'animate')));
};
// This test is only for clip paths in SVG proper, not clip paths on HTML content
// demo: srufaculty.sru.edu/david.dailey/svg/newstuff/clipPath4.svg
// However read the comments to dig into applying SVG clippaths to HTML content here:
// github.com/Modernizr/Modernizr/issues/213#issuecomment-1149491
tests['svgclippaths'] = function() {
return !!document.createElementNS && /SVGClipPath/.test(toString.call(document.createElementNS(ns.svg, 'clipPath')));
};
/*>>webforms*/
// input features and input types go directly onto the ret object, bypassing the tests loop.
// Hold this guy to execute in a moment.
function webforms() {
/*>>input*/
// Run through HTML5's new input attributes to see if the UA understands any.
// We're using f which is the <input> element created early on
// Mike Taylr has created a comprehensive resource for testing these attributes
// when applied to all input types:
// miketaylr.com/code/input-type-attr.html
// spec: www.whatwg.org/specs/web-apps/current-work/multipage/the-input-element.html#input-type-attr-summary
// Only input placeholder is tested while textarea's placeholder is not.
// Currently Safari 4 and Opera 11 have support only for the input placeholder
// Both tests are available in feature-detects/forms-placeholder.js
Modernizr['input'] = (function( props ) {
for ( var i = 0, len = props.length; i < len; i++ ) {
attrs[ props[i] ] = !!(props[i] in inputElem);
}
if (attrs.list){
				// Safari false positives on datalist: webk.it/74252
// see also github.com/Modernizr/Modernizr/issues/146
attrs.list = !!(document.createElement('datalist') && window.HTMLDataListElement);
}
return attrs;
})('autocomplete autofocus list placeholder max min multiple pattern required step'.split(' '));
/*>>input*/
/*>>inputtypes*/
// Run through HTML5's new input types to see if the UA understands any.
// This is put behind the tests runloop because it doesn't return a
// true/false like all the other tests; instead, it returns an object
// containing each input type with its corresponding true/false value
// Big thanks to @miketaylr for the html5 forms expertise. miketaylr.com/
Modernizr['inputtypes'] = (function(props) {
for ( var i = 0, bool, inputElemType, defaultView, len = props.length; i < len; i++ ) {
inputElem.setAttribute('type', inputElemType = props[i]);
bool = inputElem.type !== 'text';
// We first check to see if the type we give it sticks..
// If the type does, we feed it a textual value, which shouldn't be valid.
// If the value doesn't stick, we know there's input sanitization which infers a custom UI
if ( bool ) {
inputElem.value = smile;
inputElem.style.cssText = 'position:absolute;visibility:hidden;';
if ( /^range$/.test(inputElemType) && inputElem.style.WebkitAppearance !== undefined ) {
docElement.appendChild(inputElem);
defaultView = document.defaultView;
// Safari 2-4 allows the smiley as a value, despite making a slider
bool = defaultView.getComputedStyle &&
defaultView.getComputedStyle(inputElem, null).WebkitAppearance !== 'textfield' &&
// Mobile android web browser has false positive, so must
// check the height to see if the widget is actually there.
(inputElem.offsetHeight !== 0);
docElement.removeChild(inputElem);
} else if ( /^(search|tel)$/.test(inputElemType) ){
// Spec doesn't define any special parsing or detectable UI
// behaviors so we pass these through as true
// Interestingly, opera fails the earlier test, so it doesn't
// even make it here.
} else if ( /^(url|email)$/.test(inputElemType) ) {
// Real url and email support comes with prebaked validation.
bool = inputElem.checkValidity && inputElem.checkValidity() === false;
} else {
					// If the upgraded input component rejects the :) text, we got a winner
bool = inputElem.value != smile;
}
}
inputs[ props[i] ] = !!bool;
}
return inputs;
})('search tel url email datetime date month week time datetime-local number range color'.split(' '));
/*>>inputtypes*/
}
/*>>webforms*/
// End of test definitions
// -----------------------
// Run through all tests and detect their support in the current UA.
// todo: hypothetically we could be doing an array of tests and use a basic loop here.
for ( var feature in tests ) {
if ( hasOwnProp(tests, feature) ) {
// run the test, throw the return value into the Modernizr,
// then based on that boolean, define an appropriate className
// and push it into an array of classes we'll join later.
featureName = feature.toLowerCase();
Modernizr[featureName] = tests[feature]();
classes.push((Modernizr[featureName] ? '' : 'no-') + featureName);
}
}
/*>>webforms*/
// input tests need to run.
Modernizr.input || webforms();
/*>>webforms*/
/**
* addTest allows the user to define their own feature tests
* the result will be added onto the Modernizr object,
* as well as an appropriate className set on the html element
*
* @param feature - String naming the feature
* @param test - Function returning true if feature is supported, false if not
*/
Modernizr.addTest = function ( feature, test ) {
if ( typeof feature == 'object' ) {
for ( var key in feature ) {
if ( hasOwnProp( feature, key ) ) {
Modernizr.addTest( key, feature[ key ] );
}
}
} else {
feature = feature.toLowerCase();
if ( Modernizr[feature] !== undefined ) {
// we're going to quit if you're trying to overwrite an existing test
// if we were to allow it, we'd do this:
// var re = new RegExp("\\b(no-)?" + feature + "\\b");
// docElement.className = docElement.className.replace( re, '' );
// but, no rly, stuff 'em.
return Modernizr;
}
test = typeof test == 'function' ? test() : test;
if (typeof enableClasses !== "undefined" && enableClasses) {
docElement.className += ' ' + (test ? '' : 'no-') + feature;
}
Modernizr[feature] = test;
}
return Modernizr; // allow chaining.
};
// Reset modElem.cssText to nothing to reduce memory footprint.
setCss('');
modElem = inputElem = null;
/*>>shiv*/
/*! HTML5 Shiv v3.6.1 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed */
;(function(window, document) {
/*jshint evil:true */
/** Preset options */
var options = window.html5 || {};
/** Used to skip problem elements */
var reSkip = /^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i;
/** Not all elements can be cloned in IE **/
var saveClones = /^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i;
/** Detect whether the browser supports default html5 styles */
var supportsHtml5Styles;
/** Name of the expando, to work with multiple documents or to re-shiv one document */
var expando = '_html5shiv';
	/** The id for the document's expando */
var expanID = 0;
/** Cached data for each document */
var expandoData = {};
/** Detect whether the browser supports unknown elements */
var supportsUnknownElements;
(function() {
try {
var a = document.createElement('a');
a.innerHTML = '<xyz></xyz>';
		//if the hidden property is implemented we can assume that the browser supports basic HTML5 Styles
supportsHtml5Styles = ('hidden' in a);
supportsUnknownElements = a.childNodes.length == 1 || (function() {
// assign a false positive if unable to shiv
(document.createElement)('a');
var frag = document.createDocumentFragment();
return (
typeof frag.cloneNode == 'undefined' ||
typeof frag.createDocumentFragment == 'undefined' ||
typeof frag.createElement == 'undefined'
);
}());
} catch(e) {
supportsHtml5Styles = true;
supportsUnknownElements = true;
}
}());
/*--------------------------------------------------------------------------*/
/**
* Creates a style sheet with the given CSS text and adds it to the document.
* @private
* @param {Document} ownerDocument The document.
* @param {String} cssText The CSS text.
* @returns {StyleSheet} The style element.
*/
function addStyleSheet(ownerDocument, cssText) {
var p = ownerDocument.createElement('p'),
parent = ownerDocument.getElementsByTagName('head')[0] || ownerDocument.documentElement;
p.innerHTML = 'x<style>' + cssText + '</style>';
return parent.insertBefore(p.lastChild, parent.firstChild);
}
/**
* Returns the value of `html5.elements` as an array.
* @private
* @returns {Array} An array of shived element node names.
*/
function getElements() {
var elements = html5.elements;
return typeof elements == 'string' ? elements.split(' ') : elements;
}
/**
* Returns the data associated to the given document
* @private
* @param {Document} ownerDocument The document.
* @returns {Object} An object of data.
*/
function getExpandoData(ownerDocument) {
var data = expandoData[ownerDocument[expando]];
if (!data) {
data = {};
expanID++;
ownerDocument[expando] = expanID;
expandoData[expanID] = data;
}
return data;
}
/**
* returns a shived element for the given nodeName and document
* @memberOf html5
* @param {String} nodeName name of the element
* @param {Document} ownerDocument The context document.
* @returns {Object} The shived element.
*/
function createElement(nodeName, ownerDocument, data){
if (!ownerDocument) {
ownerDocument = document;
}
if(supportsUnknownElements){
return ownerDocument.createElement(nodeName);
}
if (!data) {
data = getExpandoData(ownerDocument);
}
var node;
if (data.cache[nodeName]) {
node = data.cache[nodeName].cloneNode();
} else if (saveClones.test(nodeName)) {
node = (data.cache[nodeName] = data.createElem(nodeName)).cloneNode();
} else {
node = data.createElem(nodeName);
}
// Avoid adding some elements to fragments in IE < 9 because
// * Attributes like `name` or `type` cannot be set/changed once an element
// is inserted into a document/fragment
// * Link elements with `src` attributes that are inaccessible, as with
// a 403 response, will cause the tab/window to crash
// * Script elements appended to fragments will execute when their `src`
// or `text` property is set
return node.canHaveChildren && !reSkip.test(nodeName) ? data.frag.appendChild(node) : node;
}
/**
* returns a shived DocumentFragment for the given document
* @memberOf html5
* @param {Document} ownerDocument The context document.
* @returns {Object} The shived DocumentFragment.
*/
function createDocumentFragment(ownerDocument, data){
if (!ownerDocument) {
ownerDocument = document;
}
if(supportsUnknownElements){
return ownerDocument.createDocumentFragment();
}
data = data || getExpandoData(ownerDocument);
var clone = data.frag.cloneNode(),
i = 0,
elems = getElements(),
l = elems.length;
for(;i<l;i++){
clone.createElement(elems[i]);
}
return clone;
}
/**
* Shivs the `createElement` and `createDocumentFragment` methods of the document.
* @private
* @param {Document|DocumentFragment} ownerDocument The document.
* @param {Object} data of the document.
*/
function shivMethods(ownerDocument, data) {
if (!data.cache) {
data.cache = {};
data.createElem = ownerDocument.createElement;
data.createFrag = ownerDocument.createDocumentFragment;
data.frag = data.createFrag();
}
ownerDocument.createElement = function(nodeName) {
//abort shiv
if (!html5.shivMethods) {
return data.createElem(nodeName);
}
return createElement(nodeName, ownerDocument, data);
};
ownerDocument.createDocumentFragment = Function('h,f', 'return function(){' +
'var n=f.cloneNode(),c=n.createElement;' +
'h.shivMethods&&(' +
// unroll the `createElement` calls
getElements().join().replace(/\w+/g, function(nodeName) {
data.createElem(nodeName);
data.frag.createElement(nodeName);
return 'c("' + nodeName + '")';
}) +
');return n}'
)(html5, data.frag);
}
/*--------------------------------------------------------------------------*/
/**
* Shivs the given document.
* @memberOf html5
* @param {Document} ownerDocument The document to shiv.
* @returns {Document} The shived document.
*/
function shivDocument(ownerDocument) {
if (!ownerDocument) {
ownerDocument = document;
}
var data = getExpandoData(ownerDocument);
if (html5.shivCSS && !supportsHtml5Styles && !data.hasCSS) {
data.hasCSS = !!addStyleSheet(ownerDocument,
// corrects block display not defined in IE6/7/8/9
'article,aside,figcaption,figure,footer,header,hgroup,nav,section{display:block}' +
// adds styling not present in IE6/7/8/9
'mark{background:#FF0;color:#000}'
);
}
if (!supportsUnknownElements) {
shivMethods(ownerDocument, data);
}
return ownerDocument;
}
/*--------------------------------------------------------------------------*/
/**
* The `html5` object is exposed so that more elements can be shived and
* existing shiving can be detected on iframes.
* @type Object
* @example
*
* // options can be changed before the script is included
* html5 = { 'elements': 'mark section', 'shivCSS': false, 'shivMethods': false };
*/
var html5 = {
/**
* An array or space separated string of node names of the elements to shiv.
* @memberOf html5
* @type Array|String
*/
'elements': options.elements || 'abbr article aside audio bdi canvas data datalist details figcaption figure footer header hgroup mark meter nav output progress section summary time video',
/**
* A flag to indicate that the HTML5 style sheet should be inserted.
* @memberOf html5
* @type Boolean
*/
'shivCSS': (options.shivCSS !== false),
/**
* Is equal to true if a browser supports creating unknown/HTML5 elements
* @memberOf html5
* @type boolean
*/
'supportsUnknownElements': supportsUnknownElements,
/**
* A flag to indicate that the document's `createElement` and `createDocumentFragment`
* methods should be overwritten.
* @memberOf html5
* @type Boolean
*/
'shivMethods': (options.shivMethods !== false),
/**
* A string to describe the type of `html5` object ("default" or "default print").
* @memberOf html5
* @type String
*/
'type': 'default',
// shivs the document according to the specified `html5` object options
'shivDocument': shivDocument,
//creates a shived element
createElement: createElement,
//creates a shived documentFragment
createDocumentFragment: createDocumentFragment
};
/*--------------------------------------------------------------------------*/
// expose html5
window.html5 = html5;
// shiv the document
shivDocument(document);
}(this, document));
/*>>shiv*/
// Assign private properties to the return object with prefix
Modernizr._version = version;
// expose these for the plugin API. Look in the source for how to join() them against your input
/*>>prefixes*/
Modernizr._prefixes = prefixes;
/*>>prefixes*/
/*>>domprefixes*/
Modernizr._domPrefixes = domPrefixes;
Modernizr._cssomPrefixes = cssomPrefixes;
/*>>domprefixes*/
/*>>mq*/
// Modernizr.mq tests a given media query, live against the current state of the window
// A few important notes:
// * If a browser does not support media queries at all (eg. oldIE) the mq() will always return false
// * A max-width or orientation query will be evaluated against the current state, which may change later.
// * You must specify values. Eg. If you are testing support for the min-width media query use:
// Modernizr.mq('(min-width:0)')
// usage:
// Modernizr.mq('only screen and (max-width:768)')
Modernizr.mq = testMediaQuery;
/*>>mq*/
/*>>hasevent*/
// Modernizr.hasEvent() detects support for a given event, with an optional element to test on
// Modernizr.hasEvent('gesturestart', elem)
Modernizr.hasEvent = isEventSupported;
/*>>hasevent*/
/*>>testprop*/
// Modernizr.testProp() investigates whether a given style property is recognized
// Note that the property names must be provided in the camelCase variant.
// Modernizr.testProp('pointerEvents')
Modernizr.testProp = function(prop){
return testProps([prop]);
};
/*>>testprop*/
/*>>testallprops*/
// Modernizr.testAllProps() investigates whether a given style property,
// or any of its vendor-prefixed variants, is recognized
// Note that the property names must be provided in the camelCase variant.
// Modernizr.testAllProps('boxSizing')
Modernizr.testAllProps = testPropsAll;
/*>>testallprops*/
/*>>teststyles*/
// Modernizr.testStyles() allows you to add custom styles to the document and test an element afterwards
// Modernizr.testStyles('#modernizr { position:absolute }', function(elem, rule){ ... })
Modernizr.testStyles = injectElementWithStyles;
/*>>teststyles*/
/*>>prefixed*/
// Modernizr.prefixed() returns the prefixed or nonprefixed property name variant of your input
// Modernizr.prefixed('boxSizing') // 'MozBoxSizing'
// Properties must be passed as DOM-style camelCase, rather than `box-sizing` hyphenated style.
// Return values will also be the camelCase variant; if you need to translate that to hyphenated style, use:
//
// str.replace(/([A-Z])/g, function(str,m1){ return '-' + m1.toLowerCase(); }).replace(/^ms-/,'-ms-');
// If you're trying to ascertain which transition end event to bind to, you might do something like...
//
// var transEndEventNames = {
// 'WebkitTransition' : 'webkitTransitionEnd',
// 'MozTransition' : 'transitionend',
// 'OTransition' : 'oTransitionEnd',
// 'msTransition' : 'MSTransitionEnd',
// 'transition' : 'transitionend'
// },
// transEndEventName = transEndEventNames[ Modernizr.prefixed('transition') ];
Modernizr.prefixed = function(prop, obj, elem){
if(!obj) {
return testPropsAll(prop, 'pfx');
} else {
// Testing DOM property e.g. Modernizr.prefixed('requestAnimationFrame', window) // 'mozRequestAnimationFrame'
return testPropsAll(prop, obj, elem);
}
};
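// Illustrative sketch, not part of the original source: the two-argument form
// resolves prefixed DOM APIs; the returned function is assumed ready to call.
// var raf = Modernizr.prefixed('requestAnimationFrame', window);
// if (raf) { raf(function () { /* runs on the next animation frame */ }); }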
/*>>prefixed*/
/*>>cssclasses*/
// Remove "no-js" class from <html> element, if it exists:
docElement.className = docElement.className.replace(/(^|\s)no-js(\s|$)/, '$1$2') +
// Add the new classes to the <html> element.
(enableClasses ? ' js ' + classes.join(' ') : '');
/*>>cssclasses*/
return Modernizr;
})(this, this.document);
| is |
grpchandler_test.go | // Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package rpc
import (
"encoding/hex"
"fmt"
"time"
"github.com/33cn/chain33/queue"
"google.golang.org/grpc"
//"sync"
"testing"
//"time"
"github.com/33cn/chain33/client"
"github.com/stretchr/testify/require"
"strings"
"github.com/33cn/chain33/client/mocks"
"github.com/33cn/chain33/common"
"github.com/33cn/chain33/types"
pb "github.com/33cn/chain33/types"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"golang.org/x/net/context"
"google.golang.org/grpc/peer"
)
var (
g Grpc
qapi *mocks.QueueProtocolAPI
)
// Addr is an autogenerated mock type for the Addr type
type Addr struct {
mock.Mock
}
// Network provides a mock function with given fields:
func (_m *Addr) Network() string {
ret := _m.Called()
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// String provides a mock function with given fields:
func (_m *Addr) String() string {
ret := _m.Called()
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
func init() {
//addr := "192.168.1.1"
//remoteIpWhitelist[addr] = true
//grpcFuncWhitelist["*"] = true
cfg := types.NewChain33Config(types.GetDefaultCfgstring())
Init(cfg)
qapi = new(mocks.QueueProtocolAPI)
qapi.On("GetConfig", mock.Anything).Return(cfg)
g.cli.QueueProtocolAPI = qapi
}
func getOkCtx() context.Context {
addr := new(Addr)
addr.On("String").Return("192.168.1.1")
ctx := context.Background()
pr := &peer.Peer{
Addr: addr,
AuthInfo: nil,
}
ctx = peer.NewContext(ctx, pr)
return ctx
}
func testSendTransactionOk(t *testing.T) {
var in *types.Transaction
reply := &types.Reply{IsOk: true, Msg: nil}
qapi.On("SendTx", in).Return(reply, nil)
reply, err := g.SendTransaction(getOkCtx(), in)
assert.Nil(t, err, "the error should be nil")
assert.Equal(t, true, reply.IsOk, "reply should be ok")
}
func TestGrpc_SendTransactionSync(t *testing.T) {
var tx types.Transaction
reply := &types.Reply{IsOk: true, Msg: tx.Hash()}
mockAPI := new(mocks.QueueProtocolAPI)
mockAPI.On("SendTx", mock.Anything).Return(reply, nil)
mockAPI.On("QueryTx", mock.Anything).Return(&types.TransactionDetail{}, nil)
g := Grpc{}
g.cli.QueueProtocolAPI = mockAPI
reply, err := g.SendTransactionSync(getOkCtx(), &tx)
assert.Nil(t, err, "the error should be nil")
assert.Equal(t, true, reply.IsOk, "reply should be ok")
assert.Equal(t, tx.Hash(), reply.Msg)
}
func TestSendTransaction(t *testing.T) {
testSendTransactionOk(t)
}
func testVersionOK(t *testing.T) {
reply := &types.VersionInfo{Chain33: "6.0.2"}
qapi.On("Version").Return(reply, nil)
data, err := g.Version(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
assert.Equal(t, "6.0.2", data.Chain33, "reply should be ok")
}
func TestVersion(t *testing.T) {
testVersionOK(t)
}
func testGetMemPoolOK(t *testing.T) {
var in *types.ReqGetMempool
qapi.On("GetMempool", in).Return(nil, nil)
data, err := g.GetMemPool(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
assert.Nil(t, data)
}
func Test_GetMemPool(t *testing.T) {
testGetMemPoolOK(t)
}
func testGetLastMemPoolOK(t *testing.T) {
qapi.On("GetLastMempool").Return(nil, nil)
data, err := g.GetLastMemPool(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
assert.Nil(t, data)
}
func TestGetLastMemPool(t *testing.T) {
testGetLastMemPoolOK(t)
}
func testGetProperFeeOK(t *testing.T) {
var in *types.ReqProperFee
qapi.On("GetProperFee", in).Return(&types.ReplyProperFee{ProperFee: 1000000}, nil)
data, err := g.GetProperFee(getOkCtx(), in)
assert.Nil(t, err, "the error should be nil")
assert.Equal(t, int64(1000000), data.ProperFee)
}
func TestGetProperFee(t *testing.T) {
testGetProperFeeOK(t)
}
func testQueryChainError(t *testing.T) {
var in *pb.ChainExecutor
qapi.On("QueryChain", in).Return(nil, fmt.Errorf("error")).Once()
_, err := g.QueryChain(getOkCtx(), in)
assert.EqualError(t, err, "error", "return error")
}
func testQueryChainOK(t *testing.T) {
var in *pb.ChainExecutor
var msg types.Message
var req types.ReqString
req.Data = "msg"
msg = &req
qapi.On("QueryChain", in).Return(msg, nil).Once()
data, err := g.QueryChain(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
assert.Equal(t, true, data.IsOk, "reply should be ok")
var decodemsg types.ReqString
pb.Decode(data.Msg, &decodemsg)
assert.Equal(t, req.Data, decodemsg.Data)
}
func TestQueryChain(t *testing.T) {
testQueryChainError(t)
testQueryChainOK(t)
}
func testGetPeerInfoOK(t *testing.T) {
qapi.On("PeerInfo", mock.Anything).Return(nil, nil)
data, err := g.GetPeerInfo(getOkCtx(), &types.P2PGetPeerReq{})
assert.Nil(t, err, "the error should be nil")
assert.Nil(t, data)
}
func TestGetPeerInfo(t *testing.T) {
testGetPeerInfoOK(t)
}
func testNetInfoOK(t *testing.T) {
qapi.On("GetNetInfo", mock.Anything).Return(nil, nil)
data, err := g.NetInfo(getOkCtx(), &types.P2PGetNetInfoReq{})
assert.Nil(t, err, "the error should be nil")
assert.Nil(t, data)
}
func TestNetInfo(t *testing.T) {
testNetInfoOK(t)
}
func testGetAccountsOK(t *testing.T) {
qapi.On("ExecWalletFunc", "wallet", "WalletGetAccountList", mock.Anything).Return(&types.WalletAccounts{}, nil)
_, err := g.GetAccounts(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestGetAccount(t *testing.T) {
qapi.On("ExecWalletFunc", "wallet", "WalletGetAccount", mock.Anything).Return(&types.WalletAccount{}, nil)
_, err := g.GetAccount(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestGetAccounts(t *testing.T) {
testGetAccountsOK(t)
}
func testNewAccountOK(t *testing.T) {
var in *pb.ReqNewAccount
qapi.On("ExecWalletFunc", "wallet", "NewAccount", in).Return(&types.WalletAccount{}, nil)
_, err := g.NewAccount(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestNewAccount(t *testing.T) {
testNewAccountOK(t)
}
func testWalletTransactionListOK(t *testing.T) {
var in *pb.ReqWalletTransactionList
qapi.On("ExecWalletFunc", "wallet", "WalletTransactionList", in).Return(&pb.WalletTxDetails{}, nil)
_, err := g.WalletTransactionList(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestWalletTransactionList(t *testing.T) {
testWalletTransactionListOK(t)
}
func testImportPrivKeyOK(t *testing.T) {
var in *pb.ReqWalletImportPrivkey
qapi.On("ExecWalletFunc", "wallet", "WalletImportPrivkey", in).Return(&pb.WalletAccount{}, nil)
_, err := g.ImportPrivkey(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestImportPrivKey(t *testing.T) {
testImportPrivKeyOK(t)
}
func testSendToAddressOK(t *testing.T) {
var in *pb.ReqWalletSendToAddress
qapi.On("ExecWalletFunc", "wallet", "WalletSendToAddress", in).Return(&pb.ReplyHash{}, nil)
_, err := g.SendToAddress(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestSendToAddress(t *testing.T) {
testSendToAddressOK(t)
}
func testSetTxFeeOK(t *testing.T) {
var in *pb.ReqWalletSetFee
qapi.On("ExecWalletFunc", "wallet", "WalletSetFee", in).Return(&pb.Reply{}, nil)
_, err := g.SetTxFee(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestSetTxFee(t *testing.T) {
testSetTxFeeOK(t)
}
func testSetLablOK(t *testing.T) {
var in *pb.ReqWalletSetLabel
qapi.On("ExecWalletFunc", "wallet", "WalletSetLabel", in).Return(&pb.WalletAccount{}, nil)
_, err := g.SetLabl(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestSetLabl(t *testing.T) {
testSetLablOK(t)
}
func testMergeBalanceOK(t *testing.T) {
var in *pb.ReqWalletMergeBalance
qapi.On("ExecWalletFunc", "wallet", "WalletMergeBalance", in).Return(&pb.ReplyHashes{}, nil)
_, err := g.MergeBalance(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestMergeBalance(t *testing.T) {
testMergeBalanceOK(t)
}
func testSetPasswdOK(t *testing.T) {
var in *pb.ReqWalletSetPasswd
qapi.On("ExecWalletFunc", "wallet", "WalletSetPasswd", in).Return(&pb.Reply{}, nil)
_, err := g.SetPasswd(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestSetPasswd(t *testing.T) {
testSetPasswdOK(t)
}
func testLockOK(t *testing.T) {
var in *pb.ReqNil
qapi.On("ExecWalletFunc", "wallet", "WalletLock", in).Return(&pb.Reply{}, nil)
_, err := g.Lock(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestLock(t *testing.T) {
testLockOK(t)
}
func testUnLockOK(t *testing.T) {
var in *pb.WalletUnLock
qapi.On("ExecWalletFunc", "wallet", "WalletUnLock", in).Return(&pb.Reply{}, nil)
_, err := g.UnLock(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestUnLock(t *testing.T) {
testUnLockOK(t)
}
func testGenSeedOK(t *testing.T) {
var in *pb.GenSeedLang
qapi.On("ExecWalletFunc", "wallet", "GenSeed", in).Return(&pb.ReplySeed{}, nil)
_, err := g.GenSeed(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestGenSeed(t *testing.T) {
testGenSeedOK(t)
}
func testGetSeedOK(t *testing.T) {
var in *pb.GetSeedByPw
qapi.On("ExecWalletFunc", "wallet", "GetSeed", in).Return(&pb.ReplySeed{}, nil)
_, err := g.GetSeed(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestGetSeed(t *testing.T) {
testGetSeedOK(t)
}
func testSaveSeedOK(t *testing.T) {
var in *pb.SaveSeedByPw
qapi.On("ExecWalletFunc", "wallet", "SaveSeed", in).Return(&pb.Reply{}, nil)
_, err := g.SaveSeed(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestSaveSeed(t *testing.T) |
func testGetWalletStatusOK(t *testing.T) {
var in *pb.ReqNil
qapi.On("ExecWalletFunc", "wallet", "GetWalletStatus", in).Return(&pb.WalletStatus{}, nil)
_, err := g.GetWalletStatus(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestGetWalletStatus(t *testing.T) {
testGetWalletStatusOK(t)
}
func testDumpPrivkeyOK(t *testing.T) {
var in *pb.ReqString
qapi.On("ExecWalletFunc", "wallet", "DumpPrivkey", in).Return(&pb.ReplyString{}, nil)
_, err := g.DumpPrivkey(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestDumpPrivkey(t *testing.T) {
testDumpPrivkeyOK(t)
}
func testDumpPrivkeysFileOK(t *testing.T) {
var in *pb.ReqPrivkeysFile
qapi.On("ExecWalletFunc", "wallet", "DumpPrivkeysFile", in).Return(&pb.Reply{}, nil)
_, err := g.DumpPrivkeysFile(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestDumpPrivkeysFile(t *testing.T) {
testDumpPrivkeysFileOK(t)
}
func testImportPrivkeysFileOK(t *testing.T) {
var in *pb.ReqPrivkeysFile
qapi.On("ExecWalletFunc", "wallet", "ImportPrivkeysFile", in).Return(&pb.Reply{}, nil)
_, err := g.ImportPrivkeysFile(getOkCtx(), nil)
assert.Nil(t, err, "the error should be nil")
}
func TestImportPrivkeysFile(t *testing.T) {
testImportPrivkeysFileOK(t)
}
func testGetBlocksError(t *testing.T) {
var in = pb.ReqBlocks{IsDetail: true}
qapi.On("GetBlocks", &in).Return(nil, fmt.Errorf("error")).Once()
_, err := g.GetBlocks(getOkCtx(), &in)
assert.EqualError(t, err, "error", "the error should be error")
}
func testGetBlocksOK(t *testing.T) {
var in = pb.ReqBlocks{IsDetail: true}
var details types.BlockDetails
var block = &types.Block{Version: 1}
var detail = &types.BlockDetail{Block: block}
details.Items = append(details.Items, detail)
qapi.On("GetBlocks", &in).Return(&details, nil).Once()
data, err := g.GetBlocks(getOkCtx(), &in)
assert.Nil(t, err, "the error should be nil")
assert.Equal(t, true, data.IsOk)
var details2 types.BlockDetails
pb.Decode(data.Msg, &details2)
if !proto.Equal(&details, &details2) {
assert.Equal(t, types.Encode(&details), types.Encode(&details2))
}
}
func TestGetBlocks(t *testing.T) {
testGetBlocksError(t)
testGetBlocksOK(t)
}
func testGetHexTxByHashError(t *testing.T) {
var in *pb.ReqHash
qapi.On("QueryTx", in).Return(nil, fmt.Errorf("error")).Once()
_, err := g.GetHexTxByHash(getOkCtx(), in)
assert.EqualError(t, err, "error", "the error should be error")
}
func testGetHexTxByHashOK(t *testing.T) {
var in *pb.ReqHash
tx := &types.Transaction{Fee: 1}
var td = &types.TransactionDetail{Tx: tx}
var tdNil = &types.TransactionDetail{Tx: nil}
encodetx := common.ToHex(pb.Encode(tx))
qapi.On("QueryTx", in).Return(tdNil, nil).Once()
data, err := g.GetHexTxByHash(getOkCtx(), in)
assert.Nil(t, err, "the error should be nil")
assert.Equal(t, "", data.Tx)
qapi.On("QueryTx", in).Return(td, nil).Once()
data, err = g.GetHexTxByHash(getOkCtx(), in)
assert.Nil(t, err, "the error should be nil")
assert.Equal(t, encodetx, data.Tx)
}
func TestGetHexTxByHash(t *testing.T) {
testGetHexTxByHashError(t)
testGetHexTxByHashOK(t)
}
func testGetTransactionByAddrOK(t *testing.T) {
var in *pb.ReqAddr
qapi.On("GetTransactionByAddr", in).Return(nil, nil)
data, err := g.GetTransactionByAddr(getOkCtx(), in)
assert.Nil(t, err, "the error should be nil")
assert.Nil(t, data)
}
func TestGetTransactionByAddr(t *testing.T) {
testGetTransactionByAddrOK(t)
}
func testGetTransactionByHashesOK(t *testing.T) {
var in *pb.ReqHashes
qapi.On("GetTransactionByHash", in).Return(nil, nil)
data, err := g.GetTransactionByHashes(getOkCtx(), in)
assert.Nil(t, err, "the error should be nil")
assert.Nil(t, data)
}
func TestGetTransactionByHashes(t *testing.T) {
testGetTransactionByHashesOK(t)
}
func testGetHeadersOK(t *testing.T) {
var in *pb.ReqBlocks
qapi.On("GetHeaders", in).Return(nil, nil)
data, err := g.GetHeaders(getOkCtx(), in)
assert.Nil(t, err, "the error should be nil")
assert.Nil(t, data)
}
func TestGetHeaders(t *testing.T) {
testGetHeadersOK(t)
}
func testGetBlockOverviewOK(t *testing.T) {
var in *pb.ReqHash
qapi.On("GetBlockOverview", in).Return(nil, nil)
data, err := g.GetBlockOverview(getOkCtx(), in)
assert.Nil(t, err, "the error should be nil")
assert.Nil(t, data)
}
func TestGetBlockOverview(t *testing.T) {
testGetBlockOverviewOK(t)
}
func testGetBlockHashOK(t *testing.T) {
var in *pb.ReqInt
qapi.On("GetBlockHash", in).Return(nil, nil)
data, err := g.GetBlockHash(getOkCtx(), in)
assert.Nil(t, err, "the error should be nil")
assert.Nil(t, data)
}
func TestGetBlockHash(t *testing.T) {
testGetBlockHashOK(t)
}
func testIsSyncOK(t *testing.T) {
var in *pb.ReqNil
qapi.On("IsSync").Return(nil, nil)
data, err := g.IsSync(getOkCtx(), in)
assert.Nil(t, err, "the error should be nil")
assert.Nil(t, data)
}
func TestIsSync(t *testing.T) {
testIsSyncOK(t)
}
func testIsNtpClockSyncOK(t *testing.T) {
var in *pb.ReqNil
qapi.On("IsNtpClockSync").Return(nil, nil)
data, err := g.IsNtpClockSync(getOkCtx(), in)
assert.Nil(t, err, "the error should be nil")
assert.Nil(t, data)
}
func TestIsNtpClockSync(t *testing.T) {
testIsNtpClockSyncOK(t)
}
func testGetLastHeaderOK(t *testing.T) {
var in *pb.ReqNil
qapi.On("GetLastHeader").Return(nil, nil)
data, err := g.GetLastHeader(getOkCtx(), in)
assert.Nil(t, err, "the error should be nil")
assert.Nil(t, data)
}
func TestGetLastHeader(t *testing.T) {
testGetLastHeaderOK(t)
}
func testQueryTransactionOK(t *testing.T) {
var in *pb.ReqHash
qapi.On("QueryTx", in).Return(nil, nil)
data, err := g.QueryTransaction(getOkCtx(), in)
assert.Nil(t, err, "the error should be nil")
assert.Nil(t, data)
}
func TestQueryTransaction(t *testing.T) {
testQueryTransactionOK(t)
}
func TestReWriteRawTx(t *testing.T) {
txHex1 := "0a05636f696e73122c18010a281080c2d72f222131477444795771577233553637656a7663776d333867396e7a6e7a434b58434b7120a08d0630a696c0b3f78dd9ec083a2131477444795771577233553637656a7663776d333867396e7a6e7a434b58434b71"
in := &types.ReWriteRawTx{
Tx: txHex1,
Fee: 29977777777,
Expire: "130s",
To: "aabbccdd",
Index: 0,
}
data, err := g.ReWriteTx(getOkCtx(), in)
assert.Nil(t, err)
assert.NotNil(t, data.Data)
rtTx := hex.EncodeToString(data.Data)
assert.NotEqual(t, txHex1, rtTx)
tx := &types.Transaction{}
err = types.Decode(data.Data, tx)
assert.Nil(t, err)
assert.Equal(t, tx.Fee, in.Fee)
assert.Equal(t, in.To, tx.To)
}
func TestGrpc_CreateNoBalanceTransaction(t *testing.T) {
_, err := g.CreateNoBalanceTransaction(getOkCtx(), &pb.NoBalanceTx{})
assert.NoError(t, err)
}
func TestGrpc_CreateNoBalanceTxs(t *testing.T) {
_, err := g.CreateNoBalanceTxs(getOkCtx(), &pb.NoBalanceTxs{TxHexs: []string{"0a05746f6b656e12413804223d0a0443434e5910a09c011a0d74657374207472616e73666572222231333559774e715367694551787577586650626d526d48325935334564673864343820a08d0630969a9fe6c4b9c7ba5d3a2231333559774e715367694551787577586650626d526d483259353345646738643438", "0a05746f6b656e12413804223d0a0443434e5910b0ea011a0d74657374207472616e73666572222231333559774e715367694551787577586650626d526d48325935334564673864343820a08d0630bca0a2dbc0f182e06f3a2231333559774e715367694551787577586650626d526d483259353345646738643438"}})
assert.NoError(t, err)
}
func TestGrpc_CreateRawTransaction(t *testing.T) {
_, err := g.CreateRawTransaction(getOkCtx(), &pb.CreateTx{})
assert.NoError(t, err)
}
func TestGrpc_CreateTransaction(t *testing.T) {
_, err := g.CreateTransaction(getOkCtx(), &pb.CreateTxIn{Execer: []byte("coins")})
assert.Equal(t, err, types.ErrActionNotSupport)
}
func TestGrpc_CreateRawTxGroup(t *testing.T) {
_, err := g.CreateRawTxGroup(getOkCtx(), &pb.CreateTransactionGroup{})
assert.Equal(t, types.ErrTxGroupCountLessThanTwo, err)
}
func TestGrpc_GetAddrOverview(t *testing.T) {
_, err := g.GetAddrOverview(getOkCtx(), &types.ReqAddr{})
assert.Equal(t, err, types.ErrInvalidAddress)
}
func TestGrpc_GetBalance(t *testing.T) {
qapi.On("StoreGet", mock.Anything).Return(nil, types.ErrInvalidParam)
_, err := g.GetBalance(getOkCtx(), &types.ReqBalance{})
assert.Equal(t, err, types.ErrInvalidParam)
}
func TestGrpc_GetAllExecBalance(t *testing.T) {
_, err := g.GetAllExecBalance(getOkCtx(), &pb.ReqAllExecBalance{})
assert.Equal(t, err, types.ErrInvalidAddress)
}
func TestGrpc_QueryConsensus(t *testing.T) {
qapi.On("QueryConsensus", mock.Anything).Return(&types.ReqString{Data: "test"}, nil)
_, err := g.QueryConsensus(getOkCtx(), &pb.ChainExecutor{})
assert.NoError(t, err)
}
func TestGrpc_ExecWallet(t *testing.T) {
qapi.On("ExecWallet", mock.Anything).Return(&types.ReqString{Data: "test"}, nil)
_, err := g.ExecWallet(getOkCtx(), &pb.ChainExecutor{})
assert.NoError(t, err)
}
func TestGrpc_GetLastBlockSequence(t *testing.T) {
qapi.On("GetLastBlockSequence", mock.Anything).Return(nil, nil)
_, err := g.GetLastBlockSequence(getOkCtx(), &types.ReqNil{})
assert.NoError(t, err)
}
func TestGrpc_GetBlockSequences(t *testing.T) {
qapi.On("GetBlockSequences", mock.Anything).Return(nil, nil)
_, err := g.GetBlockSequences(getOkCtx(), &types.ReqBlocks{})
assert.NoError(t, err)
}
func TestGrpc_GetBlockByHashes(t *testing.T) {
qapi.On("GetBlockByHashes", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil)
_, err := g.GetBlockByHashes(getOkCtx(), &types.ReqHashes{})
assert.NoError(t, err)
}
func TestGrpc_GetSequenceByHash(t *testing.T) {
qapi.On("GetSequenceByHash", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil)
_, err := g.GetSequenceByHash(getOkCtx(), &pb.ReqHash{})
assert.NoError(t, err)
}
func TestGrpc_SignRawTx(t *testing.T) {
qapi.On("ExecWalletFunc", "wallet", "SignRawTx", mock.Anything).Return(&pb.ReplySignRawTx{}, nil)
_, err := g.SignRawTx(getOkCtx(), &types.ReqSignRawTx{})
assert.NoError(t, err)
}
func TestGrpc_QueryRandNum(t *testing.T) {
qapi.On("Query", mock.Anything, mock.Anything, mock.Anything).Return(&pb.ReplyHash{Hash: []byte("test")}, nil)
_, err := g.QueryRandNum(getOkCtx(), &pb.ReqRandHash{})
assert.NoError(t, err)
}
func TestGrpc_GetFork(t *testing.T) {
str := types.GetDefaultCfgstring()
newstr := strings.Replace(str, "Title=\"local\"", "Title=\"chain33\"", 1)
cfg := types.NewChain33Config(newstr)
cfg.SetDappFork("para", "fork100", 100)
Init(cfg)
api := new(mocks.QueueProtocolAPI)
api.On("GetConfig", mock.Anything).Return(cfg)
grpc := Grpc{}
grpc.cli.QueueProtocolAPI = api
val, err := grpc.GetFork(getOkCtx(), &pb.ReqKey{Key: []byte("para-fork100")})
assert.NoError(t, err)
assert.Equal(t, int64(100), val.Data)
cfg1 := types.NewChain33Config(types.GetDefaultCfgstring())
Init(cfg1)
api1 := new(mocks.QueueProtocolAPI)
api1.On("GetConfig", mock.Anything).Return(cfg1)
grpc1 := Grpc{}
grpc1.cli.QueueProtocolAPI = api1
val, err = grpc1.GetFork(getOkCtx(), &pb.ReqKey{Key: []byte("ForkBlockHash")})
assert.NoError(t, err)
assert.Equal(t, int64(1), val.Data)
}
func TestGrpc_LoadParaTxByTitle(t *testing.T) {
qapi.On("LoadParaTxByTitle", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil)
_, err := g.LoadParaTxByTitle(getOkCtx(), &pb.ReqHeightByTitle{})
assert.NoError(t, err)
}
func TestGrpc_GetParaTxByHeight(t *testing.T) {
qapi.On("GetParaTxByHeight", mock.Anything, mock.Anything, mock.Anything).Return(nil, nil)
_, err := g.GetParaTxByHeight(getOkCtx(), &pb.ReqParaTxByHeight{})
assert.NoError(t, err)
}
func TestGrpc_GetServerTime(t *testing.T) {
_, err := g.GetServerTime(getOkCtx(), nil)
assert.NoError(t, err)
}
func TestGrpc_GetCryptoList(t *testing.T) {
qapi.On("GetCryptoList").Return(nil)
_, err := g.GetCryptoList(getOkCtx(), nil)
assert.NoError(t, err)
}
func TestGrpc_SendDelayTransaction(t *testing.T) {
qapi.On("SendDelayTx", mock.Anything, mock.Anything).Return(nil, nil)
_, err := g.SendDelayTransaction(getOkCtx(), nil)
assert.NoError(t, err)
}
func TestGrpc_WalletRecoverScript(t *testing.T) {
_, err := g.GetWalletRecoverAddress(getOkCtx(), nil)
assert.Equal(t, types.ErrInvalidParam, err)
_, err = g.SignWalletRecoverTx(getOkCtx(), nil)
assert.Equal(t, types.ErrInvalidParam, err)
}
func TestGrpc_GetChainConfig(t *testing.T) {
cfg, err := g.GetChainConfig(getOkCtx(), nil)
assert.NoError(t, err)
assert.Equal(t, types.DefaultCoinPrecision, cfg.GetCoinPrecision())
}
func TestGrpc_SendTransactions(t *testing.T) {
cfg := types.NewChain33Config(types.GetDefaultCfgstring())
//Init(cfg)
g := Grpc{}
qapi = new(mocks.QueueProtocolAPI)
qapi.On("GetConfig", mock.Anything).Return(cfg)
g.cli.QueueProtocolAPI = qapi
txCount := 10
in := &types.Transactions{Txs: make([]*types.Transaction, txCount)}
testMsg := []byte("test")
var testTx *types.Transaction
qapi.On("SendTx", testTx).Return(&types.Reply{IsOk: true, Msg: testMsg}, types.ErrInvalidParam)
testTx = &types.Transaction{}
qapi.On("SendTx", testTx).Return(&types.Reply{IsOk: true, Msg: testMsg}, nil)
in.Txs[txCount-1] = testTx
reply, err := g.SendTransactions(getOkCtx(), in)
require.Nil(t, err)
require.Equal(t, txCount, len(reply.GetReplyList()))
require.Equal(t, types.ErrInvalidParam.Error(), string(reply.GetReplyList()[0].Msg))
require.False(t, reply.GetReplyList()[0].IsOk)
require.Equal(t, testMsg, reply.GetReplyList()[txCount-1].Msg)
require.True(t, reply.GetReplyList()[txCount-1].IsOk)
}
func TestGrpc_ConvertExectoAddr(t *testing.T) {
cfg := types.NewChain33Config(types.GetDefaultCfgstring())
g := Grpc{}
qapi = new(mocks.QueueProtocolAPI)
qapi.On("GetConfig", mock.Anything).Return(cfg)
g.cli.QueueProtocolAPI = qapi
replyStr, err := g.ConvertExectoAddr(getOkCtx(), &types.ReqString{Data: "coins"})
assert.NoError(t, err)
t.Log("execAddr:", replyStr)
assert.Equal(t, "1GaHYpWmqAJsqRwrpoNcB8VvgKtSwjcHqt", replyStr.GetData())
}
func TestGrpc_GetCoinSymbol(t *testing.T) {
reply, err := g.GetCoinSymbol(context.Background(), &types.ReqNil{})
assert.NoError(t, err)
t.Log(reply.GetData())
}
func TestGrpc_ListPushes(t *testing.T) {
cfg := types.NewChain33Config(types.GetDefaultCfgstring())
g := Grpc{}
qapi = new(mocks.QueueProtocolAPI)
qapi.On("GetConfig", mock.Anything).Return(cfg)
g.cli.QueueProtocolAPI = qapi
qapi.On("ListPushes", mock.Anything).Return(&types.PushSubscribes{
Pushes: []*types.PushSubscribeReq{{Name: "mytest-block", Encode: "grpc", Type: 0}},
}, nil)
list, err := g.ListPushes(getOkCtx(), &types.ReqNil{})
assert.NoError(t, err)
t.Log(list)
assert.Equal(t, 1, len(list.Pushes))
assert.Equal(t, "mytest-block", list.Pushes[0].GetName())
qapi.On("GetPushSeqLastNum", mock.Anything).Return(&types.Int64{Data: 122}, nil)
seq, err := g.GetPushSeqLastNum(getOkCtx(), &types.ReqString{})
assert.NoError(t, err)
assert.Equal(t, int64(122), seq.GetData())
}
func TestGrpc_AddPushSubscribe(t *testing.T) {
cfg := types.NewChain33Config(types.GetDefaultCfgstring())
g := Grpc{}
qapi = new(mocks.QueueProtocolAPI)
qapi.On("GetConfig", mock.Anything).Return(cfg)
g.cli.QueueProtocolAPI = qapi
qapi.On("AddPushSubscribe", &types.PushSubscribeReq{}).Return(&types.ReplySubscribePush{IsOk: false}, types.ErrInvalidParam)
_, err := g.AddPushSubscribe(getOkCtx(), &types.PushSubscribeReq{})
assert.NotNil(t, err)
}
func mockblockchain(t *testing.T, q queue.Queue) {
go func() {
blockchainKey := "blockchain"
client := q.Client()
client.Sub(blockchainKey)
for msg := range client.Recv() {
t.Log("mockblockchain recv:", msg)
switch msg.Ty {
case types.EventSubscribePush:
//checkparam
req, _ := msg.GetData().(*types.PushSubscribeReq)
if req.GetType() == 2 || req.GetType() == 4 {
if len(req.GetContract()) == 0 {
t.Log("no config contractparam")
msg.Reply(client.NewMessage("rpc", types.EventReplySubscribePush, &types.ReplySubscribePush{IsOk: false, Msg: types.ErrInvalidParam.Error() + ":contract must be configured"}))
continue
}
}
msg.Reply(client.NewMessage("rpc", types.EventReplySubscribePush, &types.ReplySubscribePush{IsOk: true}))
var txreceipt types.TxReceipts4Subscribe
var senddata types.PushData
senddata.Name = req.GetName()
senddata.Value = &types.PushData_TxReceipts{
TxReceipts: &txreceipt,
}
time.Sleep(time.Millisecond * 300)
cmsg := client.NewMessage("rpc", types.EventPushTxReceipt, &senddata)
client.SendTimeout(cmsg, false, time.Second)
t.Log("sendata", cmsg)
default:
t.Log("unsupport msg type", msg.Ty)
}
}
}()
}
func TestGrpc_SubEvent(t *testing.T) {
c := queue.New("mytest")
chain33Cfg := types.NewChain33Config(types.ReadFile("../cmd/chain33/chain33.test.toml"))
c.SetConfig(chain33Cfg)
go mockblockchain(t, c)
rpcCfg = new(types.RPC)
rpcCfg.GrpcBindAddr = "127.0.0.1:18802"
qcli := c.Client()
api, err := client.New(qcli, nil)
assert.Nil(t, err)
gapi := NewGRpcServer(qcli, api)
rpc := new(RPC)
rpc.cfg = rpcCfg
rpc.gapi = gapi
rpc.cli = qcli
rpc.api = api
go rpc.handleSysEvent()
defer gapi.Close()
go gapi.Listen()
time.Sleep(time.Millisecond * 500)
conn, err := grpc.Dial("127.0.0.1:18802", grpc.WithInsecure())
if err != nil {
t.Log("err", err)
return
}
gcli := types.NewChain33Client(conn)
var in types.ReqSubscribe
in.Name = "test-tx"
in.Type = 2
stream, err := gcli.SubEvent(context.Background(), &in)
if err != nil {
t.Log("SubEvent err:", err)
return
}
_, err = stream.Recv()
assert.NotNil(t, err)
in.Contract = make(map[string]bool)
in.Contract["token"] = true
stream, err = gcli.SubEvent(context.Background(), &in)
if err != nil {
t.Log("SubEvent err:", err)
return
}
data, err := stream.Recv()
assert.Nil(t, err)
t.Log("data:", data)
}
| {
testSaveSeedOK(t)
} |
Target_IC.py | from pyradioconfig.calculator_model_framework.interfaces.itarget import ITarget
class Target_IC_Nixi(ITarget):
_targetName = ITarget.IC_str
_description = ""
_store_config_output = True
_cfg_location = "nixi"
_tag = ITarget.IC_str
def | (self, model):
pass
| target_calculate |
testdata_verify.go | package problems
import (
"context"
"github.com/jsannemo/omogenjudge/util/go/cli"
"regexp"
"strings"
toolspb "github.com/jsannemo/omogenjudge/problemtools/api"
"github.com/jsannemo/omogenjudge/problemtools/util"
runpb "github.com/jsannemo/omogenjudge/runner/api"
)
var isTestGroupName = regexp.MustCompile(`^[a-z0-9]+$`).MatchString
var isTestCaseName = regexp.MustCompile(`^[a-z0-9\-_\.]+$`).MatchString
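// Illustrative examples, not part of the original source:
// isTestGroupName("group1") == true, isTestGroupName("Group-1") == false
// isTestCaseName("case-01.in") == true, isTestCaseName("case 01") == false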
func verifyTestdata(ctx context.Context, problem *toolspb.Problem, validators []*runpb.CompiledProgram, runner runpb.RunServiceClient, reporter util.Reporter) error {
for _, g := range problem.TestGroups {
if len(g.Tests) == 0 {
reporter.Err("Empty test group %v", g.Name)
}
if !isTestGroupName(g.Name) {
reporter.Err("Invalid test group name: %v [a-z0-9]+", g.Name)
}
for _, tc := range g.Tests {
if !isTestCaseName(tc.Name) {
reporter.Err("Invalid test case name: %v [a-z0-9\\-_\\.]", tc.Name)
}
}
if err := verifyTestCaseFormats(ctx, g, validators, runner, reporter); err != nil |
}
return nil
}
func verifyTestCaseFormats(ctx context.Context, group *toolspb.TestGroup, validators []*runpb.CompiledProgram, runner runpb.RunServiceClient, reporter util.Reporter) error {
var inputFiles []string
var names []string
for _, tc := range group.Tests {
inputFiles = append(inputFiles, tc.InputPath)
names = append(names, tc.FullName)
}
for _, validator := range validators {
resp, err := runner.SimpleRun(ctx, &runpb.SimpleRunRequest{
Program: validator,
InputFiles: inputFiles,
Arguments: cli.FormatFlagMap(group.InputFlags),
})
if err != nil {
return err
}
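// Assumption noted for the check below: validators follow the Kattis
// problemtools convention where exit code 42 signals an accepted input;
// any other exit code fails format validation for that test case.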
for i, res := range resp.Results {
if res.Timeout {
reporter.Err("test case %s caused validator to time out")
} else if res.ExitCode != 42 {
msg := ""
if res.Stdout != "" {
msg = msg + strings.TrimSpace(res.Stdout)
}
if res.Stderr != "" {
msg = msg + strings.TrimSpace(res.Stderr)
}
reporter.Err("test case %s failed validation: '%s'", names[i], msg)
}
}
}
return nil
}
| {
return err
} |
__init__.py | from socorepo.locators import github_tags, nexus3, pypi |
LOCATOR_PARSERS = {
"github_tags": github_tags.parse_locator,
"nexus3": nexus3.parse_locator,
"pypi": pypi.parse_locator
} | |
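# Illustrative usage sketch, not part of the original source (the config shape
# is assumed): dispatch a locator definition to its registered parser.
# def parse_locator(component_id, locator_cfg):
#     return LOCATOR_PARSERS[locator_cfg["type"]](component_id, locator_cfg)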
test_image_utils.py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for image utils."""
import errno
import math
import cryptography
import ddt
import mock
from oslo_concurrency import processutils
from oslo_utils import units
from six.moves import builtins
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.volume import throttling
class TestQemuImgInfo(test.TestCase):
@mock.patch('os.name', new='posix')
@mock.patch('oslo_utils.imageutils.QemuImgInfo')
@mock.patch('cinder.utils.execute')
def test_qemu_img_info(self, mock_exec, mock_info):
mock_out = mock.sentinel.out
mock_err = mock.sentinel.err
test_path = mock.sentinel.path
mock_exec.return_value = (mock_out, mock_err)
output = image_utils.qemu_img_info(test_path)
mock_exec.assert_called_once_with('env', 'LC_ALL=C', 'qemu-img',
'info', test_path, run_as_root=True,
prlimit=image_utils.QEMU_IMG_LIMITS)
self.assertEqual(mock_info.return_value, output)
@mock.patch('os.name', new='posix')
@mock.patch('oslo_utils.imageutils.QemuImgInfo')
@mock.patch('cinder.utils.execute')
def test_qemu_img_info_not_root(self, mock_exec, mock_info):
mock_out = mock.sentinel.out
mock_err = mock.sentinel.err
test_path = mock.sentinel.path
mock_exec.return_value = (mock_out, mock_err)
output = image_utils.qemu_img_info(test_path,
force_share=False,
run_as_root=False)
mock_exec.assert_called_once_with('env', 'LC_ALL=C', 'qemu-img',
'info', test_path, run_as_root=False,
prlimit=image_utils.QEMU_IMG_LIMITS)
self.assertEqual(mock_info.return_value, output)
@mock.patch('cinder.image.image_utils.os')
@mock.patch('oslo_utils.imageutils.QemuImgInfo')
@mock.patch('cinder.utils.execute')
def test_qemu_img_info_on_nt(self, mock_exec, mock_info, mock_os):
mock_out = mock.sentinel.out
mock_err = mock.sentinel.err
test_path = mock.sentinel.path
mock_exec.return_value = (mock_out, mock_err)
mock_os.name = 'nt'
output = image_utils.qemu_img_info(test_path)
mock_exec.assert_called_once_with('qemu-img', 'info', test_path,
run_as_root=True,
prlimit=image_utils.QEMU_IMG_LIMITS)
self.assertEqual(mock_info.return_value, output)
@mock.patch('cinder.utils.execute')
def test_get_qemu_img_version(self, mock_exec):
mock_out = "qemu-img version 2.0.0"
mock_err = mock.sentinel.err
mock_exec.return_value = (mock_out, mock_err)
expected_version = [2, 0, 0]
version = image_utils.get_qemu_img_version()
mock_exec.assert_called_once_with('qemu-img', '--version',
check_exit_code=False)
self.assertEqual(expected_version, version)
@mock.patch.object(image_utils, 'get_qemu_img_version')
def test_validate_qemu_img_version(self, mock_get_qemu_img_version):
fake_current_version = [1, 8]
mock_get_qemu_img_version.return_value = fake_current_version
minimum_version = '1.8'
image_utils.check_qemu_img_version(minimum_version)
mock_get_qemu_img_version.assert_called_once_with()
@mock.patch.object(image_utils, 'get_qemu_img_version')
def _test_validate_unsupported_qemu_img_version(self,
mock_get_qemu_img_version,
current_version=None):
mock_get_qemu_img_version.return_value = current_version
minimum_version = '2.0'
self.assertRaises(exception.VolumeBackendAPIException,
image_utils.check_qemu_img_version,
minimum_version)
mock_get_qemu_img_version.assert_called_once_with()
def test_validate_qemu_img_version_not_installed(self):
self._test_validate_unsupported_qemu_img_version()
def test_validate_older_qemu_img_version(self):
self._test_validate_unsupported_qemu_img_version(
current_version=[1, 8])
@ddt.ddt
class TestConvertImage(test.TestCase):
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.utils.is_blk_device', return_value=True)
def test_defaults_block_dev_with_size_info(self, mock_isblk,
mock_exec, mock_info):
source = mock.sentinel.source
dest = mock.sentinel.dest
out_format = mock.sentinel.out_format
mock_info.return_value.virtual_size = 1048576
throttle = throttling.Throttle(prefix=['cgcmd'])
with mock.patch('cinder.volume.volume_utils.check_for_odirect_support',
return_value=True):
output = image_utils.convert_image(source, dest, out_format,
throttle=throttle)
self.assertIsNone(output)
mock_exec.assert_called_once_with('cgcmd', 'qemu-img', 'convert',
'-O', out_format, '-t', 'none',
source, dest, run_as_root=True)
mock_exec.reset_mock()
with mock.patch('cinder.volume.volume_utils.check_for_odirect_support',
return_value=False):
output = image_utils.convert_image(source, dest, out_format)
self.assertIsNone(output)
mock_exec.assert_called_once_with('qemu-img', 'convert',
'-O', out_format, source, dest,
run_as_root=True)
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.utils.is_blk_device', return_value=True)
def test_defaults_block_dev_without_size_info(self, mock_isblk,
mock_exec,
mock_info):
source = mock.sentinel.source
dest = mock.sentinel.dest
out_format = mock.sentinel.out_format
mock_info.side_effect = ValueError
throttle = throttling.Throttle(prefix=['cgcmd'])
with mock.patch('cinder.volume.volume_utils.check_for_odirect_support',
return_value=True):
output = image_utils.convert_image(source, dest, out_format,
throttle=throttle)
mock_info.assert_called_once_with(source, run_as_root=True)
self.assertIsNone(output)
mock_exec.assert_called_once_with('cgcmd', 'qemu-img', 'convert',
'-O', out_format, '-t', 'none',
source, dest, run_as_root=True)
mock_exec.reset_mock()
with mock.patch('cinder.volume.volume_utils.check_for_odirect_support',
return_value=False):
output = image_utils.convert_image(source, dest, out_format)
self.assertIsNone(output)
mock_exec.assert_called_once_with('qemu-img', 'convert',
'-O', out_format, source, dest,
run_as_root=True)
@mock.patch('cinder.volume.volume_utils.check_for_odirect_support',
return_value=True)
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.utils.is_blk_device', return_value=False)
def test_defaults_not_block_dev_with_size_info(self, mock_isblk,
mock_exec,
mock_info,
mock_odirect):
source = mock.sentinel.source
dest = mock.sentinel.dest
out_format = mock.sentinel.out_format
out_subformat = 'fake_subformat'
mock_info.return_value.virtual_size = 1048576
output = image_utils.convert_image(source, dest, out_format,
out_subformat=out_subformat)
self.assertIsNone(output)
mock_exec.assert_called_once_with('qemu-img', 'convert', '-O',
out_format, '-o',
'subformat=%s' % out_subformat,
source, dest,
run_as_root=True)
@mock.patch('cinder.volume.volume_utils.check_for_odirect_support',
return_value=True)
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.utils.is_blk_device', return_value=False)
def test_defaults_not_block_dev_without_size_info(self,
mock_isblk,
mock_exec,
mock_info,
mock_odirect):
source = mock.sentinel.source
dest = mock.sentinel.dest
out_format = mock.sentinel.out_format
out_subformat = 'fake_subformat'
mock_info.side_effect = ValueError
output = image_utils.convert_image(source, dest, out_format,
out_subformat=out_subformat)
self.assertIsNone(output)
mock_exec.assert_called_once_with('qemu-img', 'convert', '-O',
out_format, '-o',
'subformat=%s' % out_subformat,
source, dest,
run_as_root=True)
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.utils.is_blk_device', return_value=True)
def test_defaults_block_dev_ami_img(self, mock_isblk, mock_exec,
mock_info):
source = mock.sentinel.source
dest = mock.sentinel.dest
out_format = mock.sentinel.out_format
mock_info.return_value.virtual_size = 1048576
with mock.patch('cinder.volume.volume_utils.check_for_odirect_support',
return_value=True):
output = image_utils.convert_image(source, dest, out_format,
src_format='AMI')
self.assertIsNone(output)
mock_exec.assert_called_once_with('qemu-img', 'convert',
'-O', out_format, '-t', 'none',
source, dest, run_as_root=True)
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.utils.is_blk_device', return_value=False)
@mock.patch('cinder.volume.volume_utils.check_for_odirect_support')
def test_convert_to_vhd(self, mock_check_odirect, mock_isblk,
mock_exec, mock_info):
source = mock.sentinel.source
dest = mock.sentinel.dest
out_format = "vhd"
mock_info.return_value.virtual_size = 1048576
output = image_utils.convert_image(source, dest, out_format)
self.assertIsNone(output)
# Qemu uses the legacy "vpc" format name, instead of "vhd".
mock_exec.assert_called_once_with('qemu-img', 'convert',
'-O', 'vpc',
source, dest, run_as_root=True)
@ddt.data(True, False)
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.utils.is_blk_device', return_value=False)
def test_convert_to_qcow2(self,
compress_option,
mock_isblk, mock_exec, mock_info):
self.override_config('image_compress_on_upload', compress_option)
source = mock.sentinel.source
dest = mock.sentinel.dest
out_format = 'qcow2'
mock_info.return_value.virtual_size = 1048576
image_utils.convert_image(source,
dest,
out_format,
compress=True)
exec_args = ['qemu-img', 'convert', '-O', 'qcow2']
if compress_option:
exec_args.append('-c')
exec_args.extend((source, dest))
mock_exec.assert_called_once_with(*exec_args,
run_as_root=True)
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('cinder.volume.volume_utils.check_for_odirect_support',
return_value=True)
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.utils.is_blk_device', return_value=False)
@mock.patch('os.path.dirname', return_value='fakedir')
@mock.patch('os.path.ismount', return_value=True)
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch('cinder.image.image_utils.utils.tempdir')
@mock.patch.object(image_utils.LOG, 'error')
def test_not_enough_conversion_space(self,
mock_log,
mock_tempdir,
mock_make,
mock_ismount,
mock_dirname,
mock_isblk,
mock_exec,
mock_info,
mock_odirect,
mock_conf):
source = mock.sentinel.source
mock_conf.image_conversion_dir = 'fakedir'
dest = [mock_conf.image_conversion_dir]
out_format = mock.sentinel.out_format
mock_info.side_effect = ValueError
mock_exec.side_effect = processutils.ProcessExecutionError(
stderr='No space left on device')
self.assertRaises(processutils.ProcessExecutionError,
image_utils.convert_image,
source, dest, out_format)
mock_log.assert_called_with('Insufficient free space on fakedir for'
' image conversion.')
class TestResizeImage(test.TestCase):
@mock.patch('cinder.utils.execute')
def test_defaults(self, mock_exec):
source = mock.sentinel.source
size = mock.sentinel.size
output = image_utils.resize_image(source, size)
self.assertIsNone(output)
mock_exec.assert_called_once_with('qemu-img', 'resize', source,
'sentinel.sizeG', run_as_root=False)
@mock.patch('cinder.utils.execute')
def test_run_as_root(self, mock_exec):
source = mock.sentinel.source
size = mock.sentinel.size
output = image_utils.resize_image(source, size, run_as_root=True)
self.assertIsNone(output)
mock_exec.assert_called_once_with('qemu-img', 'resize', source,
'sentinel.sizeG', run_as_root=True)
class TestFetch(test.TestCase):
@mock.patch('eventlet.tpool.Proxy')
@mock.patch('os.stat')
@mock.patch('cinder.image.image_utils.fileutils')
def test_defaults(self, mock_fileutils, mock_stat, mock_proxy):
ctxt = mock.sentinel.context
image_service = mock.Mock()
image_id = mock.sentinel.image_id
path = 'test_path'
_user_id = mock.sentinel._user_id
_project_id = mock.sentinel._project_id
mock_open = mock.mock_open()
mock_stat.return_value.st_size = 1048576
with mock.patch('cinder.image.image_utils.open',
new=mock_open, create=True):
output = image_utils.fetch(ctxt, image_service, image_id, path,
_user_id, _project_id)
self.assertIsNone(output)
mock_proxy.assert_called_once_with(mock_open.return_value)
image_service.download.assert_called_once_with(ctxt, image_id,
mock_proxy.return_value)
mock_open.assert_called_once_with(path, 'wb')
mock_fileutils.remove_path_on_error.assert_called_once_with(path)
(mock_fileutils.remove_path_on_error.return_value.__enter__
.assert_called_once_with())
(mock_fileutils.remove_path_on_error.return_value.__exit__
.assert_called_once_with(None, None, None))
def test_fetch_enospc(self):
context = mock.sentinel.context
image_service = mock.Mock()
image_id = mock.sentinel.image_id
e = exception.ImageTooBig(image_id=image_id, reason="fake")
e.errno = errno.ENOSPC
image_service.download.side_effect = e
path = '/test_path'
_user_id = mock.sentinel._user_id
_project_id = mock.sentinel._project_id
with mock.patch('cinder.image.image_utils.open',
new=mock.mock_open(), create=True):
self.assertRaises(exception.ImageTooBig,
image_utils.fetch,
context, image_service, image_id, path,
_user_id, _project_id)
def test_fetch_ioerror(self):
context = mock.sentinel.context
image_service = mock.Mock()
image_id = mock.sentinel.image_id
e = IOError()
e.errno = errno.ECONNRESET
e.strerror = 'Some descriptive message'
image_service.download.side_effect = e
path = '/test_path'
_user_id = mock.sentinel._user_id
_project_id = mock.sentinel._project_id
with mock.patch('cinder.image.image_utils.open',
new=mock.mock_open(), create=True):
self.assertRaisesRegex(exception.ImageDownloadFailed,
e.strerror,
image_utils.fetch,
context, image_service, image_id, path,
_user_id, _project_id)
class MockVerifier(object):
def update(self, data):
return
def verify(self):
return True
class BadVerifier(object):
def update(self, data):
return
def verify(self):
raise cryptography.exceptions.InvalidSignature(
'Invalid signature.'
)
class TestVerifyImageSignature(test.TestCase):
@mock.patch('cursive.signature_utils.get_verifier')
@mock.patch('oslo_utils.fileutils.remove_path_on_error')
def test_image_signature_verify_failed(self, mock_remove, mock_get):
self.mock_object(builtins, 'open', mock.mock_open())
ctxt = mock.sentinel.context
metadata = {'name': 'test image',
'is_public': False,
'protected': False,
'properties':
{'img_signature_certificate_uuid': 'fake_uuid',
'img_signature_hash_method': 'SHA-256',
'img_signature': 'signature',
'img_signature_key_type': 'RSA-PSS'}}
class FakeImageService(object):
def show(self, context, image_id):
return metadata
self.flags(verify_glance_signatures='enabled')
mock_get.return_value = BadVerifier()
self.assertRaises(exception.ImageSignatureVerificationException,
image_utils.verify_glance_image_signature,
ctxt, FakeImageService(), 'fake_id',
'fake_path')
mock_get.assert_called_once_with(
context=ctxt,
img_signature_certificate_uuid='fake_uuid',
img_signature_hash_method='SHA-256',
img_signature='signature',
img_signature_key_type='RSA-PSS')
@mock.patch('cursive.signature_utils.get_verifier')
def test_image_signature_metadata_missing(self, mock_get):
ctxt = mock.sentinel.context
metadata = {'name': 'test image',
'is_public': False,
'protected': False,
'properties': {}}
class FakeImageService(object):
def show(self, context, image_id):
return metadata
self.flags(verify_glance_signatures='enabled')
result = image_utils.verify_glance_image_signature(
ctxt, FakeImageService(), 'fake_id', 'fake_path')
self.assertFalse(result)
mock_get.assert_not_called()
@mock.patch('cursive.signature_utils.get_verifier')
def test_image_signature_metadata_incomplete(self, mock_get):
ctxt = mock.sentinel.context
metadata = {'name': 'test image',
'is_public': False,
'protected': False,
'properties':
{'img_signature_certificate_uuid': None,
'img_signature_hash_method': 'SHA-256',
'img_signature': 'signature',
'img_signature_key_type': 'RSA-PSS'}}
class FakeImageService(object):
def show(self, context, image_id):
return metadata
self.flags(verify_glance_signatures='enabled')
self.assertRaises(exception.InvalidSignatureImage,
image_utils.verify_glance_image_signature, ctxt,
FakeImageService(), 'fake_id', 'fake_path')
mock_get.assert_not_called()
@mock.patch('six.moves.builtins.open')
@mock.patch('eventlet.tpool.execute')
@mock.patch('cursive.signature_utils.get_verifier')
@mock.patch('oslo_utils.fileutils.remove_path_on_error')
def test_image_signature_verify_success(self, mock_remove, mock_get,
mock_exec, mock_open):
ctxt = mock.sentinel.context
metadata = {'name': 'test image',
'is_public': False,
'protected': False,
'properties':
{'img_signature_certificate_uuid': 'fake_uuid',
'img_signature_hash_method': 'SHA-256',
'img_signature': 'signature',
'img_signature_key_type': 'RSA-PSS'}}
class FakeImageService(object):
def show(self, context, image_id):
return metadata
self.flags(verify_glance_signatures='enabled')
mock_get.return_value = MockVerifier()
result = image_utils.verify_glance_image_signature(
ctxt, FakeImageService(), 'fake_id', 'fake_path')
self.assertTrue(result)
mock_exec.assert_called_once_with(
image_utils._verify_image,
mock_open.return_value.__enter__.return_value,
mock_get.return_value)
mock_get.assert_called_once_with(
context=ctxt,
img_signature_certificate_uuid='fake_uuid',
img_signature_hash_method='SHA-256',
img_signature='signature',
img_signature_key_type='RSA-PSS')
class TestVerifyImage(test.TestCase):
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.fileutils')
@mock.patch('cinder.image.image_utils.fetch')
def test_defaults(self, mock_fetch, mock_fileutils, mock_info):
ctxt = mock.sentinel.context
image_service = mock.Mock()
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
mock_data = mock_info.return_value
mock_data.file_format = 'test_format'
mock_data.backing_file = None
output = image_utils.fetch_verify_image(ctxt, image_service,
image_id, dest)
self.assertIsNone(output)
mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
dest, None, None)
mock_info.assert_called_once_with(dest,
run_as_root=True,
force_share=False)
mock_fileutils.remove_path_on_error.assert_called_once_with(dest)
(mock_fileutils.remove_path_on_error.return_value.__enter__
.assert_called_once_with())
(mock_fileutils.remove_path_on_error.return_value.__exit__
.assert_called_once_with(None, None, None))
@mock.patch('cinder.image.image_utils.check_virtual_size')
@mock.patch('cinder.image.image_utils.check_available_space')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.fileutils')
@mock.patch('cinder.image.image_utils.fetch')
def test_kwargs(self, mock_fetch, mock_fileutils, mock_info,
mock_check_space, mock_check_size):
ctxt = mock.sentinel.context
image_service = FakeImageService()
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
user_id = mock.sentinel.user_id
project_id = mock.sentinel.project_id
size = 2
run_as_root = mock.sentinel.run_as_root
mock_data = mock_info.return_value
mock_data.file_format = 'test_format'
mock_data.backing_file = None
mock_data.virtual_size = 1
output = image_utils.fetch_verify_image(
ctxt, image_service, image_id, dest, user_id=user_id,
project_id=project_id, size=size, run_as_root=run_as_root)
self.assertIsNone(output)
mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
dest, None, None)
mock_fileutils.remove_path_on_error.assert_called_once_with(dest)
(mock_fileutils.remove_path_on_error.return_value.__enter__
.assert_called_once_with())
(mock_fileutils.remove_path_on_error.return_value.__exit__
.assert_called_once_with(None, None, None))
mock_check_size.assert_called_once_with(mock_data.virtual_size,
size, image_id)
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.fileutils')
@mock.patch('cinder.image.image_utils.fetch')
def test_format_error(self, mock_fetch, mock_fileutils, mock_info):
ctxt = mock.sentinel.context
image_service = mock.Mock()
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
mock_data = mock_info.return_value
mock_data.file_format = None
mock_data.backing_file = None
self.assertRaises(exception.ImageUnacceptable,
image_utils.fetch_verify_image,
ctxt, image_service, image_id, dest)
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.fileutils')
@mock.patch('cinder.image.image_utils.fetch')
def test_backing_file_error(self, mock_fetch, mock_fileutils, mock_info):
ctxt = mock.sentinel.context
image_service = mock.Mock()
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
mock_data = mock_info.return_value
mock_data.file_format = 'test_format'
mock_data.backing_file = 'test_backing_file'
self.assertRaises(exception.ImageUnacceptable,
image_utils.fetch_verify_image,
ctxt, image_service, image_id, dest)
@mock.patch('cinder.image.image_utils.check_virtual_size')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.fileutils')
@mock.patch('cinder.image.image_utils.fetch')
def test_size_error(self, mock_fetch, mock_fileutils, mock_info,
mock_check_size):
ctxt = mock.sentinel.context
image_service = mock.Mock()
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
size = 1
mock_data = mock_info.return_value
mock_data.file_format = 'test_format'
mock_data.backing_file = None
mock_data.virtual_size = 2 * units.Gi
mock_check_size.side_effect = exception.ImageUnacceptable(
image_id='fake_image_id', reason='test')
self.assertRaises(exception.ImageUnacceptable,
image_utils.fetch_verify_image,
ctxt, image_service, image_id, dest, size=size)
class TestTemporaryDir(test.TestCase):
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch('cinder.image.image_utils.utils.tempdir')
def test_conv_dir_exists(self, mock_tempdir, mock_make,
mock_conf):
mock_conf.image_conversion_dir = mock.sentinel.conv_dir
output = image_utils.temporary_dir()
self.assertTrue(mock_make.called)
mock_tempdir.assert_called_once_with(dir=mock.sentinel.conv_dir)
self.assertEqual(output, mock_tempdir.return_value)
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch('cinder.image.image_utils.utils.tempdir')
def test_create_conv_dir(self, mock_tempdir, mock_make,
mock_conf):
mock_conf.image_conversion_dir = mock.sentinel.conv_dir
output = image_utils.temporary_dir()
mock_make.assert_called_once_with(mock.sentinel.conv_dir)
mock_tempdir.assert_called_once_with(dir=mock.sentinel.conv_dir)
self.assertEqual(output, mock_tempdir.return_value)
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch('cinder.image.image_utils.utils.tempdir')
def test_no_conv_dir(self, mock_tempdir, mock_make,
mock_conf):
mock_conf.image_conversion_dir = None
output = image_utils.temporary_dir()
self.assertTrue(mock_make.called)
mock_tempdir.assert_called_once_with(dir=None)
self.assertEqual(output, mock_tempdir.return_value)
@ddt.ddt
class TestUploadVolume(test.TestCase):
@ddt.data((mock.sentinel.disk_format, mock.sentinel.disk_format, True),
(mock.sentinel.disk_format, mock.sentinel.disk_format, False),
('ploop', 'parallels', True),
('ploop', 'parallels', False))
@mock.patch('eventlet.tpool.Proxy')
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.os')
def test_diff_format(self, image_format, mock_os, mock_temp, mock_convert,
mock_info, mock_open, mock_conf, mock_proxy):
input_format, output_format, do_compress = image_format
ctxt = mock.sentinel.context
image_service = mock.Mock()
image_meta = {'id': 'test_id',
'disk_format': input_format,
'container_format': mock.sentinel.container_format}
volume_path = mock.sentinel.volume_path
mock_os.name = 'posix'
data = mock_info.return_value
data.file_format = output_format
data.backing_file = None
temp_file = mock_temp.return_value.__enter__.return_value
output = image_utils.upload_volume(ctxt, image_service, image_meta,
volume_path, compress=do_compress)
self.assertIsNone(output)
mock_convert.assert_called_once_with(volume_path,
temp_file,
output_format,
run_as_root=True,
compress=do_compress)
mock_info.assert_called_with(temp_file, run_as_root=True)
self.assertEqual(2, mock_info.call_count)
mock_open.assert_called_once_with(temp_file, 'rb')
mock_proxy.assert_called_once_with(
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value)
@mock.patch('eventlet.tpool.Proxy')
@mock.patch('cinder.image.image_utils.utils.temporary_chown')
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.os')
def test_same_format(self, mock_os, mock_temp, mock_convert, mock_info,
mock_open, mock_conf, mock_chown, mock_proxy):
ctxt = mock.sentinel.context
image_service = mock.Mock()
image_meta = {'id': 'test_id',
'disk_format': 'raw',
'container_format': mock.sentinel.container_format}
volume_path = mock.sentinel.volume_path
mock_os.name = 'posix'
mock_os.access.return_value = False
output = image_utils.upload_volume(ctxt, image_service, image_meta,
volume_path)
self.assertIsNone(output)
self.assertFalse(mock_convert.called)
self.assertFalse(mock_info.called)
mock_chown.assert_called_once_with(volume_path)
mock_open.assert_called_once_with(volume_path, 'rb')
mock_proxy.assert_called_once_with(
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value)
@mock.patch('cinder.image.accelerator.ImageAccel._get_engine')
@mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready',
                return_value=True)
@mock.patch('eventlet.tpool.Proxy')
@mock.patch('cinder.image.image_utils.utils.temporary_chown')
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.os')
def test_same_format_compressed(self, mock_os, mock_temp, mock_convert,
mock_info, mock_open, mock_conf,
mock_chown, mock_proxy,
mock_engine_ready, mock_get_engine):
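        # Minimal spec class so mock.Mock(spec=fakeEngine) exposes the compress_img interface.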
class fakeEngine(object):
def __init__(self):
pass
def compress_img(self, src, dest, run_as_root):
pass
ctxt = mock.sentinel.context
image_service = mock.Mock()
image_meta = {'id': 'test_id',
'disk_format': 'raw',
'container_format': 'compressed'}
mock_conf.allow_compression_on_image_upload = True
volume_path = mock.sentinel.volume_path
mock_os.name = 'posix'
data = mock_info.return_value
data.file_format = 'raw'
data.backing_file = None
temp_file = mock_temp.return_value.__enter__.return_value
mock_engine = mock.Mock(spec=fakeEngine)
mock_get_engine.return_value = mock_engine
output = image_utils.upload_volume(ctxt, image_service, image_meta,
volume_path)
self.assertIsNone(output)
mock_convert.assert_called_once_with(volume_path,
temp_file,
'raw',
compress=True,
run_as_root=True)
mock_info.assert_called_with(temp_file, run_as_root=True)
self.assertEqual(2, mock_info.call_count)
mock_open.assert_called_once_with(temp_file, 'rb')
mock_proxy.assert_called_once_with(
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value)
mock_engine.compress_img.assert_called()
@mock.patch('eventlet.tpool.Proxy')
@mock.patch('cinder.image.image_utils.utils.temporary_chown')
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.os')
def test_same_format_on_nt(self, mock_os, mock_temp, mock_convert,
mock_info, mock_open, mock_conf, mock_chown,
mock_proxy):
ctxt = mock.sentinel.context
image_service = mock.Mock()
image_meta = {'id': 'test_id',
'disk_format': 'raw',
'container_format': 'bare'}
volume_path = mock.sentinel.volume_path
mock_os.name = 'nt'
mock_os.access.return_value = False
output = image_utils.upload_volume(ctxt, image_service, image_meta,
volume_path)
self.assertIsNone(output)
self.assertFalse(mock_convert.called)
self.assertFalse(mock_info.called)
mock_open.assert_called_once_with(volume_path, 'rb')
mock_proxy.assert_called_once_with(
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value)
@mock.patch('cinder.image.accelerator.ImageAccel._get_engine')
@mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready',
                return_value=True)
@mock.patch('eventlet.tpool.Proxy')
@mock.patch('cinder.image.image_utils.utils.temporary_chown')
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.os')
def test_same_format_on_nt_compressed(self, mock_os, mock_temp,
mock_convert, mock_info,
mock_open, mock_conf,
mock_chown, mock_proxy,
mock_engine_ready, mock_get_engine):
class fakeEngine(object):
def __init__(self):
pass
def compress_img(self, src, dest, run_as_root):
pass
ctxt = mock.sentinel.context
image_service = mock.Mock()
image_meta = {'id': 'test_id',
'disk_format': 'raw',
'container_format': 'compressed'}
mock_conf.allow_compression_on_image_upload = True
volume_path = mock.sentinel.volume_path
        mock_os.name = 'nt'
data = mock_info.return_value
data.file_format = 'raw'
data.backing_file = None
temp_file = mock_temp.return_value.__enter__.return_value
mock_engine = mock.Mock(spec=fakeEngine)
mock_get_engine.return_value = mock_engine
output = image_utils.upload_volume(ctxt, image_service, image_meta,
volume_path)
self.assertIsNone(output)
mock_convert.assert_called_once_with(volume_path,
temp_file,
'raw',
compress=True,
run_as_root=True)
mock_info.assert_called_with(temp_file, run_as_root=True)
self.assertEqual(2, mock_info.call_count)
mock_open.assert_called_once_with(temp_file, 'rb')
mock_proxy.assert_called_once_with(
mock_open.return_value.__enter__.return_value)
image_service.update.assert_called_once_with(
ctxt, image_meta['id'], {}, mock_proxy.return_value)
mock_engine.compress_img.assert_called()
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('six.moves.builtins.open')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.os')
def test_convert_error(self, mock_os, mock_temp, mock_convert, mock_info,
mock_open, mock_conf):
ctxt = mock.sentinel.context
image_service = mock.Mock()
image_meta = {'id': 'test_id',
'disk_format': mock.sentinel.disk_format,
'container_format': mock.sentinel.container_format}
volume_path = mock.sentinel.volume_path
mock_os.name = 'posix'
data = mock_info.return_value
data.file_format = mock.sentinel.other_disk_format
data.backing_file = None
temp_file = mock_temp.return_value.__enter__.return_value
self.assertRaises(exception.ImageUnacceptable,
image_utils.upload_volume,
ctxt, image_service, image_meta, volume_path)
mock_convert.assert_called_once_with(volume_path,
temp_file,
mock.sentinel.disk_format,
run_as_root=True,
compress=True)
mock_info.assert_called_with(temp_file, run_as_root=True)
self.assertEqual(2, mock_info.call_count)
self.assertFalse(image_service.update.called)
class TestFetchToVhd(test.TestCase):
@mock.patch('cinder.image.image_utils.fetch_to_volume_format')
def test_defaults(self, mock_fetch_to):
ctxt = mock.sentinel.context
image_service = mock.sentinel.image_service
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
blocksize = mock.sentinel.blocksize
out_subformat = 'fake_subformat'
output = image_utils.fetch_to_vhd(ctxt, image_service, image_id,
dest, blocksize,
volume_subformat=out_subformat)
self.assertIsNone(output)
mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id,
dest, 'vpc', blocksize,
volume_subformat=out_subformat,
user_id=None,
project_id=None,
run_as_root=True)
@mock.patch('cinder.image.image_utils.check_available_space')
@mock.patch('cinder.image.image_utils.fetch_to_volume_format')
def test_kwargs(self, mock_fetch_to, mock_check_space):
ctxt = mock.sentinel.context
image_service = mock.sentinel.image_service
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
blocksize = mock.sentinel.blocksize
user_id = mock.sentinel.user_id
project_id = mock.sentinel.project_id
run_as_root = mock.sentinel.run_as_root
out_subformat = 'fake_subformat'
output = image_utils.fetch_to_vhd(ctxt, image_service, image_id,
dest, blocksize, user_id=user_id,
project_id=project_id,
run_as_root=run_as_root,
volume_subformat=out_subformat)
self.assertIsNone(output)
mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id,
dest, 'vpc', blocksize,
volume_subformat=out_subformat,
user_id=user_id,
project_id=project_id,
run_as_root=run_as_root)
class TestFetchToRaw(test.TestCase):
@mock.patch('cinder.image.image_utils.fetch_to_volume_format')
def test_defaults(self, mock_fetch_to):
ctxt = mock.sentinel.context
image_service = mock.sentinel.image_service
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
blocksize = mock.sentinel.blocksize
output = image_utils.fetch_to_raw(ctxt, image_service, image_id,
dest, blocksize)
self.assertIsNone(output)
mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id,
dest, 'raw', blocksize,
user_id=None, project_id=None,
size=None, run_as_root=True)
@mock.patch('cinder.image.image_utils.check_available_space')
@mock.patch('cinder.image.image_utils.fetch_to_volume_format')
def test_kwargs(self, mock_fetch_to, mock_check_space):
ctxt = mock.sentinel.context
image_service = mock.sentinel.image_service
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
blocksize = mock.sentinel.blocksize
user_id = mock.sentinel.user_id
project_id = mock.sentinel.project_id
size = mock.sentinel.size
run_as_root = mock.sentinel.run_as_root
output = image_utils.fetch_to_raw(ctxt, image_service, image_id,
dest, blocksize, user_id=user_id,
project_id=project_id, size=size,
run_as_root=run_as_root)
self.assertIsNone(output)
mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id,
dest, 'raw', blocksize,
user_id=user_id, size=size,
project_id=project_id,
run_as_root=run_as_root)
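# Minimal stand-in for the Glance image service used by the fetch tests below.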
class FakeImageService(object):
def __init__(self, db_driver=None, image_service=None, disk_format='raw'):
self.temp_images = None
self.disk_format = disk_format
def show(self, context, image_id):
return {'size': 2 * units.Gi,
'disk_format': self.disk_format,
'container_format': 'bare',
'status': 'active'}
class TestFetchToVolumeFormat(test.TestCase):
@mock.patch('cinder.image.image_utils.check_available_space')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
@mock.patch(
'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
@mock.patch('cinder.image.image_utils.is_xenserver_format',
return_value=False)
@mock.patch('cinder.image.image_utils.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_defaults(self, mock_conf, mock_temp, mock_info, mock_fetch,
mock_is_xen, mock_repl_xen, mock_copy, mock_convert,
mock_check_space):
ctxt = mock.sentinel.context
ctxt.user_id = mock.sentinel.user_id
image_service = FakeImageService()
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
volume_format = mock.sentinel.volume_format
out_subformat = None
blocksize = mock.sentinel.blocksize
data = mock_info.return_value
data.file_format = volume_format
data.backing_file = None
data.virtual_size = 1234
tmp = mock_temp.return_value.__enter__.return_value
output = image_utils.fetch_to_volume_format(ctxt, image_service,
image_id, dest,
volume_format, blocksize)
self.assertIsNone(output)
mock_temp.assert_called_once_with()
mock_info.assert_has_calls([
mock.call(tmp, force_share=False, run_as_root=True),
mock.call(tmp, run_as_root=True)])
mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
tmp, None, None)
self.assertFalse(mock_repl_xen.called)
self.assertFalse(mock_copy.called)
mock_convert.assert_called_once_with(tmp, dest, volume_format,
out_subformat=out_subformat,
run_as_root=True,
src_format='raw')
@mock.patch('cinder.image.image_utils.check_virtual_size')
@mock.patch('cinder.image.image_utils.check_available_space')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
@mock.patch(
'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
@mock.patch('cinder.image.image_utils.is_xenserver_format',
return_value=False)
@mock.patch('cinder.image.image_utils.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_kwargs(self, mock_conf, mock_temp, mock_info, mock_fetch,
mock_is_xen, mock_repl_xen, mock_copy, mock_convert,
mock_check_space, mock_check_size):
ctxt = mock.sentinel.context
image_service = FakeImageService()
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
volume_format = mock.sentinel.volume_format
out_subformat = None
blocksize = mock.sentinel.blocksize
ctxt.user_id = user_id = mock.sentinel.user_id
project_id = mock.sentinel.project_id
size = 4321
run_as_root = mock.sentinel.run_as_root
data = mock_info.return_value
data.file_format = volume_format
data.backing_file = None
data.virtual_size = 1234
tmp = mock_temp.return_value.__enter__.return_value
output = image_utils.fetch_to_volume_format(
ctxt, image_service, image_id, dest, volume_format, blocksize,
user_id=user_id, project_id=project_id, size=size,
run_as_root=run_as_root)
self.assertIsNone(output)
mock_temp.assert_called_once_with()
mock_info.assert_has_calls([
mock.call(tmp, force_share=False, run_as_root=run_as_root),
mock.call(tmp, run_as_root=run_as_root)])
mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
tmp, user_id, project_id)
self.assertFalse(mock_repl_xen.called)
self.assertFalse(mock_copy.called)
mock_convert.assert_called_once_with(tmp, dest, volume_format,
out_subformat=out_subformat,
run_as_root=run_as_root,
src_format='raw')
mock_check_size.assert_called_once_with(data.virtual_size,
size, image_id)
@mock.patch('cinder.image.image_utils.check_virtual_size')
@mock.patch('cinder.image.image_utils.check_available_space')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
@mock.patch(
'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
@mock.patch('cinder.image.image_utils.is_xenserver_format',
return_value=True)
@mock.patch('cinder.image.image_utils.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_convert_from_vhd(self, mock_conf, mock_temp, mock_info,
mock_fetch, mock_is_xen, mock_repl_xen,
mock_copy, mock_convert, mock_check_space,
mock_check_size):
ctxt = mock.sentinel.context
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
volume_format = mock.sentinel.volume_format
out_subformat = None
blocksize = mock.sentinel.blocksize
ctxt.user_id = user_id = mock.sentinel.user_id
project_id = mock.sentinel.project_id
size = 4321
run_as_root = mock.sentinel.run_as_root
data = mock_info.return_value
data.file_format = volume_format
data.backing_file = None
data.virtual_size = 1234
tmp = mock_temp.return_value.__enter__.return_value
image_service = FakeImageService(disk_format='vhd')
expect_format = 'vpc'
output = image_utils.fetch_to_volume_format(
ctxt, image_service, image_id, dest, volume_format, blocksize,
user_id=user_id, project_id=project_id, size=size,
run_as_root=run_as_root)
self.assertIsNone(output)
mock_temp.assert_called_once_with()
mock_info.assert_has_calls([
mock.call(tmp, force_share=False, run_as_root=run_as_root),
mock.call(tmp, run_as_root=run_as_root)])
mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
tmp, user_id, project_id)
mock_repl_xen.assert_called_once_with(tmp)
self.assertFalse(mock_copy.called)
mock_convert.assert_called_once_with(tmp, dest, volume_format,
out_subformat=out_subformat,
run_as_root=run_as_root,
src_format=expect_format)
@mock.patch('cinder.image.image_utils.check_virtual_size')
@mock.patch('cinder.image.image_utils.check_available_space')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
@mock.patch('cinder.image.image_utils.is_xenserver_format',
return_value=False)
@mock.patch('cinder.image.image_utils.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_convert_from_iso(self, mock_conf, mock_temp, mock_info,
mock_fetch, mock_is_xen, mock_copy,
mock_convert, mock_check_space,
mock_check_size):
ctxt = mock.sentinel.context
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
volume_format = mock.sentinel.volume_format
out_subformat = None
blocksize = mock.sentinel.blocksize
ctxt.user_id = user_id = mock.sentinel.user_id
project_id = mock.sentinel.project_id
size = 4321
run_as_root = mock.sentinel.run_as_root
data = mock_info.return_value
data.file_format = volume_format
data.backing_file = None
data.virtual_size = 1234
tmp = mock_temp.return_value.__enter__.return_value
image_service = FakeImageService(disk_format='iso')
expect_format = 'raw'
output = image_utils.fetch_to_volume_format(
ctxt, image_service, image_id, dest, volume_format, blocksize,
user_id=user_id, project_id=project_id, size=size,
run_as_root=run_as_root)
self.assertIsNone(output)
mock_temp.assert_called_once_with()
mock_info.assert_has_calls([
mock.call(tmp, force_share=False, run_as_root=run_as_root),
mock.call(tmp, run_as_root=run_as_root)])
mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
tmp, user_id, project_id)
self.assertFalse(mock_copy.called)
mock_convert.assert_called_once_with(tmp, dest, volume_format,
out_subformat=out_subformat,
run_as_root=run_as_root,
src_format=expect_format)
@mock.patch('cinder.image.image_utils.check_available_space',
new=mock.Mock())
@mock.patch('cinder.image.image_utils.is_xenserver_format',
new=mock.Mock(return_value=False))
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
@mock.patch(
'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
@mock.patch('cinder.image.image_utils.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_temporary_images(self, mock_conf, mock_temp, mock_info,
mock_fetch, mock_repl_xen,
mock_copy, mock_convert):
ctxt = mock.sentinel.context
ctxt.user_id = mock.sentinel.user_id
image_service = FakeImageService()
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
volume_format = mock.sentinel.volume_format
out_subformat = None
blocksize = mock.sentinel.blocksize
data = mock_info.return_value
data.file_format = volume_format
data.backing_file = None
data.virtual_size = 1234
tmp = mock.sentinel.tmp
dummy = mock.sentinel.dummy
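        # Two temp files get created: one by TemporaryImages.fetch, a second inside fetch_to_volume_format.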
mock_temp.return_value.__enter__.side_effect = [tmp, dummy]
with image_utils.TemporaryImages.fetch(image_service, ctxt,
image_id) as tmp_img:
self.assertEqual(tmp_img, tmp)
output = image_utils.fetch_to_volume_format(ctxt, image_service,
image_id, dest,
volume_format,
blocksize)
self.assertIsNone(output)
self.assertEqual(2, mock_temp.call_count)
mock_info.assert_has_calls([
mock.call(tmp, force_share=False, run_as_root=True),
mock.call(dummy, force_share=False, run_as_root=True),
mock.call(tmp, run_as_root=True)])
mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
tmp, None, None)
self.assertFalse(mock_repl_xen.called)
self.assertFalse(mock_copy.called)
mock_convert.assert_called_once_with(tmp, dest, volume_format,
out_subformat=out_subformat,
run_as_root=True,
src_format='raw')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
@mock.patch(
'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
@mock.patch('cinder.image.image_utils.is_xenserver_format',
return_value=False)
@mock.patch('cinder.image.image_utils.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info',
side_effect=processutils.ProcessExecutionError)
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_no_qemu_img_and_is_raw(self, mock_conf, mock_temp, mock_info,
mock_fetch, mock_is_xen, mock_repl_xen,
mock_copy, mock_convert):
ctxt = mock.sentinel.context
image_service = mock.Mock(temp_images=None)
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
volume_format = mock.sentinel.volume_format
blocksize = mock.sentinel.blocksize
ctxt.user_id = user_id = mock.sentinel.user_id
project_id = mock.sentinel.project_id
size = 4321
run_as_root = mock.sentinel.run_as_root
tmp = mock_temp.return_value.__enter__.return_value
image_service.show.return_value = {'disk_format': 'raw',
'size': 41126400}
image_size_m = math.ceil(float(41126400) / units.Mi)
output = image_utils.fetch_to_volume_format(
ctxt, image_service, image_id, dest, volume_format, blocksize,
user_id=user_id, project_id=project_id, size=size,
run_as_root=run_as_root)
self.assertIsNone(output)
image_service.show.assert_called_once_with(ctxt, image_id)
mock_temp.assert_called_once_with()
mock_info.assert_called_once_with(tmp,
force_share=False,
run_as_root=run_as_root)
mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
tmp, user_id, project_id)
self.assertFalse(mock_repl_xen.called)
mock_copy.assert_called_once_with(tmp, dest, image_size_m,
blocksize)
self.assertFalse(mock_convert.called)
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
@mock.patch(
'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
@mock.patch('cinder.image.image_utils.is_xenserver_format',
return_value=False)
@mock.patch('cinder.image.image_utils.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info',
side_effect=processutils.ProcessExecutionError)
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_no_qemu_img_not_raw(self, mock_conf, mock_temp, mock_info,
mock_fetch, mock_is_xen, mock_repl_xen,
mock_copy, mock_convert):
ctxt = mock.sentinel.context
image_service = mock.Mock()
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
volume_format = mock.sentinel.volume_format
blocksize = mock.sentinel.blocksize
user_id = mock.sentinel.user_id
project_id = mock.sentinel.project_id
size = 4321
run_as_root = mock.sentinel.run_as_root
tmp = mock_temp.return_value.__enter__.return_value
image_service.show.return_value = {'disk_format': 'not_raw'}
self.assertRaises(
exception.ImageUnacceptable,
image_utils.fetch_to_volume_format,
ctxt, image_service, image_id, dest, volume_format, blocksize,
user_id=user_id, project_id=project_id, size=size,
run_as_root=run_as_root)
image_service.show.assert_called_once_with(ctxt, image_id)
mock_temp.assert_called_once_with()
mock_info.assert_called_once_with(tmp,
force_share=False,
run_as_root=run_as_root)
self.assertFalse(mock_fetch.called)
self.assertFalse(mock_repl_xen.called)
self.assertFalse(mock_copy.called)
self.assertFalse(mock_convert.called)
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
@mock.patch(
'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
@mock.patch('cinder.image.image_utils.is_xenserver_format',
return_value=False)
@mock.patch('cinder.image.image_utils.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info',
side_effect=processutils.ProcessExecutionError)
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_no_qemu_img_no_metadata(self, mock_conf, mock_temp, mock_info,
mock_fetch, mock_is_xen, mock_repl_xen,
mock_copy, mock_convert):
ctxt = mock.sentinel.context
image_service = mock.Mock(temp_images=None)
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
volume_format = mock.sentinel.volume_format
blocksize = mock.sentinel.blocksize
ctxt.user_id = user_id = mock.sentinel.user_id
project_id = mock.sentinel.project_id
size = 4321
run_as_root = mock.sentinel.run_as_root
tmp = mock_temp.return_value.__enter__.return_value
image_service.show.return_value = None
self.assertRaises(
exception.ImageUnacceptable,
image_utils.fetch_to_volume_format,
ctxt, image_service, image_id, dest, volume_format, blocksize,
user_id=user_id, project_id=project_id, size=size,
run_as_root=run_as_root)
image_service.show.assert_called_once_with(ctxt, image_id)
mock_temp.assert_called_once_with()
mock_info.assert_called_once_with(tmp,
force_share=False,
run_as_root=run_as_root)
self.assertFalse(mock_fetch.called)
self.assertFalse(mock_repl_xen.called)
self.assertFalse(mock_copy.called)
self.assertFalse(mock_convert.called)
@mock.patch('cinder.image.image_utils.check_virtual_size')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
@mock.patch(
'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
@mock.patch('cinder.image.image_utils.is_xenserver_format',
return_value=False)
@mock.patch('cinder.image.image_utils.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_size_error(self, mock_conf, mock_temp, mock_info, mock_fetch,
mock_is_xen, mock_repl_xen, mock_copy, mock_convert,
mock_check_size):
ctxt = mock.sentinel.context
image_service = mock.Mock(temp_images=None)
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
volume_format = mock.sentinel.volume_format
blocksize = mock.sentinel.blocksize
ctxt.user_id = user_id = mock.sentinel.user_id
project_id = mock.sentinel.project_id
size = 1234
run_as_root = mock.sentinel.run_as_root
data = mock_info.return_value
data.file_format = volume_format
data.backing_file = None
data.virtual_size = int(1234.5 * units.Gi)
tmp = mock_temp.return_value.__enter__.return_value
mock_check_size.side_effect = exception.ImageUnacceptable(
image_id='fake_image_id', reason='test')
self.assertRaises(
exception.ImageUnacceptable,
image_utils.fetch_to_volume_format,
ctxt, image_service, image_id, dest, volume_format, blocksize,
user_id=user_id, project_id=project_id, size=size,
run_as_root=run_as_root)
image_service.show.assert_called_once_with(ctxt, image_id)
mock_temp.assert_called_once_with()
mock_info.assert_has_calls([
mock.call(tmp, force_share=False, run_as_root=run_as_root),
mock.call(tmp, run_as_root=run_as_root)])
mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
tmp, user_id, project_id)
self.assertFalse(mock_repl_xen.called)
self.assertFalse(mock_copy.called)
self.assertFalse(mock_convert.called)
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
@mock.patch(
'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
@mock.patch('cinder.image.image_utils.is_xenserver_format',
return_value=False)
@mock.patch('cinder.image.image_utils.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_qemu_img_parse_error(self, mock_conf, mock_temp, mock_info,
mock_fetch, mock_is_xen, mock_repl_xen,
mock_copy, mock_convert):
ctxt = mock.sentinel.context
image_service = mock.Mock(temp_images=None)
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
volume_format = mock.sentinel.volume_format
blocksize = mock.sentinel.blocksize
ctxt.user_id = user_id = mock.sentinel.user_id
project_id = mock.sentinel.project_id
size = 4321
run_as_root = mock.sentinel.run_as_root
data = mock_info.return_value
data.file_format = None
data.backing_file = None
data.virtual_size = 1234
tmp = mock_temp.return_value.__enter__.return_value
self.assertRaises(
exception.ImageUnacceptable,
image_utils.fetch_to_volume_format,
ctxt, image_service, image_id, dest, volume_format, blocksize,
user_id=user_id, project_id=project_id, size=size,
run_as_root=run_as_root)
image_service.show.assert_called_once_with(ctxt, image_id)
mock_temp.assert_called_once_with()
mock_info.assert_has_calls([
mock.call(tmp, force_share=False, run_as_root=run_as_root),
mock.call(tmp, run_as_root=run_as_root)])
mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
tmp, user_id, project_id)
self.assertFalse(mock_repl_xen.called)
self.assertFalse(mock_copy.called)
self.assertFalse(mock_convert.called)
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
@mock.patch(
'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
@mock.patch('cinder.image.image_utils.is_xenserver_format',
return_value=False)
@mock.patch('cinder.image.image_utils.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_backing_file_error(self, mock_conf, mock_temp, mock_info,
mock_fetch, mock_is_xen, mock_repl_xen,
mock_copy, mock_convert):
ctxt = mock.sentinel.context
image_service = mock.Mock(temp_images=None)
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
volume_format = mock.sentinel.volume_format
blocksize = mock.sentinel.blocksize
ctxt.user_id = user_id = mock.sentinel.user_id
project_id = mock.sentinel.project_id
size = 4321
run_as_root = mock.sentinel.run_as_root
data = mock_info.return_value
data.file_format = volume_format
data.backing_file = mock.sentinel.backing_file
data.virtual_size = 1234
tmp = mock_temp.return_value.__enter__.return_value
self.assertRaises(
exception.ImageUnacceptable,
image_utils.fetch_to_volume_format,
ctxt, image_service, image_id, dest, volume_format, blocksize,
user_id=user_id, project_id=project_id, size=size,
run_as_root=run_as_root)
image_service.show.assert_called_once_with(ctxt, image_id)
mock_temp.assert_called_once_with()
mock_info.assert_has_calls([
mock.call(tmp, force_share=False, run_as_root=run_as_root),
mock.call(tmp, run_as_root=run_as_root)])
mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
tmp, user_id, project_id)
self.assertFalse(mock_repl_xen.called)
self.assertFalse(mock_copy.called)
self.assertFalse(mock_convert.called)
@mock.patch('cinder.image.image_utils.check_virtual_size')
@mock.patch('cinder.image.image_utils.check_available_space')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
@mock.patch(
'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
@mock.patch('cinder.image.image_utils.is_xenserver_format',
return_value=True)
@mock.patch('cinder.image.image_utils.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_xenserver_to_vhd(self, mock_conf, mock_temp, mock_info,
mock_fetch, mock_is_xen, mock_repl_xen,
mock_copy, mock_convert, mock_check_space,
mock_check_size):
ctxt = mock.sentinel.context
image_service = FakeImageService()
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
volume_format = mock.sentinel.volume_format
blocksize = mock.sentinel.blocksize
ctxt.user_id = user_id = mock.sentinel.user_id
project_id = mock.sentinel.project_id
size = 4321
run_as_root = mock.sentinel.run_as_root
data = mock_info.return_value
data.file_format = volume_format
data.backing_file = None
data.virtual_size = 1234
tmp = mock_temp.return_value.__enter__.return_value
output = image_utils.fetch_to_volume_format(
ctxt, image_service, image_id, dest, volume_format, blocksize,
user_id=user_id, project_id=project_id, size=size,
run_as_root=run_as_root)
self.assertIsNone(output)
mock_temp.assert_called_once_with()
mock_info.assert_has_calls([
mock.call(tmp, force_share=False, run_as_root=run_as_root),
mock.call(tmp, run_as_root=run_as_root)])
mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
tmp, user_id, project_id)
mock_repl_xen.assert_called_once_with(tmp)
self.assertFalse(mock_copy.called)
mock_convert.assert_called_once_with(tmp, dest, volume_format,
out_subformat=None,
run_as_root=run_as_root,
src_format='raw')
@mock.patch('cinder.image.image_utils.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info',
side_effect=processutils.ProcessExecutionError)
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_no_qemu_img_fetch_verify_image(self, mock_conf,
mock_temp, mock_info,
mock_fetch):
ctxt = mock.sentinel.context
image_service = mock.Mock(temp_images=None)
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
ctxt.user_id = user_id = mock.sentinel.user_id
project_id = mock.sentinel.project_id
size = 4321
run_as_root = mock.sentinel.run_as_root
image_service.show.return_value = {'disk_format': 'raw',
'size': 41126400}
image_utils.fetch_verify_image(
ctxt, image_service, image_id, dest,
user_id=user_id, project_id=project_id, size=size,
run_as_root=run_as_root)
image_service.show.assert_called_once_with(ctxt, image_id)
mock_info.assert_called_once_with(dest,
force_share=False,
run_as_root=run_as_root)
mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
dest, None, None)
@mock.patch('cinder.image.image_utils.qemu_img_info',
side_effect=processutils.ProcessExecutionError)
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_get_qemu_data_returns_none(self, mock_conf, mock_temp, mock_info):
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
run_as_root = mock.sentinel.run_as_root
disk_format_raw = True
has_meta = True
output = image_utils.get_qemu_data(image_id, has_meta,
disk_format_raw, dest,
run_as_root=run_as_root)
self.assertIsNone(output)
@mock.patch('cinder.image.image_utils.qemu_img_info',
side_effect=processutils.ProcessExecutionError)
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_get_qemu_data_with_image_meta_exception(self, mock_conf,
mock_temp, mock_info):
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
run_as_root = mock.sentinel.run_as_root
disk_format_raw = False
has_meta = True
self.assertRaises(
exception.ImageUnacceptable,
image_utils.get_qemu_data, image_id, has_meta, disk_format_raw,
dest, run_as_root=run_as_root)
@mock.patch('cinder.image.image_utils.qemu_img_info',
side_effect=processutils.ProcessExecutionError)
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_get_qemu_data_without_image_meta_except(self, mock_conf,
mock_temp, mock_info):
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
run_as_root = mock.sentinel.run_as_root
disk_format_raw = False
has_meta = False
self.assertRaises(
exception.ImageUnacceptable,
image_utils.get_qemu_data, image_id, has_meta, disk_format_raw,
dest, run_as_root=run_as_root)
@mock.patch('cinder.image.accelerator.is_gzip_compressed',
                return_value=True)
@mock.patch('cinder.image.accelerator.ImageAccel._get_engine')
@mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready',
                return_value=True)
@mock.patch('cinder.image.image_utils.check_available_space')
@mock.patch('cinder.image.image_utils.convert_image')
@mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
@mock.patch(
'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
@mock.patch('cinder.image.image_utils.is_xenserver_format',
return_value=False)
@mock.patch('cinder.image.image_utils.fetch')
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.temporary_file')
@mock.patch('cinder.image.image_utils.CONF')
def test_defaults_compressed(self, mock_conf, mock_temp, mock_info,
mock_fetch, mock_is_xen, mock_repl_xen,
mock_copy, mock_convert, mock_check_space,
mock_engine_ready, mock_get_engine,
mock_gzip_compressed):
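        # Local fakes: a decompression-engine spec for mock.Mock and an image service reporting a compressed image.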
class fakeEngine(object):
def __init__(self):
pass
def decompress_img(self, src, dest, run_as_root):
pass
class FakeImageService(object):
def __init__(self, db_driver=None,
image_service=None, disk_format='raw'):
self.temp_images = None
self.disk_format = disk_format
def show(self, context, image_id):
return {'size': 2 * units.Gi,
'disk_format': self.disk_format,
'container_format': 'compressed',
'status': 'active'}
ctxt = mock.sentinel.context
ctxt.user_id = mock.sentinel.user_id
image_service = FakeImageService()
image_id = mock.sentinel.image_id
dest = mock.sentinel.dest
volume_format = mock.sentinel.volume_format
out_subformat = None
blocksize = mock.sentinel.blocksize
data = mock_info.return_value
data.file_format = volume_format
data.backing_file = None
data.virtual_size = 1234
tmp = mock_temp.return_value.__enter__.return_value
mock_engine = mock.Mock(spec=fakeEngine)
mock_get_engine.return_value = mock_engine
output = image_utils.fetch_to_volume_format(ctxt, image_service,
image_id, dest,
volume_format, blocksize)
self.assertIsNone(output)
mock_temp.assert_called_once_with()
mock_info.assert_has_calls([
mock.call(tmp, force_share=False, run_as_root=True),
mock.call(tmp, run_as_root=True)])
mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
tmp, None, None)
self.assertFalse(mock_repl_xen.called)
self.assertFalse(mock_copy.called)
mock_convert.assert_called_once_with(tmp, dest, volume_format,
out_subformat=out_subformat,
run_as_root=True,
src_format='raw')
mock_engine.decompress_img.assert_called()
class TestXenserverUtils(test.TestCase):
def test_is_xenserver_format(self):
image_meta1 = {'disk_format': 'vhd', 'container_format': 'ovf'}
self.assertTrue(image_utils.is_xenserver_format(image_meta1))
image_meta2 = {'disk_format': 'test_disk_format',
'container_format': 'test_cont_format'}
self.assertFalse(image_utils.is_xenserver_format(image_meta2))
@mock.patch('cinder.image.image_utils.utils.execute')
def test_extract_targz(self, mock_exec):
name = mock.sentinel.archive_name
target = mock.sentinel.target
output = image_utils.extract_targz(name, target)
mock_exec.assert_called_once_with('tar', '-xzf', name, '-C', target)
self.assertIsNone(output)
class TestVhdUtils(test.TestCase):
@mock.patch('cinder.image.image_utils.utils.execute')
def test_set_vhd_parent(self, mock_exec):
vhd_path = mock.sentinel.vhd_path
parentpath = mock.sentinel.parentpath
output = image_utils.set_vhd_parent(vhd_path, parentpath)
mock_exec.assert_called_once_with('vhd-util', 'modify', '-n', vhd_path,
'-p', parentpath)
self.assertIsNone(output)
@mock.patch('cinder.image.image_utils.set_vhd_parent')
def test_fix_vhd_chain(self, mock_set_parent):
vhd_chain = (mock.sentinel.first,
mock.sentinel.second,
mock.sentinel.third,
mock.sentinel.fourth,
mock.sentinel.fifth)
output = image_utils.fix_vhd_chain(vhd_chain)
self.assertIsNone(output)
mock_set_parent.assert_has_calls([
mock.call(mock.sentinel.first, mock.sentinel.second),
mock.call(mock.sentinel.second, mock.sentinel.third),
mock.call(mock.sentinel.third, mock.sentinel.fourth),
mock.call(mock.sentinel.fourth, mock.sentinel.fifth)])
@mock.patch('cinder.image.image_utils.utils.execute',
return_value=(98765.43210, mock.sentinel.error))
def test_get_vhd_size(self, mock_exec):
vhd_path = mock.sentinel.vhd_path
output = image_utils.get_vhd_size(vhd_path)
mock_exec.assert_called_once_with('vhd-util', 'query', '-n', vhd_path,
'-v')
self.assertEqual(98765, output)
@mock.patch('cinder.image.image_utils.utils.execute')
def test_resize_vhd(self, mock_exec):
vhd_path = mock.sentinel.vhd_path
size = 387549349
journal = mock.sentinel.journal
output = image_utils.resize_vhd(vhd_path, size, journal)
self.assertIsNone(output)
mock_exec.assert_called_once_with('vhd-util', 'resize', '-n', vhd_path,
'-s', str(size), '-j', journal)
@mock.patch('cinder.image.image_utils.utils.execute')
def test_coalesce_vhd(self, mock_exec):
vhd_path = mock.sentinel.vhd_path
output = image_utils.coalesce_vhd(vhd_path)
self.assertIsNone(output)
mock_exec.assert_called_once_with('vhd-util', 'coalesce', '-n',
vhd_path)
@mock.patch('cinder.image.image_utils.temporary_dir')
@mock.patch('cinder.image.image_utils.coalesce_vhd')
@mock.patch('cinder.image.image_utils.resize_vhd')
@mock.patch('cinder.image.image_utils.get_vhd_size')
@mock.patch('cinder.image.image_utils.utils.execute')
def test_coalesce_chain(self, mock_exec, mock_size, mock_resize,
mock_coal, mock_temp):
vhd_chain = (mock.sentinel.first,
mock.sentinel.second,
mock.sentinel.third,
mock.sentinel.fourth,
mock.sentinel.fifth)
# os.path.join does not work with MagicMock objects on Windows.
mock_temp.return_value.__enter__.return_value = 'fake_temp_dir'
output = image_utils.coalesce_chain(vhd_chain)
self.assertEqual(mock.sentinel.fifth, output)
mock_size.assert_has_calls([
mock.call(mock.sentinel.first),
mock.call(mock.sentinel.second),
mock.call(mock.sentinel.third),
mock.call(mock.sentinel.fourth)])
mock_resize.assert_has_calls([
mock.call(mock.sentinel.second, mock_size.return_value, mock.ANY),
mock.call(mock.sentinel.third, mock_size.return_value, mock.ANY),
mock.call(mock.sentinel.fourth, mock_size.return_value, mock.ANY),
mock.call(mock.sentinel.fifth, mock_size.return_value, mock.ANY)])
mock_coal.assert_has_calls([
mock.call(mock.sentinel.first),
mock.call(mock.sentinel.second),
mock.call(mock.sentinel.third),
mock.call(mock.sentinel.fourth)])
@mock.patch('cinder.image.image_utils.os.path')
def test_discover_vhd_chain(self, mock_path):
directory = '/some/test/directory'
mock_path.join.side_effect = lambda x, y: '/'.join((x, y))
mock_path.exists.side_effect = (True, True, True, False)
output = image_utils.discover_vhd_chain(directory)
expected_output = ['/some/test/directory/0.vhd',
'/some/test/directory/1.vhd',
'/some/test/directory/2.vhd']
self.assertEqual(expected_output, output)
@mock.patch('cinder.image.image_utils.temporary_dir')
@mock.patch('cinder.image.image_utils.os.rename')
@mock.patch('cinder.image.image_utils.fileutils.delete_if_exists')
@mock.patch('cinder.image.image_utils.coalesce_chain')
@mock.patch('cinder.image.image_utils.fix_vhd_chain')
@mock.patch('cinder.image.image_utils.discover_vhd_chain')
@mock.patch('cinder.image.image_utils.extract_targz')
def test_replace_xenserver_image_with_coalesced_vhd(
self, mock_targz, mock_discover, mock_fix, mock_coal, mock_delete,
mock_rename, mock_temp):
image_file = mock.sentinel.image_file
tmp = mock_temp.return_value.__enter__.return_value
output = image_utils.replace_xenserver_image_with_coalesced_vhd(
image_file)
self.assertIsNone(output)
mock_targz.assert_called_once_with(image_file, tmp)
mock_discover.assert_called_once_with(tmp)
mock_fix.assert_called_once_with(mock_discover.return_value)
mock_coal.assert_called_once_with(mock_discover.return_value)
mock_delete.assert_called_once_with(image_file)
mock_rename.assert_called_once_with(mock_coal.return_value, image_file)
class TestCreateTemporaryFile(test.TestCase):
@mock.patch('cinder.image.image_utils.os.close')
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('cinder.image.image_utils.os.makedirs')
@mock.patch('cinder.image.image_utils.tempfile.mkstemp')
def test_create_temporary_file_no_dir(self, mock_mkstemp, mock_dirs,
mock_conf, mock_close):
mock_conf.image_conversion_dir = None
fd = mock.sentinel.file_descriptor
path = mock.sentinel.absolute_pathname
mock_mkstemp.return_value = (fd, path)
output = image_utils.create_temporary_file()
self.assertEqual(path, output)
mock_mkstemp.assert_called_once_with(dir=None)
mock_close.assert_called_once_with(fd)
@mock.patch('cinder.image.image_utils.os.close')
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('cinder.image.image_utils.os.makedirs')
@mock.patch('cinder.image.image_utils.tempfile.mkstemp')
def test_create_temporary_file_with_dir(self, mock_mkstemp, mock_dirs,
mock_conf, mock_close):
conv_dir = mock.sentinel.image_conversion_dir
mock_conf.image_conversion_dir = conv_dir
fd = mock.sentinel.file_descriptor
path = mock.sentinel.absolute_pathname
mock_mkstemp.return_value = (fd, path)
output = image_utils.create_temporary_file()
self.assertEqual(path, output)
self.assertTrue(mock_dirs.called)
mock_mkstemp.assert_called_once_with(dir=conv_dir)
mock_close.assert_called_once_with(fd)
@mock.patch('cinder.image.image_utils.os.close')
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('cinder.image.image_utils.fileutils.ensure_tree')
@mock.patch('cinder.image.image_utils.tempfile.mkstemp')
def test_create_temporary_file_and_dir(self, mock_mkstemp, mock_dirs,
mock_conf, mock_close):
conv_dir = mock.sentinel.image_conversion_dir
mock_conf.image_conversion_dir = conv_dir
fd = mock.sentinel.file_descriptor
path = mock.sentinel.absolute_pathname
mock_mkstemp.return_value = (fd, path)
output = image_utils.create_temporary_file()
self.assertEqual(path, output)
mock_dirs.assert_called_once_with(conv_dir)
mock_mkstemp.assert_called_once_with(dir=conv_dir)
mock_close.assert_called_once_with(fd)
@mock.patch('cinder.image.image_utils.os.remove')
@mock.patch('cinder.image.image_utils.os.path.join')
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('cinder.image.image_utils.os.listdir')
@mock.patch('cinder.image.image_utils.os.path.exists', return_value=True)
def test_cleanup_temporary_file(self, mock_path, mock_listdir, mock_conf,
mock_join, mock_remove):
|
@mock.patch('cinder.image.image_utils.os.remove')
@mock.patch('cinder.image.image_utils.os.listdir')
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('cinder.image.image_utils.os.path.exists', return_value=False)
def test_cleanup_temporary_file_with_not_exist_path(self, mock_path,
mock_conf,
mock_listdir,
mock_remove):
conv_dir = mock.sentinel.image_conversion_dir
mock_conf.image_conversion_dir = conv_dir
image_utils.cleanup_temporary_file('host@backend1')
self.assertFalse(mock_listdir.called)
self.assertFalse(mock_remove.called)
@mock.patch('cinder.image.image_utils.os.remove')
@mock.patch('cinder.image.image_utils.os.path.join')
@mock.patch('cinder.image.image_utils.CONF')
@mock.patch('cinder.image.image_utils.os.listdir')
@mock.patch('cinder.image.image_utils.os.path.exists', return_value=True)
def test_cleanup_temporary_file_with_exception(self, mock_path,
mock_listdir, mock_conf,
mock_join, mock_remove):
mock_listdir.return_value = ['tmphost@backend1', 'tmphost@backend2']
conv_dir = mock.sentinel.image_conversion_dir
mock_conf.image_conversion_dir = conv_dir
mock_join.return_value = '/test/tmp/tmphost@backend1'
mock_remove.side_effect = OSError
image_utils.cleanup_temporary_file('host@backend1')
mock_listdir.assert_called_once_with(conv_dir)
mock_remove.assert_called_once_with('/test/tmp/tmphost@backend1')
class TestTemporaryFileContextManager(test.TestCase):
@mock.patch('cinder.image.image_utils.create_temporary_file',
return_value=mock.sentinel.temporary_file)
@mock.patch('cinder.image.image_utils.fileutils.delete_if_exists')
def test_temporary_file(self, mock_delete, mock_create):
with image_utils.temporary_file() as tmp_file:
self.assertEqual(mock.sentinel.temporary_file, tmp_file)
self.assertFalse(mock_delete.called)
mock_delete.assert_called_once_with(mock.sentinel.temporary_file)
class TestImageUtils(test.TestCase):
def test_get_virtual_size(self):
image_id = fake.IMAGE_ID
virtual_size = 1073741824
volume_size = 2
virt_size = image_utils.check_virtual_size(virtual_size,
volume_size,
image_id)
self.assertEqual(1, virt_size)
def test_get_bigger_virtual_size(self):
image_id = fake.IMAGE_ID
virtual_size = 3221225472
volume_size = 2
self.assertRaises(exception.ImageUnacceptable,
image_utils.check_virtual_size,
virtual_size,
volume_size,
image_id)
def test_decode_cipher(self):
expected = {'cipher_alg': 'aes-256',
'cipher_mode': 'xts',
'ivgen_alg': 'essiv'}
result = image_utils.decode_cipher('aes-xts-essiv', 256)
self.assertEqual(expected, result)
| mock_listdir.return_value = ['tmphost@backend1', 'tmphost@backend2']
conv_dir = mock.sentinel.image_conversion_dir
mock_conf.image_conversion_dir = conv_dir
mock_join.return_value = '/test/tmp/tmphost@backend1'
image_utils.cleanup_temporary_file('host@backend1')
mock_listdir.assert_called_once_with(conv_dir)
mock_remove.assert_called_once_with('/test/tmp/tmphost@backend1') |
dialog-zha-device-children.ts | import { CSSResultGroup, html, LitElement, TemplateResult } from "lit";
import memoizeOne from "memoize-one";
import { computeRTLDirection } from "../../../../../common/util/compute_rtl";
import { createCloseHeading } from "../../../../../components/ha-dialog";
import { haStyleDialog } from "../../../../../resources/styles";
import { HomeAssistant } from "../../../../../types";
import { ZHADeviceChildrenDialogParams } from "./show-dialog-zha-device-children";
import "../../../../../components/data-table/ha-data-table";
import type {
DataTableColumnContainer,
DataTableRowData,
} from "../../../../../components/data-table/ha-data-table";
import "../../../../../components/ha-circular-progress";
import { fetchDevices, ZHADevice } from "../../../../../data/zha";
import { fireEvent } from "../../../../../common/dom/fire_event";
import { customElement, property, state } from "lit/decorators";
export interface DeviceRowData extends DataTableRowData {
id: string;
name: string;
lqi: number;
}
@customElement("dialog-zha-device-children")
class | extends LitElement {
@property({ attribute: false }) public hass!: HomeAssistant;
@state() private _device: ZHADevice | undefined;
@state() private _devices: Map<string, ZHADevice> | undefined;
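  // Memoized mapping of the device's neighbor table to data-table rows, resolving each child by IEEE address.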
private _deviceChildren = memoizeOne(
(
device: ZHADevice | undefined,
devices: Map<string, ZHADevice> | undefined
) => {
const outputDevices: DeviceRowData[] = [];
if (device && devices) {
device.neighbors.forEach((child) => {
const zhaDevice: ZHADevice | undefined = devices.get(child.ieee);
if (zhaDevice) {
outputDevices.push({
name: zhaDevice.user_given_name || zhaDevice.name,
id: zhaDevice.device_reg_id,
lqi: child.lqi,
});
}
});
}
return outputDevices;
}
);
private _columns: DataTableColumnContainer = {
name: {
title: "Name",
sortable: true,
filterable: true,
direction: "asc",
grows: true,
},
lqi: {
title: "LQI",
sortable: true,
filterable: true,
direction: "asc",
width: "75px",
},
};
public showDialog(params: ZHADeviceChildrenDialogParams): void {
this._device = params.device;
this._fetchData();
}
public closeDialog(): void {
this._device = undefined;
this._devices = undefined;
fireEvent(this, "dialog-closed", { dialog: this.localName });
}
protected render(): TemplateResult {
if (!this._device) {
return html``;
}
return html`
<ha-dialog
hideActions
open
@closed=${this.closeDialog}
.heading=${createCloseHeading(
this.hass,
this.hass.localize(`ui.dialogs.zha_device_info.device_children`)
)}
>
${!this._devices
? html`<ha-circular-progress
alt="Loading"
size="large"
active
></ha-circular-progress>`
: html`<ha-data-table
.columns=${this._columns}
.data=${this._deviceChildren(this._device, this._devices)}
auto-height
.dir=${computeRTLDirection(this.hass)}
.searchLabel=${this.hass.localize(
"ui.components.data-table.search"
)}
.noDataText=${this.hass.localize(
"ui.components.data-table.no-data"
)}
></ha-data-table>`}
</ha-dialog>
`;
}
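  // Fetch all ZHA devices once and index them by IEEE address for neighbor lookups.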
private async _fetchData(): Promise<void> {
if (this._device && this.hass) {
      const devices = await fetchDevices(this.hass);
this._devices = new Map(
devices.map((device: ZHADevice) => [device.ieee, device])
);
}
}
static get styles(): CSSResultGroup {
return haStyleDialog;
}
}
declare global {
interface HTMLElementTagNameMap {
"dialog-zha-device-children": DialogZHADeviceChildren;
}
}
| DialogZHADeviceChildren |
models.py | from django.db import models
class Estacionamiento(models.Model):
nombre_duenio = models.CharField(max_length=30)
nombre_est = models.CharField(max_length=30,unique=True)
direccion = models.CharField(max_length=30)
    telefono1 = models.CharField(max_length=11)
    telefono2 = models.CharField(max_length=11, null=True, blank=True)
    telefono3 = models.CharField(max_length=11, null=True, blank=True)
email1 = models.EmailField()
email2 = models.EmailField(null=True, blank=True)
email3 = models.EmailField(null=True, blank=True)
rif = models.IntegerField()
capacidad = models.IntegerField()
tarifa = models.DecimalField(max_digits=7, decimal_places=2)
horaI = models.TimeField()
horaF = models.TimeField()
reservaI = models.TimeField(null=True, blank=True)
reservaF = models.TimeField(null=True, blank=True)
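    # __unicode__ is used by Python 2, __str__ by Python 3.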
def __unicode__(self):
return self.nombre_est
def __str__(self):
return self.nombre_est
class Reserva(models.Model):
estacionamiento = models.ForeignKey(Estacionamiento)
#puesto = models.ForeignKey(Puesto)
horaInicio = models.TimeField()
horaFin = models.TimeField()
def __unicode__(self):
return "Reserva"
def __str__(self):
return "Reserva"
class Pago(models.Model):
nombre = models.CharField(max_length=30)
    cedula = models.PositiveIntegerField()
tipoTarjeta = models.CharField(max_length=8)
numeroTarjeta = models.CharField(max_length=16)
pago = models.DecimalField(max_digits=8, decimal_places=2, null=True, blank=True)
def __unicode__(self):
return "Pago"
| return "Pago" | def __str__(self): |
instance_config.py | from typing import List
from ...driver.billing_manager import ProductVersions
from ...instance_config import InstanceConfig
from .resource_utils import family_worker_type_cores_to_gcp_machine_type, gcp_machine_type_to_parts
from .resources import (
GCPComputeResource,
GCPDynamicSizedDiskResource,
GCPIPFeeResource,
GCPMemoryResource,
GCPResource,
GCPServiceFeeResource,
GCPStaticSizedDiskResource,
gcp_resource_from_dict,
)
GCP_INSTANCE_CONFIG_VERSION = 5
class GCPSlimInstanceConfig(InstanceConfig):
@staticmethod
def create(
product_versions: ProductVersions,
machine_type: str,
preemptible: bool,
local_ssd_data_disk: bool,
data_disk_size_gb: int,
boot_disk_size_gb: int,
job_private: bool,
location: str,
) -> 'GCPSlimInstanceConfig': # pylint: disable=unused-argument
if local_ssd_data_disk:
data_disk_resource = GCPStaticSizedDiskResource.create(product_versions, 'local-ssd', data_disk_size_gb)
else:
data_disk_resource = GCPStaticSizedDiskResource.create(product_versions, 'pd-ssd', data_disk_size_gb)
machine_type_parts = gcp_machine_type_to_parts(machine_type)
assert machine_type_parts is not None, machine_type
instance_family = machine_type_parts.machine_family
resources = [
GCPComputeResource.create(product_versions, instance_family, preemptible),
GCPMemoryResource.create(product_versions, instance_family, preemptible),
GCPStaticSizedDiskResource.create(product_versions, 'pd-ssd', boot_disk_size_gb),
data_disk_resource,
GCPDynamicSizedDiskResource.create(product_versions, 'pd-ssd'),
GCPIPFeeResource.create(product_versions, 1024),
GCPServiceFeeResource.create(product_versions),
]
return GCPSlimInstanceConfig(
machine_type=machine_type,
preemptible=preemptible,
local_ssd_data_disk=local_ssd_data_disk,
data_disk_size_gb=data_disk_size_gb,
boot_disk_size_gb=boot_disk_size_gb,
job_private=job_private,
resources=resources,
)
def __init__(
self,
machine_type: str,
preemptible: bool,
local_ssd_data_disk: bool,
data_disk_size_gb: int,
boot_disk_size_gb: int,
job_private: bool,
resources: List[GCPResource],
):
self.cloud = 'gcp'
self._machine_type = machine_type
self.preemptible = preemptible
self.local_ssd_data_disk = local_ssd_data_disk
self.data_disk_size_gb = data_disk_size_gb
self.job_private = job_private
self.boot_disk_size_gb = boot_disk_size_gb
machine_type_parts = gcp_machine_type_to_parts(self._machine_type)
assert machine_type_parts is not None, machine_type
self._instance_family = machine_type_parts.machine_family
self._worker_type = machine_type_parts.worker_type
self.cores = machine_type_parts.cores
self.resources = resources
def worker_type(self) -> str:
return self._worker_type
@staticmethod
def from_dict(data: dict) -> 'GCPSlimInstanceConfig':
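        # Configs older than version 4 stored the machine shape in nested 'instance' and 'disks' dicts.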
if data['version'] < 4:
disks = data['disks']
assert len(disks) == 2, data
assert disks[0]['boot']
boot_disk_size_gb = disks[0]['size']
assert not disks[1]['boot']
local_ssd_data_disk = disks[1]['type'] == 'local-ssd'
data_disk_size_gb = disks[1]['size']
job_private = data['job-private']
preemptible = data['instance']['preemptible']
machine_type = family_worker_type_cores_to_gcp_machine_type(
data['instance']['family'],
data['instance']['type'],
data['instance']['cores'],
) | preemptible = data['preemptible']
local_ssd_data_disk = data['local_ssd_data_disk']
data_disk_size_gb = data['data_disk_size_gb']
boot_disk_size_gb = data['boot_disk_size_gb']
job_private = data['job_private']
machine_type_parts = gcp_machine_type_to_parts(machine_type)
assert machine_type_parts is not None, machine_type
instance_family = machine_type_parts.machine_family
resources = data.get('resources')
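        # Explicit resource lists arrived in version 5; older configs rebuild them from legacy product names.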
if resources is None:
assert data['version'] < 5, data
preemptible_str = 'preemptible' if preemptible else 'nonpreemptible'
if local_ssd_data_disk:
data_disk_resource = GCPStaticSizedDiskResource('disk/local-ssd/1', data_disk_size_gb)
else:
data_disk_resource = GCPStaticSizedDiskResource('disk/pd-ssd/1', data_disk_size_gb)
resources = [
GCPComputeResource(f'compute/{instance_family}-{preemptible_str}/1'),
GCPMemoryResource(f'memory/{instance_family}-{preemptible_str}/1'),
GCPStaticSizedDiskResource('disk/pd-ssd/1', boot_disk_size_gb),
data_disk_resource,
GCPDynamicSizedDiskResource('disk/pd-ssd/1'),
GCPIPFeeResource('service-fee/1'),
GCPServiceFeeResource('ip-fee/1024/1'),
]
else:
resources = [gcp_resource_from_dict(data) for data in resources]
return GCPSlimInstanceConfig(
machine_type,
preemptible,
local_ssd_data_disk,
data_disk_size_gb,
boot_disk_size_gb,
job_private,
resources,
)
def to_dict(self) -> dict:
return {
'version': GCP_INSTANCE_CONFIG_VERSION,
'cloud': 'gcp',
'machine_type': self._machine_type,
'preemptible': self.preemptible,
'local_ssd_data_disk': self.local_ssd_data_disk,
'data_disk_size_gb': self.data_disk_size_gb,
'boot_disk_size_gb': self.boot_disk_size_gb,
'job_private': self.job_private,
'resources': [resource.to_dict() for resource in self.resources],
} | instance_family = data['instance']['family']
else:
machine_type = data['machine_type'] |
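The version gate in `from_dict` above implements a common config-migration pattern: payloads written by older code are upgraded field-by-field on read, while `to_dict` always emits the newest schema so old formats disappear on rewrite. A minimal, self-contained sketch of that pattern, using a hypothetical `Config` class rather than the real batch classes:

from dataclasses import dataclass

CONFIG_VERSION = 2  # current schema version, analogous to GCP_INSTANCE_CONFIG_VERSION


@dataclass
class Config:
    machine_type: str
    preemptible: bool

    @staticmethod
    def from_dict(data: dict) -> 'Config':
        if data['version'] < 2:
            # Legacy payloads nested the machine type under 'instance'.
            machine_type = f"{data['instance']['family']}-{data['instance']['cores']}"
        else:
            machine_type = data['machine_type']
        return Config(machine_type, data['preemptible'])

    def to_dict(self) -> dict:
        # Writes always use the newest version, so a read-modify-write upgrades storage.
        return {
            'version': CONFIG_VERSION,
            'machine_type': self.machine_type,
            'preemptible': self.preemptible,
        }


# Round-trip: a v1 payload is upgraded on read and re-emitted as v2.
old = {'version': 1, 'instance': {'family': 'n1', 'cores': 8}, 'preemptible': True}
assert Config.from_dict(old).to_dict() == {
    'version': 2,
    'machine_type': 'n1-8',
    'preemptible': True,
}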
bitcoin_fi.ts | <?xml version="1.0" ?><!DOCTYPE TS><TS language="fi" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
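<!-- Editor's note: <location> elements in this file use offsets relative to the
     previous location in the same source file (line="+N"), the style Qt's lupdate
     emits when configured for relative locations. -->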
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Santacoin</source>
<translation>Tietoa Santacoinista</translation>
</message>
<message>
<location line="+39"/>
<source><b>Santacoin</b> version</source>
<translation><b>Santacoin</b> versio</translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
Tämä on kokeellinen ohjelmisto.
Levitetään MIT/X11 ohjelmistolisenssin alaisuudessa. Tarkemmat tiedot löytyvät tiedostosta COPYING tai osoitteesta http://www.opensource.org/licenses/mit-license.php.
Tämä ohjelma sisältää OpenSSL projektin OpenSSL työkalupakin (http://www.openssl.org/), Eric Youngin ([email protected]) kehittämän salausohjelmiston sekä Thomas Bernardin UPnP ohjelmiston.
</translation>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation>Tekijänoikeus</translation>
</message>
<message>
<location line="+0"/>
<source>The Santacoin developers</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Osoitekirja</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>Kaksoisnapauta muokataksesi osoitetta tai nimeä</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Luo uusi osoite</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Kopioi valittu osoite leikepöydälle</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>&Uusi Osoite</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your Santacoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>Nämä ovat Santacoin-osoitteesi joihin voit vastaanottaa maksuja. Voit haluta antaa jokaiselle maksajalle omansa, että pystyt seuraamaan keneltä maksut tulevat.</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation>&Kopioi Osoite</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>Näytä &QR-koodi</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Santacoin address</source>
<translation>Allekirjoita viesti todistaaksesi, että omistat Santacoin-osoitteen</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Allekirjoita &viesti</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>Poista valittu osoite listalta</translation>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation>Vie auki olevan välilehden tiedot tiedostoon</translation>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified Santacoin address</source>
<translation>Tarkista viestin allekirjoitus varmistaaksesi, että se allekirjoitettiin tietyllä Santacoin-osoitteella</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>&Varmista viesti...</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Poista</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your Santacoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation>Kopioi &Nimi</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>&Muokkaa</translation>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation>Lähetä &Rahaa</translation>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>Vie osoitekirja</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Comma separated file (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>Virhe viedessä osoitekirjaa</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Ei voida kirjoittaa tiedostoon %1.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Nimi</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Osoite</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(ei nimeä)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Tunnuslauseen Dialogi</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Kirjoita tunnuslause</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Uusi tunnuslause</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
        <translation>Kirjoita uusi tunnuslause uudelleen</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
        <translation>Anna lompakolle uusi tunnuslause.<br/>Käytä tunnuslausetta, jossa on ainakin <b>10 satunnaista merkkiä</b> tai <b>kahdeksan sanaa</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Salaa lompakko</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Tätä toimintoa varten sinun täytyy antaa lompakon tunnuslause sen avaamiseksi.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Avaa lompakko</translation>
</message>
<message>
<location line="+3"/>
        <source>This operation needs your wallet passphrase to decrypt the wallet.</source>
        <translation>Tätä toimintoa varten sinun täytyy antaa lompakon tunnuslause salauksen purkamiseen.</translation>
    </message>
    <message>
        <location line="+5"/>
<source>Decrypt wallet</source>
<translation>Pura lompakon salaus</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Vaihda tunnuslause</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Anna vanha ja uusi tunnuslause.</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Vahvista lompakon salaus</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR SANTACOINS</b>!</source>
<translation>Varoitus: Jos salaat lompakkosi ja menetät tunnuslauseesi, <b>MENETÄT KAIKKI SANTACOINISI</b>!</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Haluatko varmasti salata lompakkosi?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>TÄRKEÄÄ: Kaikki vanhat lompakon varmuuskopiot pitäisi korvata uusilla suojatuilla varmuuskopioilla. Turvallisuussyistä edelliset varmuuskopiot muuttuvat turhiksi, kun aloitat suojatun lompakon käytön.</translation>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Varoitus: Caps Lock on käytössä!</translation>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>Lompakko salattu</translation>
</message>
<message>
<location line="-56"/>
<source>Santacoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your santacoins from being stolen by malware infecting your computer.</source>
<translation>Santacoin sulkeutuu lopettaakseen salausprosessin. Muista, että salattukaan lompakko ei täysin suojaa sitä haittaohjelmien aiheuttamilta varkauksilta.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Lompakon salaus epäonnistui</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Lompakon salaaminen epäonnistui sisäisen virheen vuoksi. Lompakkoasi ei salattu.</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>Annetut tunnuslauseet eivät täsmää.</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>Lompakon avaaminen epäonnistui.</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>Annettu tunnuslause oli väärä.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Lompakon salauksen purku epäonnistui.</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Lompakon tunnuslause vaihdettiin onnistuneesti.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation>&Allekirjoita viesti...</translation>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation>Synkronoidaan verkon kanssa...</translation>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation>&Yleisnäkymä</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Lompakon tilanteen yleiskatsaus</translation>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>&Rahansiirrot</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Selaa rahansiirtohistoriaa</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation>Muokkaa tallennettujen nimien ja osoitteiden listaa</translation>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation>Näytä Santacoinien vastaanottamiseen käytetyt osoitteet</translation>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>L&opeta</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Sulje ohjelma</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about Santacoin</source>
<translation>Näytä tietoa Santacoin-projektista</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>Tietoja &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
        <translation>Näytä tietoja Qt:stä</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Asetukset...</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>&Salaa lompakko...</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>&Varmuuskopioi Lompakko...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Vaihda Tunnuslause...</translation>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
        <translation>Tuodaan lohkoja levyltä...</translation>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
        <translation>Uudelleenindeksoidaan lohkoja levyllä...</translation>
</message>
<message>
<location line="-347"/>
<source>Send coins to a Santacoin address</source>
<translation>Lähetä kolikoita Santacoin-osoitteeseen</translation>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for Santacoin</source>
<translation>Muuta Santacoinin konfiguraatioasetuksia</translation>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation>Varmuuskopioi lompakko toiseen sijaintiin</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Vaihda lompakon salaukseen käytettävä tunnuslause</translation>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation>&Testausikkuna</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Avaa debuggaus- ja diagnostiikkakonsoli</translation>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation>Varmista &viesti...</translation>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>Santacoin</source>
<translation>Santacoin</translation>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation>Lompakko</translation>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation>&Lähetä</translation>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation>&Vastaanota</translation>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation>&Osoitteet</translation>
</message>
<message>
<location line="+22"/>
<source>&About Santacoin</source>
<translation>&Tietoa Santacoinista</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&Näytä / Piilota</translation>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation>Näytä tai piilota Santacoin-ikkuna</translation>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation>Suojaa yksityiset avaimet, jotka kuuluvat lompakkoosi</translation>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your Santacoin addresses to prove you own them</source>
<translation>Allekirjoita viestisi omalla Santacoin -osoitteellasi todistaaksesi, että omistat ne</translation>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified Santacoin addresses</source>
<translation>Varmista, että viestisi on allekirjoitettu määritetyllä Santacoin -osoitteella</translation>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>&Tiedosto</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>&Asetukset</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>&Apua</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>Välilehtipalkki</translation>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+47"/>
<source>Santacoin client</source>
<translation>Santacoin-asiakas</translation>
</message>
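    <!-- Editor's note: messages marked numerus="yes" carry one <numerusform> per
         plural form of the target language; Finnish has two (singular and plural),
         so each translation below supplies exactly two forms. -->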
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to Santacoin network</source>
<translation><numerusform>%n aktiivinen yhteys Santacoin-verkkoon</numerusform><numerusform>%n aktiivista yhteyttä Santacoin-verkkoon</numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation>Käsitelty %1 lohkoa rahansiirtohistoriasta</translation>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation><numerusform>%n tunti</numerusform><numerusform>%n tuntia</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation><numerusform>%n viikko</numerusform><numerusform>%n viikkoa</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation>Viimeisin vastaanotettu lohko tuotettu %1.</translation>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation>Virhe</translation>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation>Varoitus</translation>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation>Tietoa</translation>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>Rahansiirtohistoria on ajan tasalla</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>Saavutetaan verkkoa...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation>Vahvista maksukulu</translation>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>Lähetetyt rahansiirrot</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>Saapuva rahansiirto</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Päivä: %1
Määrä: %2
Tyyppi: %3
Osoite: %4</translation>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation>URI käsittely</translation>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid Santacoin address or malformed URI parameters.</source>
<translation>URIa ei voitu jäsentää! Tämä voi johtua kelvottomasta Santacoin-osoitteesta tai virheellisistä URI parametreista.</translation>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Lompakko on <b>salattu</b> ja tällä hetkellä <b>avoinna</b></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Lompakko on <b>salattu</b> ja tällä hetkellä <b>lukittuna</b></translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. Santacoin can no longer continue safely and will quit.</source>
<translation>Peruuttamaton virhe on tapahtunut. Santacoin ei voi enää jatkaa turvallisesti ja sammutetaan.</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation>Verkkohälytys</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Muokkaa osoitetta</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Nimi</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>Tähän osoitteeseen liitetty nimi</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Osoite</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>Osoite, joka liittyy tämän osoitekirjan merkintään. Tätä voidaan muuttaa vain lähtevissä osoitteissa.</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>Uusi vastaanottava osoite</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Uusi lähettävä osoite</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Muokkaa vastaanottajan osoitetta</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Muokkaa lähtevää osoitetta</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>Osoite "%1" on jo osoitekirjassa.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Santacoin address.</source>
<translation>Antamasi osoite "%1" ei ole validi Santacoin-osoite.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Lompakkoa ei voitu avata.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Uuden avaimen luonti epäonnistui.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>Santacoin-Qt</source>
<translation>Santacoin-Qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>versio</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>Käyttö:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
        <translation>komentoriviparametrit</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>Käyttöliittymäasetukset</translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>Set language, for example "de_DE" (default: system locale)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation>Käynnistä pienennettynä</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>Näytä aloitusruutu käynnistettäessä (oletus: 1)</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Asetukset</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&Yleiset</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Maksa rahansiirtopalkkio</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start Santacoin after logging in to the system.</source>
<translation>Käynnistä Santacoin kirjautumisen yhteydessä.</translation>
</message>
<message>
<location line="+3"/>
<source>&Start Santacoin on system login</source>
<translation>&Käynnistä Santacoin kirjautumisen yhteydessä</translation>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation>&Verkko</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Santacoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>Avaa Santacoin-asiakasohjelman portti reitittimellä automaattisesti. Tämä toimii vain, jos reitittimesi tukee UPnP:tä ja se on käytössä.</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Portin uudelleenohjaus &UPnP:llä</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the Santacoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation>Ota yhteys Santacoin-verkkoon SOCKS-proxyn läpi (esimerkiksi kun haluat käyttää Tor-verkkoa).</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>&Ota yhteys SOCKS-proxyn kautta:</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>Proxyn &IP:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>Välityspalvelimen IP-osoite (esim. 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
        <translation>&Portti:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Proxyn Portti (esim. 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>SOCKS &Versio:</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>Proxyn SOCKS-versio (esim. 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&Ikkuna</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Näytä ainoastaan ilmaisinalueella ikkunan pienentämisen jälkeen.</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Pienennä ilmaisinalueelle työkalurivin sijasta</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Ikkunaa suljettaessa vain pienentää Santacoin-ohjelman ikkunan lopettamatta itse ohjelmaa. Kun tämä asetus on valittuna, ohjelman voi sulkea vain valitsemalla Lopeta ohjelman valikosta.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>P&ienennä suljettaessa</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&Käyttöliittymä</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
        <translation>&Käyttöliittymän kieli:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Santacoin.</source>
<translation>Tässä voit määritellä käyttöliittymän kielen. Muutokset astuvat voimaan seuraavan kerran, kun Santacoin käynnistetään.</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>Yksikkö jona santacoin-määrät näytetään</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Valitse mitä yksikköä käytetään ensisijaisesti santacoin-määrien näyttämiseen.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show Santacoin addresses in the transaction list or not.</source>
<translation>Näytetäänkö Santacoin-osoitteet rahansiirrot listassa vai ei.</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>&Näytä osoitteet rahansiirrot listassa</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&OK</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&Peruuta</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&Hyväksy</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation>oletus</translation>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation>Varoitus</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Santacoin.</source>
<translation>Tämä asetus astuu voimaan seuraavalla kerralla, kun Santacoin käynnistetään.</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>Antamasi proxy-osoite on virheellinen.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Santacoin network after a connection is established, but this process has not completed yet.</source>
        <translation>Näytetyt tiedot eivät välttämättä ole ajan tasalla. Lompakkosi synkronoituu Santacoin-verkon kanssa automaattisesti yhteyden muodostamisen jälkeen, mutta synkronointi on vielä meneillään.</translation>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>Vahvistamatta:</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>Lompakko</translation>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation>Epäkypsää:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Louhittu saldo, joka ei ole vielä kypsynyt</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Viimeisimmät rahansiirrot</b></translation>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation>Tililläsi tällä hetkellä olevien Santacoinien määrä</translation>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>Niiden saapuvien rahansiirtojen määrä, joita Santacoin-verkko ei vielä ole ehtinyt vahvistaa ja siten eivät vielä näy saldossa.</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>Ei ajan tasalla</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start santacoin: click-to-pay handler</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>QR-koodi Dialogi</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>Vastaanota maksu</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>Määrä:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>Tunniste:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>Viesti:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&Tallenna nimellä...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation>Virhe käännettäessä URI:a QR-koodiksi.</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>Syötetty määrä on virheellinen. Tarkista kirjoitusasu.</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>Tuloksen URI liian pitkä, yritä lyhentää otsikon tekstiä / viestiä.</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>Tallenna QR-koodi</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
        <translation>PNG kuvat (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Pääteohjelman nimi</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation>Ei saatavilla</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>Pääteohjelman versio</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>T&ietoa</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Käytössä oleva OpenSSL-versio</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Käynnistysaika</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Verkko</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Yhteyksien lukumäärä</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation>Käyttää testiverkkoa</translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Lohkoketju</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Nykyinen Lohkojen määrä</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>Arvioitu lohkojen kokonaismäärä</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>Viimeisimmän lohkon aika</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Avaa</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
        <translation>Komentoriviparametrit</translation>
</message>
<message>
<location line="+7"/>
<source>Show the Santacoin-Qt help message to get a list with possible Santacoin command-line options.</source>
<translation>Näytä Santacoin-Qt komentoriviparametrien ohjesivu, jossa on listattuna mahdolliset komentoriviparametrit.</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>&Näytä</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&Konsoli</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>Kääntöpäiväys</translation>
</message>
<message>
<location line="-104"/>
<source>Santacoin - Debug window</source>
<translation>Santacoin - Debug ikkuna</translation>
</message>
<message>
<location line="+25"/>
<source>Santacoin Core</source>
<translation>Santacoin-ydin</translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>Debug lokitiedosto</translation>
</message>
<message>
<location line="+7"/>
<source>Open the Santacoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>Avaa lokitiedosto nykyisestä data-kansiosta. Tämä voi viedä useamman sekunnin, jos lokitiedosto on iso.</translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Tyhjennä konsoli</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the Santacoin RPC console.</source>
<translation>Tervetuloa Santacoin RPC konsoliin.</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Ylös- ja alas-nuolet selaavat historiaa ja <b>Ctrl-L</b> tyhjentää ruudun.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Kirjoita <b>help</b> nähdäksesi yleiskatsauksen käytettävissä olevista komennoista.</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Lähetä Santacoineja</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation>Lähetä monelle vastaanottajalle</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>Lisää &Vastaanottaja</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>Poista kaikki rahansiirtokentät</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
        <translation>&Tyhjennä Kaikki</translation>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<location line="+10"/>
<source>123.456 BTC</source>
<translation>123,456 BTC</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Vahvista lähetys</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>&Lähetä</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> to %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Hyväksy Santacoinien lähettäminen</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>Haluatko varmasti lähettää %1?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> ja </translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>Vastaanottajan osoite on virheellinen. Tarkista osoite.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>Maksettavan summan tulee olla suurempi kuin 0 Santacoinia.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>Määrä ylittää käytettävissä olevan saldon.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>Kokonaismäärä ylittää saldosi kun %1 maksukulu lisätään summaan.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Sama osoite toistuu useamman kerran. Samaan osoitteeseen voi lähettää vain kerran per maksu.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Virhe: Rahansiirto hylättiin. Tämä voi tapahtua jos jotkin santacoineistasi on jo käytetty, esimerkiksi jos olet käyttänyt kopiota wallet.dat-lompakkotiedostosta ja santacoinit on merkitty käytetyksi vain kopiossa.</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>Lomake</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>M&äärä:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>Maksun saaja:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Anna nimi tälle osoitteelle, jos haluat lisätä sen osoitekirjaan</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&Nimi:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>Valitse osoite osoitekirjasta</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Liitä osoite leikepöydältä</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
        <translation>Poista tämä vastaanottaja</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Santacoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Anna Santacoin-osoite (esim. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Allekirjoitukset - Allekirjoita / Varmista viesti</translation>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation>&Allekirjoita viesti</translation>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
        <translation>Voit allekirjoittaa viestejä osoitteillasi todistaaksesi, että omistat ne. Varo allekirjoittamasta mitään epämääräistä, sillä phishing-hyökkäyksillä saatetaan yrittää huijata sinut allekirjoittamaan henkilöllisyytesi toisen haltuun. Allekirjoita vain yksityiskohtaisia lausuntoja, joihin sitoudut.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Osoite, jolla viesti allekirjoitetaan (esimerkiksi Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation>Valitse osoite osoitekirjasta</translation>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation>Liitä osoite leikepöydältä</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Kirjoita tähän viesti minkä haluat allekirjoittaa</translation>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation>Allekirjoitus</translation>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Kopioi tämänhetkinen allekirjoitus leikepöydälle</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Santacoin address</source>
<translation>Allekirjoita viesti todistaaksesi, että omistat tämän Santacoin-osoitteen</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Allekirjoita &viesti</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation>Tyhjennä kaikki allekirjoita-viesti-kentät</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>&Tyhjennä Kaikki</translation>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation>&Varmista viesti</translation>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
        <translation>Syötä allekirjoittava osoite, viesti ja allekirjoitus alla oleviin kenttiin varmistaaksesi allekirjoituksen aitouden. Varmista, että kopioit kaikki kentät täsmälleen oikein, myös rivinvaihdot, välilyönnit ja tabulaattorit. Varo tulkitsemasta allekirjoituksesta enempää kuin mitä allekirjoitetussa viestissä itsessään sanotaan, jottet joudu man-in-the-middle-hyökkäyksen huijaamaksi.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Osoite, jolla viesti allekirjoitettiin (esimerkiksi Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Santacoin address</source>
<translation>Tarkista viestin allekirjoitus varmistaaksesi, että se allekirjoitettiin tietyllä Santacoin-osoitteella</translation>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation>Tyhjennä kaikki varmista-viesti-kentät</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Santacoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Anna Santacoin-osoite (esim. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
        <translation>Klikkaa "Allekirjoita viesti" luodaksesi allekirjoituksen</translation>
</message>
<message>
<location line="+3"/>
<source>Enter Santacoin signature</source>
<translation>Syötä Santacoin-allekirjoitus</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>Syötetty osoite on virheellinen.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Tarkista osoite ja yritä uudelleen.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>Syötetyn osoitteen avainta ei löydy.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Lompakon avaaminen peruttiin.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>Yksityistä avainta syötetylle osoitteelle ei ole saatavilla.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>Viestin allekirjoitus epäonnistui.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Viesti allekirjoitettu.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>Allekirjoitusta ei pystytty tulkitsemaan.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Tarkista allekirjoitus ja yritä uudelleen.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>Allekirjoitus ei täsmää viestin tiivisteeseen.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>Viestin varmistus epäonnistui.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Viesti varmistettu.</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The Santacoin developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>Avoinna %1 asti</translation>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation>%1/offline</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/vahvistamaton</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 vahvistusta</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>Tila</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
        <translation><numerusform>, lähetetty %n noodin läpi</numerusform><numerusform>, lähetetty %n noodin läpi</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Päivämäärä</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Lähde</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Generoitu</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>Lähettäjä</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>Saaja</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>oma osoite</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>nimi</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Credit</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>kypsyy %n lohkon kuluttua</numerusform><numerusform>kypsyy %n lohkon kuluttua</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>ei hyväksytty</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Debit</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Maksukulu</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
        <translation>Nettomäärä</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Viesti</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Kommentti</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>Siirtotunnus</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>Generoitujen kolikoiden täytyy kypsyä 120 lohkon ajan ennen kuin ne voidaan lähettää. Kun loit tämän lohkon, se lähetettiin verkkoon lisättäväksi lohkoketjuun. Jos se ei päädy osaksi lohkoketjua, sen tila vaihtuu "ei hyväksytty" ja sitä ei voida lähettää. Näin voi joskus käydä, jos toinen noodi löytää lohkon muutamaa sekuntia ennen tai jälkeen sinun lohkosi löytymisen.</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Debug tiedot</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Rahansiirto</translation>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation>Sisääntulot</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Määrä</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>tosi</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>epätosi</translation>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>, ei ole vielä onnistuneesti lähetetty</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>tuntematon</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Rahansiirron yksityiskohdat</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Tämä ruutu näyttää yksityiskohtaisen tiedon rahansiirrosta</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>Päivämäärä</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Laatu</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Osoite</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Määrä</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation>Avoinna %1 asti</translation>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation>Ei yhteyttä verkkoon (%1 vahvistusta)</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation>Vahvistamatta (%1/%2 vahvistusta)</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Vahvistettu (%1 vahvistusta)</translation>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation><numerusform>Louhittu saldo on käytettävissä kun se kypsyy %n lohkon päästä</numerusform><numerusform>Louhittu saldo on käytettävissä kun se kypsyy %n lohkon päästä</numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Tätä lohkoa ei vastaanotettu mistään muusta solmusta ja sitä ei mahdollisesti hyväksytä!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Generoitu mutta ei hyväksytty</translation>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>Vastaanotettu osoitteella</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Vastaanotettu</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Saaja</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Maksu itsellesi</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Louhittu</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(ei saatavilla)</translation>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Rahansiirron tila. Siirrä osoitin kentän päälle nähdäksesi vahvistusten lukumäärä.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Rahansiirron vastaanottamisen päivämäärä ja aika.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Rahansiirron laatu.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Rahansiirron kohteen Santacoin-osoite</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Saldoon lisätty tai siitä vähennetty määrä.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>Kaikki</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>Tänään</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Tällä viikolla</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Tässä kuussa</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Viime kuussa</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Tänä vuonna</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Alue...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Vastaanotettu osoitteella</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Saaja</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>Itsellesi</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Louhittu</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Muu</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Anna etsittävä osoite tai tunniste</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Minimimäärä</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Kopioi osoite</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Kopioi nimi</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopioi määrä</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Muokkaa nimeä</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Näytä rahansiirron yksityiskohdat</translation>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation>Vie rahansiirron tiedot</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Comma separated file (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Vahvistettu</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Aika</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Laatu</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Nimi</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Osoite</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Määrä</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>Virhe tietojen viennissä</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Ei voida kirjoittaa tiedostoon %1.</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Alue:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>kenelle</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation>Lähetä Santacoineja</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation>Vie auki olevan välilehden tiedot tiedostoon</translation>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation>Varmuuskopio Onnistui</translation>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>Santacoin version</source>
<translation>Santacoinin versio</translation>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation>Käyttö:</translation>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or santacoind</source>
<translation>Lähetä käsky palvelimelle tai santacoind:lle</translation>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>Lista komennoista</translation>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation>Hanki apua käskyyn</translation>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation>Asetukset:</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: santacoin.conf)</source>
<translation>Määritä asetustiedosto (oletus: santacoin.conf)</translation>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: santacoind.pid)</source>
<translation>Määritä pid-tiedosto (oletus: santacoind.pid)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Määritä data-hakemisto</translation>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Aseta tietokannan välimuistin koko megatavuina (oletus: 25)</translation>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 5129 or testnet: 15129)</source>
<translation>Kuuntele yhteyksiä portista <port> (oletus: 5129 tai testnet: 15129)</translation>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Pidä enintään <n> yhteyttä verkkoihin (oletus: 125)</translation>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Yhdistä noodiin hakeaksesi naapurien osoitteet ja katkaise yhteys</translation>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation>Määritä julkinen osoitteesi</translation>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Kynnysarvo aikakatkaisulle heikosti toimiville verkoille (oletus: 100)</translation>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Sekuntien määrä, kuinka kauan uudelleenkytkeydytään verkkoihin (oletus: 86400)</translation>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>Virhe valmisteltaessa RPC-portin %u avaamista kuunneltavaksi: %s</translation>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 5128 or testnet: 15128)</source>
<translation>Kuuntele JSON-RPC -yhteyksiä portista <port> (oletus: 5128 tai testnet: 15128)</translation>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Hyväksy merkkipohjaiset- ja JSON-RPC-käskyt</translation>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Aja taustalla daemonina ja hyväksy komennot</translation>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation>Käytä test -verkkoa</translation>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Hyväksy yhteyksiä ulkopuolelta (vakioasetus: 1 jos -proxy tai -connect ei määritelty)</translation>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=santacoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Santacoin Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>Virhe ilmennyt asetettaessa RPC-porttia %u IPv6:n kuuntelemiseksi, palataan takaisin IPv4:ään %s</translation>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Santacoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>Aseta suurin korkean prioriteetin / matalan palkkion siirron koko tavuissa (vakioasetus: 27000)</translation>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Varoitus: -paytxfee on asetettu erittäin korkeaksi! Tämä on maksukulu jonka tulet maksamaan kun lähetät siirron.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation>Varoitus: Näytetyt siirrot eivät välttämättä pidä paikkaansa! Sinun tai toisten noodien voi olla tarpeen asentaa päivitys.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Santacoin will not work properly.</source>
<translation>Varoitus: Tarkista että tietokoneesi kellonaika ja päivämäärä ovat paikkansapitäviä! Santacoin ei toimi oikein väärällä päivämäärällä ja/tai kellonajalla.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation>Lohkon luonnin asetukset:</translation>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation>Yhdistä ainoastaan määrättyihin noodeihin</translation>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Hae oma IP osoite (vakioasetus: 1 kun kuuntelemassa ja ei -externalip)</translation>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation>Virhe avattaessa lohkotietokantaa</translation>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation>Virhe: Levytila on vähissä!</translation>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation>Virhe: järjestelmävirhe: </translation>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Ei onnistuttu kuuntelemaan missään portissa. Käytä -listen=0 jos haluat tätä.</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation>Lohkon kirjoitus epäonnistui</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation>Hae naapureita DNS hauilla (vakioasetus: 1 paitsi jos -connect)</translation>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation>Tuodaan lohkoja ulkoisesta blk000??.dat tiedostosta</translation>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation>Tietoa</translation>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation>Virheellinen -tor osoite '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Suurin vastaanottopuskuri yksittäiselle yhteydelle, <n>*1000 tavua (vakioasetus: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Suurin lähetyspuskuri yksittäiselle yhteydelle, <n>*1000 tavua (vakioasetus: 1000)</translation>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Yhdistä vain noodeihin verkossa <net> (IPv4, IPv6 tai Tor)</translation>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>Tulosta enemmän debug tietoa. Aktivoi kaikki -debug* asetukset</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>Tulosta lisää verkkoyhteys debug tietoa</translation>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation>Lisää debuggaustiedon tulostukseen aikaleima</translation>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the Santacoin Wiki for SSL setup instructions)</source>
<translation>SSL asetukset (katso Santacoin Wikistä tarkemmat SSL ohjeet)</translation>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>Valitse käytettävän SOCKS-proxyn versio (4-5, vakioasetus: 5)</translation>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Lähetä jäljitys/debug-tieto konsoliin, debug.log-tiedoston sijaan</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>Lähetä jäljitys/debug-tieto debuggeriin</translation>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation>Aseta suurin lohkon koko tavuissa (vakioasetus: 250000)</translation>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Aseta pienin lohkon koko tavuissa (vakioasetus: 0)</translation>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Pienennä debug.log tiedosto käynnistyksen yhteydessä (vakioasetus: 1 kun ei -debug)</translation>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Määritä yhteyden aikakatkaisu millisekunneissa (vakioasetus: 5000)</translation>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation>Järjestelmävirhe:</translation>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Käytä UPnP:tä kuunneltavan portin avaamiseen (vakioasetus: 0)</translation>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Käytä UPnP:tä kuunneltavan portin avaamiseen (vakioasetus: 1 kun kuuntelemassa)</translation>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>Käytä proxyä tor yhteyksien avaamiseen (vakioasetus: sama kuin -proxy)</translation>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation>Käyttäjätunnus JSON-RPC-yhteyksille</translation>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation>Varoitus</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Varoitus: Tämä versio on vanhentunut, päivitys tarpeen!</translation>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation>Salasana JSON-RPC-yhteyksille</translation>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Salli JSON-RPC yhteydet tietystä ip-osoitteesta</translation>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Lähetä käskyjä solmuun osoitteessa <ip> (oletus: 127.0.0.1)</translation>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Suorita käsky kun paras lohko muuttuu (%s cmd on vaihdettu block hashin kanssa)</translation>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation>Päivitä lompakko uusimpaan formaattiin</translation>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Aseta avainpoolin koko arvoon <n> (oletus: 100)</translation>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Skannaa uudelleen lohkoketju lompakon puuttuvien rahasiirtojen vuoksi</translation>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Käytä OpenSSL:ää (https) JSON-RPC-yhteyksille</translation>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Palvelimen sertifikaatti-tiedosto (oletus: server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Palvelimen yksityisavain (oletus: server.pem)</translation>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>Hyväksyttävä salaus (oletus: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation>Tämä ohjeviesti</translation>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>Kytkeytyminen %s tällä tietokoneella ei onnistu (kytkeytyminen palautti virheen %d, %s)</translation>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation>Yhdistä socks proxyn läpi</translation>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Salli DNS kyselyt -addnode, -seednode ja -connect yhteydessä</translation>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation>Ladataan osoitteita...</translation>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Virhe ladattaessa wallet.dat-tiedostoa: Lompakko vioittunut</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of Santacoin</source>
<translation>Virhe ladattaessa wallet.dat-tiedostoa: Tarvitset uudemman version Santacoinista</translation>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart Santacoin to complete</source>
<translation>Lompakko tarvitsee uudelleenkirjoittaa: käynnistä Santacoin uudelleen</translation>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation>Virhe ladattaessa wallet.dat-tiedostoa</translation>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Virheellinen proxy-osoite '%s'</translation>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>Tuntematon verkko -onlynet parametrina: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>Tuntematon -socks proxy versio pyydetty: %i</translation>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>-bind osoitteen '%s' selvittäminen epäonnistui</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>-externalip osoitteen '%s' selvittäminen epäonnistui</translation>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>-paytxfee=<amount>: '%s' on virheellinen</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation>Virheellinen määrä</translation>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation>Lompakon saldo ei riitä</translation>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>Ladataan lohkoindeksiä...</translation>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Lisää solmu mihin liittyä pitääksesi yhteyden auki</translation>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. Santacoin is probably already running.</source>
<translation>Kytkeytyminen %s ei onnistu tällä tietokoneella. Santacoin on todennäköisesti jo käynnissä.</translation>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation>Rahansiirtopalkkio per KB lisätään lähettämääsi rahansiirtoon</translation>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation>Ladataan lompakkoa...</translation>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation>Et voi päivittää lompakkoasi vanhempaan versioon</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation>Oletusosoitetta ei voi kirjoittaa</translation>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation>Skannataan uudelleen...</translation>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation>Lataus on valmis</translation>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation>Käytä %s optiota</translation>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation>Virhe</translation>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>Sinun täytyy asettaa rpcpassword=<password> asetustiedostoon:
%s
Jos tiedostoa ei ole, niin luo se ainoastaan omistajan kirjoitusoikeuksin.</translation>
</message>
</context>
</TS> | <translation>Tätä toimintoa varten sinun täytyy antaa lompakon tunnuslause salauksen purkuun.</translation>
</message>
<message> |
main.rs | extern crate indicatif;
use std::fs;
use std::path::PathBuf;
use std::time::Instant;
use std::sync::{Arc, Mutex};
use indicatif::ParallelProgressIterator;
use indicatif::ProgressIterator;
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
const FILES_DIR: &str = ".";
fn main() {
let start = Instant::now();
let files = read_files_parallel(FILES_DIR);
let elapsed = start.elapsed();
println!("Loaded {} files in {:?}", files.len(), elapsed);
println!("Average size per file: {} bytes", calc_average_size(&files));
}
fn calc_average_size(text_files: &[String]) -> usize {
    if text_files.is_empty() {
        return 0; // avoid dividing by zero when no files were loaded
    }
    let sum: usize = text_files.iter().map(|text| text.len()).sum();
    sum / text_files.len()
}
| fn dir_to_paths(dir_path: &str) -> Vec<PathBuf> {
fs::read_dir(dir_path)
.unwrap()
.map(|file| file.unwrap().path())
.collect()
}
fn read_files(dir_path: &str) -> Vec<String> {
let paths = dir_to_paths(dir_path);
let mut contents: Vec<String> = Vec::new();
paths.iter().progress().for_each(|path| {
let result = fs::read_to_string(path.as_path());
match result {
Ok(content) => contents.push(content),
Err(e) => println!("Unable to read file {}", e),
}
});
contents
}
fn read_files_parallel(dir_path: &str) -> Vec<String> {
let paths = dir_to_paths(dir_path);
let contents = Arc::new(Mutex::new(Vec::new()));
paths.par_iter().progress().for_each(|path| {
let result = fs::read_to_string(path.as_path());
match result {
Ok(content) => contents.lock().unwrap().push(content),
Err(e) => println!("Unable to read file {}", e),
}
});
    let unlocked = contents.lock().unwrap().clone();
    unlocked
} | |
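// Editor's sketch (not part of main.rs above): the Arc<Mutex<Vec<_>>> in
// read_files_parallel serializes every push. Assuming the same rayon/indicatif
// imports, a lock-free variant can collect straight from the parallel iterator;
// the function name read_files_parallel_collect is hypothetical.
fn read_files_parallel_collect(dir_path: &str) -> Vec<String> {
    let paths = dir_to_paths(dir_path);
    paths
        .par_iter()
        .progress()
        .filter_map(|path| match fs::read_to_string(path.as_path()) {
            Ok(content) => Some(content),
            Err(e) => {
                // Report unreadable entries (e.g. subdirectories) and skip them.
                println!("Unable to read file {}", e);
                None
            }
        })
        .collect()
}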
kubenet_linux_test.go | //go:build !dockerless
// +build !dockerless
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubenet
import (
"fmt"
"net"
"strings"
"testing"
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
utilsets "k8s.io/apimachinery/pkg/util/sets"
sysctltest "k8s.io/component-helpers/node/utils/sysctl/testing"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
mockcni "k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/testing"
nettest "k8s.io/kubernetes/pkg/kubelet/dockershim/network/testing"
"k8s.io/kubernetes/pkg/util/bandwidth"
ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
netutils "k8s.io/utils/net"
)
// test it fulfills the NetworkPlugin interface
var _ network.NetworkPlugin = &kubenetNetworkPlugin{}
func newFakeKubenetPlugin(initMap map[kubecontainer.ContainerID]utilsets.String, execer exec.Interface, host network.Host) *kubenetNetworkPlugin {
return &kubenetNetworkPlugin{
podIPs: initMap,
execer: execer,
mtu: 1460,
host: host,
}
}
func TestGetPodNetworkStatus(t *testing.T) {
podIPMap := make(map[kubecontainer.ContainerID]utilsets.String)
podIPMap[kubecontainer.ContainerID{ID: "1"}] = utilsets.NewString("10.245.0.2")
podIPMap[kubecontainer.ContainerID{ID: "2"}] = utilsets.NewString("10.245.0.3")
podIPMap[kubecontainer.ContainerID{ID: "3"}] = utilsets.NewString("10.245.0.4", "2000::")
podIPMap[kubecontainer.ContainerID{ID: "4"}] = utilsets.NewString("2000::2")
testCases := []struct {
id string
expectError bool
expectIP utilsets.String
}{
//in podCIDR map
{
id: "1",
expectError: false,
expectIP: utilsets.NewString("10.245.0.2"),
},
{
id: "2",
expectError: false,
expectIP: utilsets.NewString("10.245.0.3"),
},
{
id: "3",
expectError: false,
expectIP: utilsets.NewString("10.245.0.4", "2000::"),
},
{
id: "4",
expectError: false,
expectIP: utilsets.NewString("2000::2"),
},
//not in podIP map
{
id: "does-not-exist-map",
expectError: true,
expectIP: nil,
},
//TODO: add test cases for retrieving ip inside container network namespace
}
fakeCmds := make([]fakeexec.FakeCommandAction, 0)
	for _, tc := range testCases {
		tc := tc // capture a per-iteration copy; the closure below must not read the shared loop variable
		// the fake commands return the IP from the given index, or an error
		fCmd := fakeexec.FakeCmd{
			CombinedOutputScript: []fakeexec.FakeAction{
				func() ([]byte, []byte, error) {
					ips, ok := podIPMap[kubecontainer.ContainerID{ID: tc.id}]
					if !ok {
						return nil, nil, fmt.Errorf("pod IP %q not found", tc.id)
					}
					ipsList := ips.UnsortedList()
					return []byte(ipsList[0]), nil, nil
				},
			},
		}
		fakeCmds = append(fakeCmds, func(cmd string, args ...string) exec.Cmd {
			return fakeexec.InitFakeCmd(&fCmd, cmd, args...)
		})
	}
fexec := fakeexec.FakeExec{
CommandScript: fakeCmds,
LookPathFunc: func(file string) (string, error) {
return fmt.Sprintf("/fake-bin/%s", file), nil
},
}
fhost := nettest.NewFakeHost(nil)
fakeKubenet := newFakeKubenetPlugin(podIPMap, &fexec, fhost)
for i, tc := range testCases {
out, err := fakeKubenet.GetPodNetworkStatus("", "", kubecontainer.ContainerID{ID: tc.id})
		if tc.expectError {
			if err == nil {
				t.Errorf("Test case %d expects error but got none", i)
			}
			continue
		}
		if err != nil {
			t.Errorf("Test case %d expects no error but got error: %v", i, err)
		}
seen := make(map[string]bool)
allExpected := tc.expectIP.UnsortedList()
for _, expectedIP := range allExpected {
for _, outIP := range out.IPs {
if expectedIP == outIP.String() {
seen[expectedIP] = true
break
}
}
}
if len(tc.expectIP) != len(seen) {
t.Errorf("Test case %d expects ip %s but got %s", i, tc.expectIP, out.IP.String())
}
}
}
// TestTeardownCallsShaper tests that a `TearDown` call does call
// `shaper.Reset`
func | (t *testing.T) {
fexec := &fakeexec.FakeExec{
CommandScript: []fakeexec.FakeCommandAction{},
LookPathFunc: func(file string) (string, error) {
return fmt.Sprintf("/fake-bin/%s", file), nil
},
}
fhost := nettest.NewFakeHost(nil)
fshaper := &bandwidth.FakeShaper{}
mockcni := &mockcni.MockCNI{}
ips := make(map[kubecontainer.ContainerID]utilsets.String)
kubenet := newFakeKubenetPlugin(ips, fexec, fhost)
kubenet.loConfig = &libcni.NetworkConfig{
Network: &types.NetConf{
Name: "loopback-fake",
Type: "loopback",
},
}
kubenet.cniConfig = mockcni
kubenet.iptables = ipttest.NewFake()
kubenet.bandwidthShaper = fshaper
mockcni.On("DelNetwork", mock.AnythingOfType("*context.timerCtx"), mock.AnythingOfType("*libcni.NetworkConfig"), mock.AnythingOfType("*libcni.RuntimeConf")).Return(nil)
details := make(map[string]interface{})
details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = "10.0.0.1/24"
kubenet.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)
existingContainerID := kubecontainer.BuildContainerID("docker", "123")
kubenet.podIPs[existingContainerID] = utilsets.NewString("10.0.0.1")
if err := kubenet.TearDownPod("namespace", "name", existingContainerID); err != nil {
t.Fatalf("Unexpected error in TearDownPod: %v", err)
}
assert.Equal(t, []string{"10.0.0.1/32"}, fshaper.ResetCIDRs, "shaper.Reset should have been called")
mockcni.AssertExpectations(t)
}
// TestInit tests that a `Init` call with an MTU sets the MTU
func TestInit_MTU(t *testing.T) {
var fakeCmds []fakeexec.FakeCommandAction
{
// modprobe br-netfilter
fCmd := fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeAction{
func() ([]byte, []byte, error) {
return make([]byte, 0), nil, nil
},
},
}
fakeCmds = append(fakeCmds, func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fCmd, cmd, args...)
})
}
fexec := &fakeexec.FakeExec{
CommandScript: fakeCmds,
LookPathFunc: func(file string) (string, error) {
return fmt.Sprintf("/fake-bin/%s", file), nil
},
}
fhost := nettest.NewFakeHost(nil)
ips := make(map[kubecontainer.ContainerID]utilsets.String)
kubenet := newFakeKubenetPlugin(ips, fexec, fhost)
kubenet.iptables = ipttest.NewFake()
sysctl := sysctltest.NewFake()
sysctl.Settings["net/bridge/bridge-nf-call-iptables"] = 0
kubenet.sysctl = sysctl
if err := kubenet.Init(nettest.NewFakeHost(nil), kubeletconfig.HairpinNone, "10.0.0.0/8", 1234); err != nil {
t.Fatalf("Unexpected error in Init: %v", err)
}
assert.Equal(t, 1234, kubenet.mtu, "kubenet.mtu should have been set")
assert.Equal(t, 1, sysctl.Settings["net/bridge/bridge-nf-call-iptables"], "net/bridge/bridge-nf-call-iptables sysctl should have been set")
}
// TestTearDownWithoutRuntime invokes the plugin's TearDownPod without a runtime.
// This is how kubenet is invoked from the CRI.
func TestTearDownWithoutRuntime(t *testing.T) {
testCases := []struct {
podCIDR []string
expectedPodCIDR []string
ip string
}{
{
podCIDR: []string{"10.0.0.0/24"},
expectedPodCIDR: []string{"10.0.0.0/24"},
ip: "10.0.0.1",
},
{
podCIDR: []string{"10.0.0.1/24"},
expectedPodCIDR: []string{"10.0.0.0/24"},
ip: "10.0.0.1",
},
{
podCIDR: []string{"2001:beef::/48"},
expectedPodCIDR: []string{"2001:beef::/48"},
ip: "2001:beef::1",
},
{
podCIDR: []string{"2001:beef::1/48"},
expectedPodCIDR: []string{"2001:beef::/48"},
ip: "2001:beef::1",
},
}
for _, tc := range testCases {
fhost := nettest.NewFakeHost(nil)
fhost.Legacy = false
mockcni := &mockcni.MockCNI{}
fexec := &fakeexec.FakeExec{
CommandScript: []fakeexec.FakeCommandAction{},
LookPathFunc: func(file string) (string, error) {
return fmt.Sprintf("/fake-bin/%s", file), nil
},
}
ips := make(map[kubecontainer.ContainerID]utilsets.String)
kubenet := newFakeKubenetPlugin(ips, fexec, fhost)
kubenet.loConfig = &libcni.NetworkConfig{
Network: &types.NetConf{
Name: "loopback-fake",
Type: "loopback",
},
}
kubenet.cniConfig = mockcni
kubenet.iptables = ipttest.NewFake()
details := make(map[string]interface{})
details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = strings.Join(tc.podCIDR, ",")
kubenet.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)
if len(kubenet.podCIDRs) != len(tc.podCIDR) {
t.Errorf("generated podCidr: %q, expecting: %q are not of the same length", kubenet.podCIDRs, tc.podCIDR)
continue
}
for idx := range tc.podCIDR {
if kubenet.podCIDRs[idx].String() != tc.expectedPodCIDR[idx] {
t.Errorf("generated podCidr: %q, expecting: %q", kubenet.podCIDRs[idx].String(), tc.expectedPodCIDR[idx])
}
}
existingContainerID := kubecontainer.BuildContainerID("docker", "123")
kubenet.podIPs[existingContainerID] = utilsets.NewString(tc.ip)
mockcni.On("DelNetwork", mock.AnythingOfType("*context.timerCtx"), mock.AnythingOfType("*libcni.NetworkConfig"), mock.AnythingOfType("*libcni.RuntimeConf")).Return(nil)
if err := kubenet.TearDownPod("namespace", "name", existingContainerID); err != nil {
t.Fatalf("Unexpected error in TearDownPod: %v", err)
}
// Assert that the CNI DelNetwork made it through and we didn't crash
// without a runtime.
mockcni.AssertExpectations(t)
}
}
func TestGetRoutesConfig(t *testing.T) {
for _, test := range []struct {
cidrs []string
routes string
}{
{
cidrs: []string{"10.0.0.1/24"},
routes: `{"dst": "0.0.0.0/0"}`,
},
{
cidrs: []string{"2001:4860:4860::8888/32"},
routes: `{"dst": "::/0"}`,
},
{
cidrs: []string{"2001:4860:4860::8888/32", "10.0.0.1/24"},
routes: `{"dst": "0.0.0.0/0"},{"dst": "::/0"}`,
},
} {
var cidrs []*net.IPNet
for _, c := range test.cidrs {
_, cidr, err := netutils.ParseCIDRSloppy(c)
assert.NoError(t, err)
cidrs = append(cidrs, cidr)
}
fakeKubenet := &kubenetNetworkPlugin{podCIDRs: cidrs}
assert.Equal(t, test.routes, fakeKubenet.getRoutesConfig())
}
}
func TestGetRangesConfig(t *testing.T) {
for _, test := range []struct {
cidrs []string
ranges string
}{
{
cidrs: []string{"10.0.0.0/24"},
ranges: `
[{
"subnet": "10.0.0.0/24"
}]`,
},
{
cidrs: []string{"2001:4860::/32"},
ranges: `
[{
"subnet": "2001:4860::/32"
}]`,
},
{
cidrs: []string{"10.0.0.0/24", "2001:4860::/32"},
ranges: `
[{
"subnet": "10.0.0.0/24"
}],
[{
"subnet": "2001:4860::/32"
}]`,
},
} {
var cidrs []*net.IPNet
for _, c := range test.cidrs {
_, cidr, err := netutils.ParseCIDRSloppy(c)
assert.NoError(t, err)
cidrs = append(cidrs, cidr)
}
fakeKubenet := &kubenetNetworkPlugin{podCIDRs: cidrs}
assert.Equal(t, test.ranges, fakeKubenet.getRangesConfig())
}
}
//TODO: add unit test for each implementation of network plugin interface
| TestTeardownCallsShaper |
common.rs | // Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Common types and traits for cuckoo/cuckatoo family of solvers
use crate::blake2::blake2b::blake2b;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use crate::pow::error::{Error, ErrorKind};
use crate::pow::num::{PrimInt, ToPrimitive};
use crate::pow::siphash::siphash24;
use std::fmt;
use std::hash::Hash;
use std::io::Cursor;
use std::ops::{BitOrAssign, Mul};
/// Operations needed for edge type (going to be u32 or u64)
pub trait EdgeType: PrimInt + ToPrimitive + Mul + BitOrAssign + Hash {}
impl EdgeType for u32 {}
impl EdgeType for u64 {}
/// An edge in the Cuckoo graph, simply references two u64 nodes.
#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct Edge<T>
where
T: EdgeType,
{
pub u: T,
pub v: T,
}
impl<T> fmt::Display for Edge<T>
where
T: EdgeType,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"(u: {}, v: {})",
self.u.to_u64().unwrap_or(0),
self.v.to_u64().unwrap_or(0)
)
}
}
/// An element of an adjacency list
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Link<T>
where
T: EdgeType,
{
pub next: T,
pub to: T,
}
impl<T> fmt::Display for Link<T>
where
T: EdgeType,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"(next: {}, to: {})",
self.next.to_u64().unwrap_or(0),
self.to.to_u64().unwrap_or(0)
)
}
}
pub fn set_header_nonce(header: &[u8], nonce: Option<u64>) -> Result<[u64; 4], Error> {
if let Some(n) = nonce {
let len = header.len();
let mut header = header.to_owned();
header.truncate(len - 4); // drop last 4 bytes (u32) off the end
header.write_u32::<LittleEndian>(n as u32)?;
create_siphash_keys(&header)
} else {
create_siphash_keys(&header)
}
}
pub fn create_siphash_keys(header: &[u8]) -> Result<[u64; 4], Error> {
let h = blake2b(32, &[], &header);
let hb = h.as_bytes();
let mut rdr = Cursor::new(hb);
Ok([
rdr.read_u64::<LittleEndian>()?,
rdr.read_u64::<LittleEndian>()?,
rdr.read_u64::<LittleEndian>()?,
rdr.read_u64::<LittleEndian>()?,
])
}
/// Macro to clean up u64 unwrapping
#[macro_export]
macro_rules! to_u64 {
($n:expr) => {
$n.to_u64().ok_or(ErrorKind::IntegerCast)?
};
}
/// Macro to clean up u64 unwrapping as u32
#[macro_export]
macro_rules! to_u32 {
($n:expr) => {
$n.to_u64().ok_or(ErrorKind::IntegerCast)? as u32
};
}
/// Macro to clean up u64 unwrapping as usize
#[macro_export]
macro_rules! to_usize {
($n:expr) => {
$n.to_u64().ok_or(ErrorKind::IntegerCast)? as usize
};
}
/// Macro to clean up casting to edge type
/// TODO: this macro is unhygienic: it assumes a type parameter `T` is in scope at the call site
#[macro_export]
macro_rules! to_edge {
($n:expr) => {
T::from($n).ok_or(ErrorKind::IntegerCast)?
};
}
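// Editor's sketch (not part of the original file): how the conversion macros
// above are meant to be used inside a function returning Result<_, Error>.
// `to_usize!` bails out with ErrorKind::IntegerCast when the value cannot be
// represented, mirroring the explicit `ok_or(ErrorKind::IntegerCast)?` calls
// elsewhere in this file.
#[allow(dead_code)]
fn example_edge_to_index<T: EdgeType>(edge: T) -> Result<usize, Error> {
    Ok(to_usize!(edge))
}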
/// Utility struct to calculate commonly used Cuckoo parameters calculated
/// from header, nonce, edge_bits, etc.
pub struct CuckooParams<T>
where
T: EdgeType,
{
pub edge_bits: u8,
pub proof_size: usize,
pub num_edges: u64,
pub siphash_keys: [u64; 4],
pub edge_mask: T,
}
impl<T> CuckooParams<T>
where
T: EdgeType,
{
/// Instantiates new params and calculates the edge mask, etc.
pub fn new(edge_bits: u8, proof_size: usize) -> Result<CuckooParams<T>, Error> {
let num_edges = 1u64 << edge_bits;
let edge_mask = to_edge!(num_edges - 1);
Ok(CuckooParams {
edge_bits,
proof_size,
num_edges,
siphash_keys: [0; 4],
edge_mask,
})
}
/// Reset the main keys used for siphash from the header and nonce
pub fn | (&mut self, header: Vec<u8>, nonce: Option<u64>) -> Result<(), Error> {
self.siphash_keys = set_header_nonce(&header, nonce)?;
Ok(())
}
/// Return siphash masked for type
pub fn sipnode(&self, edge: T, uorv: u64, shift: bool) -> Result<T, Error> {
let hash_u64 = siphash24(
&self.siphash_keys,
2 * edge.to_u64().ok_or(ErrorKind::IntegerCast)? + uorv,
);
let mut masked = hash_u64 & self.edge_mask.to_u64().ok_or(ErrorKind::IntegerCast)?;
if shift {
masked <<= 1;
masked |= uorv;
}
Ok(T::from(masked).ok_or(ErrorKind::IntegerCast)?)
}
}
| reset_header_nonce |
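// Editor's sketch (not from common.rs above): the typical driving sequence for
// CuckooParams — construct, bind a header/nonce, then derive both node
// endpoints of an edge. The header bytes, nonce, and edge index are arbitrary
// illustrative values, and the function name is hypothetical.
#[allow(dead_code)]
fn example_cuckoo_params_usage() -> Result<(), Error> {
    let mut params = CuckooParams::<u64>::new(19, 42)?; // edge_bits = 19, proof_size = 42
    // Derives siphash keys by blake2b-hashing the header with the nonce
    // written into its last 4 bytes (see set_header_nonce above).
    params.reset_header_nonce(vec![0u8; 80], Some(7))?;
    let u = params.sipnode(11, 0, false)?; // endpoint in the U partition
    let v = params.sipnode(11, 1, false)?; // endpoint in the V partition
    assert!(u <= params.edge_mask && v <= params.edge_mask);
    Ok(())
}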
mod.rs | //! Logic and data structures related to impl specialization, explained in
//! greater detail below.
//!
//! At the moment, this implementation supports only the simple "chain" rule:
//! If any two impls overlap, one must be a strict subset of the other.
//!
//! See the [rustc dev guide] for a bit more detail on how specialization
//! fits together with the rest of the trait machinery.
//!
//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/specialization.html
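//!
//! For orientation, an editor's illustrative sketch (not from this module):
//! under the nightly `specialization` feature, a blanket impl may be
//! specialized by an impl that applies to a strict subset of its types:
//!
//! ```rust
//! trait Greet { fn greet(&self) -> String; }
//! impl<T> Greet for T { default fn greet(&self) -> String { "hello".to_string() } }
//! impl Greet for u32 { fn greet(&self) -> String { "hello, u32".to_string() } }
//! ```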
pub mod specialization_graph;
use specialization_graph::GraphExt;
use crate::infer::{InferCtxt, InferOk, TyCtxtInferExt};
use crate::traits::select::IntercrateAmbiguityCause;
use crate::traits::{self, coherence, FutureCompatOverlapErrorKind, ObligationCause, TraitEngine};
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::{struct_span_err, EmissionGuarantee};
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_middle::lint::LintDiagnosticBuilder;
use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef};
use rustc_middle::ty::{self, TyCtxt};
use rustc_session::lint::builtin::COHERENCE_LEAK_CHECK;
use rustc_session::lint::builtin::ORDER_DEPENDENT_TRAIT_OBJECTS;
use rustc_span::{Span, DUMMY_SP};
use super::util::impl_trait_ref_and_oblig;
use super::{FulfillmentContext, SelectionContext};
/// Information pertinent to an overlapping impl error.
#[derive(Debug)]
pub struct OverlapError {
pub with_impl: DefId,
pub trait_desc: String,
pub self_desc: Option<String>,
pub intercrate_ambiguity_causes: Vec<IntercrateAmbiguityCause>,
pub involves_placeholder: bool,
}
/// Given a subst for the requested impl, translate it to a subst
/// appropriate for the actual item definition (whether it be in that impl,
/// a parent impl, or the trait).
///
/// When we have selected one impl, but are actually using item definitions from
/// a parent impl providing a default, we need a way to translate between the
/// type parameters of the two impls. Here the `source_impl` is the one we've
/// selected, and `source_substs` is a substitution of its generics.
/// And `target_node` is the impl/trait we're actually going to get the
/// definition from. The resulting substitution will map from `target_node`'s
/// generics to `source_impl`'s generics as instantiated by `source_subst`.
///
/// For example, consider the following scenario:
///
/// ```rust
/// trait Foo { ... }
/// impl<T, U> Foo for (T, U) { ... } // target impl
/// impl<V> Foo for (V, V) { ... } // source impl
/// ```
///
/// Suppose we have selected "source impl" with `V` instantiated with `u32`.
/// This function will produce a substitution with `T` and `U` both mapping to `u32`.
/// | /// impl<'a, I, T: 'a> Iterator for Cloned<I>
/// where I: Iterator<Item = &'a T>, T: Clone
/// ```
///
/// In a case like this, the substitution for `T` is determined indirectly,
/// through associated type projection. We deal with such cases by using
/// *fulfillment* to relate the two impls, requiring that all projections are
/// resolved.
pub fn translate_substs<'a, 'tcx>(
infcx: &InferCtxt<'a, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
source_impl: DefId,
source_substs: SubstsRef<'tcx>,
target_node: specialization_graph::Node,
) -> SubstsRef<'tcx> {
debug!(
"translate_substs({:?}, {:?}, {:?}, {:?})",
param_env, source_impl, source_substs, target_node
);
let source_trait_ref =
infcx.tcx.impl_trait_ref(source_impl).unwrap().subst(infcx.tcx, &source_substs);
// translate the Self and Param parts of the substitution, since those
// vary across impls
let target_substs = match target_node {
specialization_graph::Node::Impl(target_impl) => {
// no need to translate if we're targeting the impl we started with
if source_impl == target_impl {
return source_substs;
}
fulfill_implication(infcx, param_env, source_trait_ref, target_impl).unwrap_or_else(
|_| {
bug!(
"When translating substitutions for specialization, the expected \
specialization failed to hold"
)
},
)
}
specialization_graph::Node::Trait(..) => source_trait_ref.substs,
};
// directly inherit the method generics, since those do not vary across impls
source_substs.rebase_onto(infcx.tcx, source_impl, target_substs)
}
/// Is `impl1` a specialization of `impl2`?
///
/// Specialization is determined by the sets of types to which the impls apply;
/// `impl1` specializes `impl2` if it applies to a subset of the types `impl2` applies
/// to.
#[instrument(skip(tcx), level = "debug")]
pub(super) fn specializes(tcx: TyCtxt<'_>, (impl1_def_id, impl2_def_id): (DefId, DefId)) -> bool {
// The feature gate should prevent introducing new specializations, but not
// taking advantage of upstream ones.
let features = tcx.features();
let specialization_enabled = features.specialization || features.min_specialization;
if !specialization_enabled && (impl1_def_id.is_local() || impl2_def_id.is_local()) {
return false;
}
// We determine whether there's a subset relationship by:
//
// - replacing bound vars with placeholders in impl1,
// - assuming the where clauses for impl1,
// - instantiating impl2 with fresh inference variables,
// - unifying,
// - attempting to prove the where clauses for impl2
//
// The last three steps are encapsulated in `fulfill_implication`.
//
// See RFC 1210 for more details and justification.
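// Illustrative sketch (not from this file): under the chain rule,
//
//     impl<T> Trait for T { .. }   // impl2: applies to every type
//     impl Trait for u8 { .. }     // impl1: applies to a strict subset
//
// `specializes(tcx, (impl1, impl2))` would hold, while two overlapping
// impls where neither applies to a subset of the other are rejected.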
// Currently we do not allow e.g., a negative impl to specialize a positive one
if tcx.impl_polarity(impl1_def_id) != tcx.impl_polarity(impl2_def_id) {
return false;
}
// create a parameter environment corresponding to a (placeholder) instantiation of impl1
let penv = tcx.param_env(impl1_def_id);
let impl1_trait_ref = tcx.impl_trait_ref(impl1_def_id).unwrap();
// Create an infcx, taking the predicates of impl1 as assumptions:
tcx.infer_ctxt().enter(|infcx| {
// Normalize the trait reference. The WF rules ought to ensure
// that this always succeeds.
let impl1_trait_ref = match traits::fully_normalize(
&infcx,
FulfillmentContext::new(),
ObligationCause::dummy(),
penv,
impl1_trait_ref,
) {
Ok(impl1_trait_ref) => impl1_trait_ref,
Err(err) => {
bug!("failed to fully normalize {:?}: {:?}", impl1_trait_ref, err);
}
};
// Attempt to prove that impl2 applies, given all of the above.
fulfill_implication(&infcx, penv, impl1_trait_ref, impl2_def_id).is_ok()
})
}
/// Attempt to fulfill all obligations of `target_impl` after unification with
/// `source_trait_ref`. If successful, returns a substitution for *all* the
/// generics of `target_impl`, including both those needed to unify with
/// `source_trait_ref` and those whose identity is determined via a where
/// clause in the impl.
fn fulfill_implication<'a, 'tcx>(
infcx: &InferCtxt<'a, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
source_trait_ref: ty::TraitRef<'tcx>,
target_impl: DefId,
) -> Result<SubstsRef<'tcx>, ()> {
debug!(
"fulfill_implication({:?}, trait_ref={:?} |- {:?} applies)",
param_env, source_trait_ref, target_impl
);
let selcx = &mut SelectionContext::new(&infcx);
let target_substs = infcx.fresh_substs_for_item(DUMMY_SP, target_impl);
let (target_trait_ref, obligations) =
impl_trait_ref_and_oblig(selcx, param_env, target_impl, target_substs);
// do the impls unify? If not, no specialization.
let Ok(InferOk { obligations: more_obligations, .. }) =
infcx.at(&ObligationCause::dummy(), param_env).eq(source_trait_ref, target_trait_ref)
else {
debug!(
"fulfill_implication: {:?} does not unify with {:?}",
source_trait_ref, target_trait_ref
);
return Err(());
};
// attempt to prove all of the predicates for impl2 given those for impl1
// (which are packed up in penv)
infcx.save_and_restore_in_snapshot_flag(|infcx| {
// If we came from `translate_substs`, we already know that the
// predicates for our impl hold (after all, we know that a more
// specialized impl holds, so our impl must hold too), and
// we only want to process the projections to determine
// the types in our substs using RFC 447, so we can safely
// ignore region obligations, which allows us to avoid threading
// a node-id to assign them with.
//
// If we came from specialization graph construction, then
// we already make a mockery out of the region system, so
// why not ignore them a bit earlier?
let mut fulfill_cx = FulfillmentContext::new_ignoring_regions();
for oblig in obligations.chain(more_obligations) {
fulfill_cx.register_predicate_obligation(&infcx, oblig);
}
match fulfill_cx.select_all_or_error(infcx).as_slice() {
[] => {
debug!(
"fulfill_implication: an impl for {:?} specializes {:?}",
source_trait_ref, target_trait_ref
);
// Now resolve the *substitution* we built for the target earlier, replacing
// the inference variables inside with whatever we got from fulfillment.
Ok(infcx.resolve_vars_if_possible(target_substs))
}
errors => {
// no dice!
debug!(
"fulfill_implication: for impls on {:?} and {:?}, \
could not fulfill: {:?} given {:?}",
source_trait_ref,
target_trait_ref,
errors,
param_env.caller_bounds()
);
Err(())
}
}
})
}
// Query provider for `specialization_graph_of`.
pub(super) fn specialization_graph_provider(
tcx: TyCtxt<'_>,
trait_id: DefId,
) -> specialization_graph::Graph {
let mut sg = specialization_graph::Graph::new();
let overlap_mode = specialization_graph::OverlapMode::get(tcx, trait_id);
let mut trait_impls: Vec<_> = tcx.all_impls(trait_id).collect();
// The coherence checking implementation seems to rely on impls being
// iterated over (roughly) in definition order, so we are sorting by
// negated `CrateNum` (so remote definitions are visited first) and then
// by a flattened version of the `DefIndex`.
trait_impls
.sort_unstable_by_key(|def_id| (-(def_id.krate.as_u32() as i64), def_id.index.index()));
for impl_def_id in trait_impls {
if let Some(impl_def_id) = impl_def_id.as_local() {
// This is where impl overlap checking happens:
let insert_result = sg.insert(tcx, impl_def_id.to_def_id(), overlap_mode);
// Report error if there was one.
let (overlap, used_to_be_allowed) = match insert_result {
Err(overlap) => (Some(overlap), None),
Ok(Some(overlap)) => (Some(overlap.error), Some(overlap.kind)),
Ok(None) => (None, None),
};
if let Some(overlap) = overlap {
report_overlap_conflict(tcx, overlap, impl_def_id, used_to_be_allowed, &mut sg);
}
} else {
let parent = tcx.impl_parent(impl_def_id).unwrap_or(trait_id);
sg.record_impl_from_cstore(tcx, parent, impl_def_id)
}
}
sg
}
// This function is only used when encountering errors; inlining
// it negatively impacts perf.
#[cold]
#[inline(never)]
fn report_overlap_conflict(
tcx: TyCtxt<'_>,
overlap: OverlapError,
impl_def_id: LocalDefId,
used_to_be_allowed: Option<FutureCompatOverlapErrorKind>,
sg: &mut specialization_graph::Graph,
) {
let impl_polarity = tcx.impl_polarity(impl_def_id.to_def_id());
let other_polarity = tcx.impl_polarity(overlap.with_impl);
match (impl_polarity, other_polarity) {
(ty::ImplPolarity::Negative, ty::ImplPolarity::Positive) => {
report_negative_positive_conflict(
tcx,
&overlap,
impl_def_id,
impl_def_id.to_def_id(),
overlap.with_impl,
sg,
);
}
(ty::ImplPolarity::Positive, ty::ImplPolarity::Negative) => {
report_negative_positive_conflict(
tcx,
&overlap,
impl_def_id,
overlap.with_impl,
impl_def_id.to_def_id(),
sg,
);
}
_ => {
report_conflicting_impls(tcx, overlap, impl_def_id, used_to_be_allowed, sg);
}
}
}
fn report_negative_positive_conflict(
tcx: TyCtxt<'_>,
overlap: &OverlapError,
local_impl_def_id: LocalDefId,
negative_impl_def_id: DefId,
positive_impl_def_id: DefId,
sg: &mut specialization_graph::Graph,
) {
let impl_span = tcx
.sess
.source_map()
.guess_head_span(tcx.span_of_impl(local_impl_def_id.to_def_id()).unwrap());
let mut err = struct_span_err!(
tcx.sess,
impl_span,
E0751,
"found both positive and negative implementation of trait `{}`{}:",
overlap.trait_desc,
overlap.self_desc.clone().map_or_else(String::new, |ty| format!(" for type `{}`", ty))
);
match tcx.span_of_impl(negative_impl_def_id) {
Ok(span) => {
err.span_label(
tcx.sess.source_map().guess_head_span(span),
"negative implementation here".to_string(),
);
}
Err(cname) => {
err.note(&format!("negative implementation in crate `{}`", cname));
}
}
match tcx.span_of_impl(positive_impl_def_id) {
Ok(span) => {
err.span_label(
tcx.sess.source_map().guess_head_span(span),
"positive implementation here".to_string(),
);
}
Err(cname) => {
err.note(&format!("positive implementation in crate `{}`", cname));
}
}
sg.has_errored = Some(err.emit());
}
fn report_conflicting_impls(
tcx: TyCtxt<'_>,
overlap: OverlapError,
impl_def_id: LocalDefId,
used_to_be_allowed: Option<FutureCompatOverlapErrorKind>,
sg: &mut specialization_graph::Graph,
) {
let impl_span =
tcx.sess.source_map().guess_head_span(tcx.span_of_impl(impl_def_id.to_def_id()).unwrap());
// Work to be done after we've built the DiagnosticBuilder. We have to define it
// now because the struct_lint methods don't return the DiagnosticBuilder
// that's passed in.
fn decorate<G: EmissionGuarantee>(
tcx: TyCtxt<'_>,
overlap: OverlapError,
used_to_be_allowed: Option<FutureCompatOverlapErrorKind>,
impl_span: Span,
err: LintDiagnosticBuilder<'_, G>,
) -> G {
let msg = format!(
"conflicting implementations of trait `{}`{}{}",
overlap.trait_desc,
overlap
.self_desc
.clone()
.map_or_else(String::new, |ty| { format!(" for type `{}`", ty) }),
match used_to_be_allowed {
Some(FutureCompatOverlapErrorKind::Issue33140) => ": (E0119)",
_ => "",
}
);
let mut err = err.build(&msg);
match tcx.span_of_impl(overlap.with_impl) {
Ok(span) => {
err.span_label(
tcx.sess.source_map().guess_head_span(span),
"first implementation here".to_string(),
);
err.span_label(
impl_span,
format!(
"conflicting implementation{}",
overlap.self_desc.map_or_else(String::new, |ty| format!(" for `{}`", ty))
),
);
}
Err(cname) => {
let msg = match to_pretty_impl_header(tcx, overlap.with_impl) {
Some(s) => format!("conflicting implementation in crate `{}`:\n- {}", cname, s),
None => format!("conflicting implementation in crate `{}`", cname),
};
err.note(&msg);
}
}
for cause in &overlap.intercrate_ambiguity_causes {
cause.add_intercrate_ambiguity_hint(&mut err);
}
if overlap.involves_placeholder {
coherence::add_placeholder_note(&mut err);
}
err.emit()
}
match used_to_be_allowed {
None => {
let reported = if overlap.with_impl.is_local()
|| !tcx.orphan_check_crate(()).contains(&impl_def_id)
{
let err = struct_span_err!(tcx.sess, impl_span, E0119, "");
Some(decorate(
tcx,
overlap,
used_to_be_allowed,
impl_span,
LintDiagnosticBuilder::new(err),
))
} else {
Some(tcx.sess.delay_span_bug(impl_span, "impl should have failed the orphan check"))
};
sg.has_errored = reported;
}
Some(kind) => {
let lint = match kind {
FutureCompatOverlapErrorKind::Issue33140 => ORDER_DEPENDENT_TRAIT_OBJECTS,
FutureCompatOverlapErrorKind::LeakCheck => COHERENCE_LEAK_CHECK,
};
tcx.struct_span_lint_hir(
lint,
tcx.hir().local_def_id_to_hir_id(impl_def_id),
impl_span,
|ldb| {
decorate(tcx, overlap, used_to_be_allowed, impl_span, ldb);
},
);
}
};
}
/// Recovers the "impl X for Y" signature from `impl_def_id` and returns it as a
/// string.
crate fn to_pretty_impl_header(tcx: TyCtxt<'_>, impl_def_id: DefId) -> Option<String> {
use std::fmt::Write;
let trait_ref = tcx.impl_trait_ref(impl_def_id)?;
let mut w = "impl".to_owned();
let substs = InternalSubsts::identity_for_item(tcx, impl_def_id);
// FIXME: Currently only handles ?Sized.
// Needs to support ?Move and ?DynSized when they are implemented.
let mut types_without_default_bounds = FxHashSet::default();
let sized_trait = tcx.lang_items().sized_trait();
if !substs.is_empty() {
types_without_default_bounds.extend(substs.types());
w.push('<');
w.push_str(
&substs
.iter()
.map(|k| k.to_string())
.filter(|k| k != "'_")
.collect::<Vec<_>>()
.join(", "),
);
w.push('>');
}
write!(w, " {} for {}", trait_ref.print_only_trait_path(), tcx.type_of(impl_def_id)).unwrap();
// The predicates will contain default bounds like `T: Sized`. We need to
// remove these bounds, and add `T: ?Sized` to any untouched type parameters.
let predicates = tcx.predicates_of(impl_def_id).predicates;
let mut pretty_predicates =
Vec::with_capacity(predicates.len() + types_without_default_bounds.len());
for (mut p, _) in predicates {
if let Some(poly_trait_ref) = p.to_opt_poly_trait_pred() {
if Some(poly_trait_ref.def_id()) == sized_trait {
types_without_default_bounds.remove(&poly_trait_ref.self_ty().skip_binder());
continue;
}
if ty::BoundConstness::ConstIfConst == poly_trait_ref.skip_binder().constness {
let new_trait_pred = poly_trait_ref.map_bound(|mut trait_pred| {
trait_pred.constness = ty::BoundConstness::NotConst;
trait_pred
});
p = tcx.mk_predicate(new_trait_pred.map_bound(ty::PredicateKind::Trait))
}
}
pretty_predicates.push(p.to_string());
}
pretty_predicates
.extend(types_without_default_bounds.iter().map(|ty| format!("{}: ?Sized", ty)));
if !pretty_predicates.is_empty() {
write!(w, "\n where {}", pretty_predicates.join(", ")).unwrap();
}
w.push(';');
Some(w)
} | /// where-clauses add some trickiness here, because they can be used to "define"
/// an argument indirectly:
///
/// ```rust |
test_bucket.py | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class BucketTestCase(IntegrationTestCase):
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.create(max=1, interval=1)
values = {'Max': 1, 'Interval': 1, }
self.holodeck.assert_has_request(Request(
'post',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets',
data=values,
))
def test_create_bucket_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.create(max=1, interval=1)
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets/BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_update_bucket_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets/BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_bucket_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.list()
self.holodeck.assert_has_request(Request(
'get',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets',
))
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"buckets": [],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"next_page_url": null,
"key": "buckets"
}
}
'''
)) | actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"buckets": [
{
"sid": "BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"rate_limit_sid": "RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"max": 5,
"interval": 60,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets/BLaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://verify.twilio.com/v2/Services/VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/RateLimits/RKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Buckets?PageSize=50&Page=0",
"next_page_url": null,
"key": "buckets"
}
}
'''
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets.list()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://verify.twilio.com/v2/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/RateLimits/RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Buckets/BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.verify.v2.services("VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.rate_limits("RKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.buckets("BLXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual) | |
save_single_task_for_canceling_transfer_out.go | package domain
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/CRORCR/alibaba-cloud-sdk-go/sdk/requests"
"github.com/CRORCR/alibaba-cloud-sdk-go/sdk/responses"
)
| // api document: https://help.aliyun.com/api/domain/savesingletaskforcancelingtransferout.html
func (client *Client) SaveSingleTaskForCancelingTransferOut(request *SaveSingleTaskForCancelingTransferOutRequest) (response *SaveSingleTaskForCancelingTransferOutResponse, err error) {
response = CreateSaveSingleTaskForCancelingTransferOutResponse()
err = client.DoAction(request, response)
return
}
// SaveSingleTaskForCancelingTransferOutWithChan invokes the domain.SaveSingleTaskForCancelingTransferOut API asynchronously
// api document: https://help.aliyun.com/api/domain/savesingletaskforcancelingtransferout.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) SaveSingleTaskForCancelingTransferOutWithChan(request *SaveSingleTaskForCancelingTransferOutRequest) (<-chan *SaveSingleTaskForCancelingTransferOutResponse, <-chan error) {
responseChan := make(chan *SaveSingleTaskForCancelingTransferOutResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.SaveSingleTaskForCancelingTransferOut(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// SaveSingleTaskForCancelingTransferOutWithCallback invokes the domain.SaveSingleTaskForCancelingTransferOut API asynchronously
// api document: https://help.aliyun.com/api/domain/savesingletaskforcancelingtransferout.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) SaveSingleTaskForCancelingTransferOutWithCallback(request *SaveSingleTaskForCancelingTransferOutRequest, callback func(response *SaveSingleTaskForCancelingTransferOutResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *SaveSingleTaskForCancelingTransferOutResponse
var err error
defer close(result)
response, err = client.SaveSingleTaskForCancelingTransferOut(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// SaveSingleTaskForCancelingTransferOutRequest is the request struct for api SaveSingleTaskForCancelingTransferOut
type SaveSingleTaskForCancelingTransferOutRequest struct {
*requests.RpcRequest
DomainName string `position:"Query" name:"DomainName"`
UserClientIp string `position:"Query" name:"UserClientIp"`
Lang string `position:"Query" name:"Lang"`
}
// SaveSingleTaskForCancelingTransferOutResponse is the response struct for api SaveSingleTaskForCancelingTransferOut
type SaveSingleTaskForCancelingTransferOutResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
TaskNo string `json:"TaskNo" xml:"TaskNo"`
}
// CreateSaveSingleTaskForCancelingTransferOutRequest creates a request to invoke SaveSingleTaskForCancelingTransferOut API
func CreateSaveSingleTaskForCancelingTransferOutRequest() (request *SaveSingleTaskForCancelingTransferOutRequest) {
request = &SaveSingleTaskForCancelingTransferOutRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("Domain", "2018-01-29", "SaveSingleTaskForCancelingTransferOut", "domain", "openAPI")
request.Method = requests.POST
return
}
// CreateSaveSingleTaskForCancelingTransferOutResponse creates a response to parse from SaveSingleTaskForCancelingTransferOut response
func CreateSaveSingleTaskForCancelingTransferOutResponse() (response *SaveSingleTaskForCancelingTransferOutResponse) {
response = &SaveSingleTaskForCancelingTransferOutResponse{
BaseResponse: &responses.BaseResponse{},
}
return
} | // SaveSingleTaskForCancelingTransferOut invokes the domain.SaveSingleTaskForCancelingTransferOut API synchronously |
diff.py | # Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""Diff Object"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from collections import OrderedDict
from future.utils import viewkeys
from .base import Model, proxy_gen
from .trial import Trial
from .graphs.diff_graph import DiffGraph
class Diff(Model):
"""This model represents a diff between two trials
Initialize it by passing both trial ids:
diff = Diff(1, 2)
There are four visualization modes for the graph:
tree: activation tree without any filters
diff.graph.mode = 0
no match: tree transformed into a graph by the addition of sequence and
return edges and removal of intermediate call edges
diff.graph.mode = 1
exact match: calls are only combined when all the sub-calls match
diff.graph.mode = 2
namespace: calls are combined without considering the sub-calls
diff.graph.mode = 3
You can change the graph width and height by the variables:
diff.graph.width = 600
diff.graph.height = 400
"""
__modelname__ = "Diff"
DEFAULT = {
"graph.width": 500,
"graph.height": 500,
"graph.mode": 3,
"graph.time_limit": None,
}
REPLACE = {
"graph_width": "graph.width",
"graph_height": "graph.height",
"graph_mode": "graph.mode",
"graph_time_limit": "graph.time_limit",
}
def __init__(self, trial_ref1, trial_ref2, **kwargs):
super(Diff, self).__init__(trial_ref1, trial_ref2, **kwargs)
self.trial1 = Trial(trial_ref1)
self.trial2 = Trial(trial_ref2)
self.graph = DiffGraph(self)
self.initialize_default(kwargs)
@property
def trial(self):
"""Return a tuple with information from both trials """
extra = ("start", "finish", "duration_text")
ignore = ("id",)
return diff_dict(
self.trial1.to_dict(ignore=ignore, extra=extra), # pylint: disable=no-member
self.trial2.to_dict(ignore=ignore, extra=extra)) # pylint: disable=no-member
@property
def modules(self):
"""Diff modules from trials"""
return diff_set(
set(proxy_gen(self.trial1.modules)),
set(proxy_gen(self.trial2.modules)))
@property
def environment(self):
"""Diff environment variables"""
return diff_set(
set(self.trial1.environment_attrs),
set(self.trial2.environment_attrs))
@property
def file_accesses(self):
"""Diff file accesses"""
return diff_set(
set(self.trial1.file_accesses),
set(self.trial2.file_accesses),
create_replaced=False)
def _ipython_display_(self):
"""Display history graph"""
if hasattr(self, "graph"):
# pylint: disable=protected-access
return self.graph._ipython_display_()
from IPython.display import display
display({
'text/plain': 'Diff {}:{}'.format(
self.trial1.id,
self.trial2.id
)
})
| Return a dict with keys shared by both dicts that have different values
key -> [before[key], after[key]]
"""
result = OrderedDict()
for key in viewkeys(before):
if key != "id" and before[key] != after[key]:
result[key] = [before[key], after[key]]
return result
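# Illustrative: diff_dict({'a': 1, 'b': 2}, {'a': 1, 'b': 3}) returns
# {'b': [2, 3]} -- only keys whose values differ survive, and "id" is skipped.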
def diff_set(before, after, create_replaced=True):
"""Compare sets to get additions, removals and replacements
Return 3 sets:
added -- objects present in second set, but not present in first set
removed -- objects present in first set, but not present in second set
replaced -- objects that have the same name in both sets, but are different
"""
removed = before - after
added = after - before
replaced = set()
removed_by_name = {}
for element_removed in removed:
removed_by_name[element_removed.name] = element_removed
for element_added in added:
element_removed = removed_by_name.get(element_added.name)
if element_removed and create_replaced:
replaced.add((element_removed, element_added))
if create_replaced:
for (element_removed, element_added) in replaced:
removed.discard(element_removed)
added.discard(element_added)
return (added, removed, replaced) | def diff_dict(before, after):
"""Compare dicts. |
main.py | # DataManager -> responsible for talking to the Google Sheets API.
# FlightSearch -> responsible for talking to the Flight Search API.
# FlightData -> responsible for structuring the flight data
# NotificationManager -> responsible for sending notifications with the deal flight details
from data_manager import DataManager
from flight_search import FlightSearch
from notification_manager import NotificationManager
from utils import format_notification
dm = DataManager()
fs = FlightSearch()
nm = NotificationManager()
sheet_data = dm.get_google_sheet_curret_data()
for data_row in sheet_data:
if data_row["iataCode"] == "":
data_row["iataCode"] = fs.get_iata_codes(data_row["city"])
dm.update_iata(data_row)
flight_info = fs.get_flights_data(data_row["iataCode"])
if flight_info is None:
|
if data_row["lowestPrice"] >= flight_info.price:
msg = f"\n({flight_info.city_from.split('-')[0].strip()}) -> ({flight_info.city_to.split('-')[0].strip()})"
print(msg)
if flight_info.stop_overs > 0:
msg = f"Flight from {flight_info.city_from.split('-')[0].strip()} to {flight_info.city_to.split('-')[0].strip()} has 1 stop over, via {flight_info.via_city}."
print(msg)
nm.send_text(format_notification(flight_info))
nm.send_emails(format_notification(flight_info))
| continue |
order.ts | import { BasicOrder, Order, OrderWithDetails } from "../types/order";
import { db } from "../db";
import { OkPacket, RowDataPacket } from "mysql2";
export const create = (order: BasicOrder, callback: Function) => {
const queryString =
"INSERT INTO ticketOrder (id_ticket, id_user) VALUES (?, ?)";
db.query(
queryString,
[order.ticket.id_ticket, order.user.id_user],
(err, result) => {
if (err) {
return callback(err);
}
const insertId = (<OkPacket>result).insertId;
callback(null, insertId);
}
);
};
export const findByIdUser = (id_user: number, callback: Function) => {
const queryString = `
SELECT
o.*,
t.*,
u.name AS user_name,
u.email
FROM ticketOrder AS o
INNER JOIN user AS u ON u.id_user=o.id_user
INNER JOIN ticket AS t ON t.id_ticket=o.id_ticket
WHERE o.id_user=?`;
db.query(queryString, id_user, (err, result) => {
if (err) {
return callback(err);
}
if ((<RowDataPacket>result)[0] === undefined) {
const err = new Error(`Orders for user with id: ${id_user} do not exist.`);
return callback(err);
}
const row = <RowDataPacket>result;
callback(null, row);
});
};
export const findOne = (orderId: number, callback: Function) => {
const queryString = ` | u.email
FROM ticketOrder AS o
INNER JOIN user AS u ON u.id_user=o.id_user
INNER JOIN ticket AS t ON t.id_ticket=o.id_ticket
WHERE o.id_order=?`;
db.query(queryString, orderId, (err, result) => {
if (err) {
return callback(err);
}
if ((<RowDataPacket>result)[0] === undefined) {
const err = new Error(`Order with id: ${orderId} does not exist.`);
return callback(err);
}
const row = (<RowDataPacket>result)[0];
const order: OrderWithDetails = {
orderId: row.id_order,
ticket: {
id_ticket: row.id_ticket,
name: row.name,
description: row.description,
},
user: {
id_user: row.id_user,
id_type_user: row.id_type_user,
name: row.user_name,
email: row.email,
},
};
callback(null, order);
});
};
export const findAll = (callback: Function) => {
const queryString = `
SELECT
o.*,
t.*,
u.id_type_user AS user_type,
u.name AS user_name,
u.email
FROM ticketOrder AS o
INNER JOIN user AS u ON u.id_user=o.id_user
INNER JOIN ticket AS t ON t.id_ticket=o.id_ticket`;
db.query(queryString, (err, result) => {
if (err) {
return callback(err);
}
const rows = <RowDataPacket[]>result;
console.log(`Fetch all Orders`);
const orders: Order[] = [];
rows.forEach((row) => {
const order: OrderWithDetails = {
orderId: row.id_order,
ticket: {
id_ticket: row.id_ticket,
name: row.name,
description: row.description,
},
user: {
id_user: row.id_user,
id_type_user: row.user_type,
name: row.user_name,
email: row.email,
},
};
orders.push(order);
});
callback(null, orders);
});
};
export const update = (order: Order, callback: Function) => {
const queryString = `UPDATE ticketOrder SET id_ticket=?, id_user=? WHERE id_order=?`;
db.query(
queryString,
// the WHERE clause needs the order id as its third placeholder value
[order.ticket.id_ticket, order.user.id_user, order.orderId],
(err, result) => {
if (err) {
return callback(err);
}
if ((<OkPacket>result).affectedRows === 0) {
const err = new Error(`Order with id: ${order.orderId} does not exist.`);
return callback(err);
}
callback(null, result);
}
);
}; | SELECT
o.*,
t.*,
u.name AS user_name, |
ServiceAgreementsClientFactory.d.ts | import { Descriptor } from 'pip-services3-commons-node';
import { Factory } from 'pip-services3-components-node';
export declare class | extends Factory {
static Descriptor: Descriptor;
static NullClientV1Descriptor: Descriptor;
static DirectClientV1Descriptor: Descriptor;
static HttpClientV1Descriptor: Descriptor;
static LambdaClientV1Descriptor: Descriptor;
constructor();
}
| ServiceAgreementsClientFactory |
HomePage.tsx | import React, {useEffect} from 'react';
import { SafeAreaView, StyleSheet, View, useWindowDimensions, StatusBar } from 'react-native';
import PhotosContainer from '../components/PhotosContainer';
import {default as Reanimated,} from 'react-native-reanimated';
interface Props {
scrollY2: Reanimated.SharedValue<number>;
scrollY3: Reanimated.SharedValue<number>;
scrollY4: Reanimated.SharedValue<number>;
scale: Reanimated.SharedValue<number>;
numColumnsAnimated: Reanimated.SharedValue<number>;
HEADER_HEIGHT: number;
FOOTER_HEIGHT: number;
headerShown: Reanimated.SharedValue<number>;
}
const HomePage: React.FC<Props> = (props) => {
useEffect(()=>{
console.log(Date.now()+': HomePage re-rendered');
});
const SCREEN_WIDTH = useWindowDimensions().width; |
return (
<SafeAreaView style={styles.SafeAreaView}>
<View style={[styles.View, {width: SCREEN_WIDTH, zIndex:1, marginTop:(StatusBar.currentHeight||0)}]}>
<PhotosContainer
scrollY2={props.scrollY2}
scrollY3={props.scrollY3}
scrollY4={props.scrollY4}
scale={props.scale}
numColumnsAnimated={props.numColumnsAnimated}
HEADER_HEIGHT={props.HEADER_HEIGHT}
FOOTER_HEIGHT={props.FOOTER_HEIGHT}
headerShown={props.headerShown}
/>
</View>
</SafeAreaView>
);
};
const styles = StyleSheet.create({
SafeAreaView: {
flex: 1,
position: 'relative',
backgroundColor: 'white'
},
View: {
position: 'absolute',
top: 0,
left: 0
}
});
export default React.memo(HomePage); | const SCREEN_HEIGHT = useWindowDimensions().height; |
battery.py | class Battery:
| def __init__(self, evaluator):
raise NotImplementedError
def get_action(self, current_state):
raise NotImplementedError |
|
demo_realsense.py | from utils import *
from darknet import Darknet
import sys
import cv2
import numpy as np
import pyrealsense2 as rs
def demo(cfgfile, weightfile):
|
############################################
if __name__ == '__main__':
if len(sys.argv) == 4:
cfgfile = sys.argv[1]
weightfile = sys.argv[2]
namesfile = sys.argv[3]
demo(cfgfile, weightfile)
#demo('cfg/tiny-yolo-voc.cfg', 'tiny-yolo-voc.weights', 'data/hands.names')
else:
print('Usage:')
print(' python demo.py cfgfile weightfile namesfile')
print('')
print(' perform detection on camera')
| m = Darknet(cfgfile)
m.print_network()
m.load_weights(weightfile)
print('Loading weights from %s... Done!' % (weightfile))
class_names = load_class_names(namesfile)
use_cuda = 1
if use_cuda:
m.cuda()
# cap = cv2.VideoCapture(0)
# RealSense Start
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
profile = pipeline.start(config)
# Setting exposure
s = profile.get_device().query_sensors()[1]
s.set_option(rs.option.exposure, 166)
while True:
# res, img = cap.read()
# Reading image from camera
frames = pipeline.wait_for_frames()
color_frame = frames.get_color_frame()
if not color_frame:
continue
img = np.asanyarray(color_frame.get_data())
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
sized = cv2.resize(img, (m.width, m.height))
bboxes = do_detect(m, sized, 0.6, 0.4, use_cuda)
print('------')
draw_img = plot_boxes_cv2(img, bboxes, None, class_names)
cv2.imshow(cfgfile, draw_img)
cv2.waitKey(1) |
dnsserver.js | // Copyright (c) 2010 Tom Hughes-Croucher
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
var util = require('util'),
Buffer = require('buffer').Buffer,
dgram = require('dgram');
function Server() {
dgram.Socket.call(this, 'udp4');
var self = this;
this.on('message', function(msg, rinfo) {
//split up the message into the dns request header info and the query
var request = processRequest(msg);
var response = new Response(self, rinfo, request);
this.emit('request', request, response);
});
}
exports.Server = Server;
util.inherits(exports.Server, dgram.Socket);
exports.createServer = function() {
return new Server();
}
// slices a bit field out of a single byte;
// `off` is the offset from the most significant bit, `len` the field width
function sliceBits(b, off, len) {
var s = 7 - (off + len - 1);
b = b >>> s;
return b & ~(0xff << len);
}
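// Illustrative: sliceBits(0b10110000, 0, 1) === 1 (a qr bit) and
// sliceBits(0b10110000, 1, 4) === 0b0110 (an opcode field).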
//takes a buffer as a request
function processRequest(req) {
//see rfc1035 for more details
//http://tools.ietf.org/html/rfc1035#section-4.1.1
var query = {};
query.header = {};
//TODO write code to break questions up into an array
query.question = {};
var tmpSlice;
var tmpByte;
//transaction id
// 2 bytes
query.header.id = req.slice(0,2);
//slice out a byte for the next section to dice into binary.
tmpSlice = req.slice(2,3);
//convert the binary buf into a string and then pull the char code
//for the byte
tmpByte = tmpSlice.toString('binary', 0, 1).charCodeAt(0);
//qr
// 1 bit
query.header.qr = sliceBits(tmpByte, 0,1);
//opcode
// 0 = standard, 1 = inverse, 2 = server status, 3-15 reserved
// 4 bits
query.header.opcode = sliceBits(tmpByte, 1,4);
//authorative answer
// 1 bit
query.header.aa = sliceBits(tmpByte, 5,1);
//truncated
// 1 bit
query.header.tc = sliceBits(tmpByte, 6,1);
//recursion desired
// 1 bit
query.header.rd = sliceBits(tmpByte, 7,1);
//slice out a byte to dice into binary
tmpSlice = req.slice(3,4);
//convert the binary buf into a string and then pull the char code
//for the byte
tmpByte = tmpSlice.toString('binary', 0, 1).charCodeAt(0);
//recursion available
// 1 bit
query.header.ra = sliceBits(tmpByte, 0,1);
//reserved 3 bits
// rfc says always 0
query.header.z = sliceBits(tmpByte, 1,3);
//response code
// 0 = no error, 1 = format error, 2 = server failure
// 3 = name error, 4 = not implemented, 5 = refused
// 6-15 reserved
// 4 bits
query.header.rcode = sliceBits(tmpByte, 4,4);
//question count
// 2 bytes
query.header.qdcount = req.slice(4,6);
//answer count
// 2 bytes
query.header.ancount = req.slice(6,8);
//ns count
// 2 bytes
query.header.nscount = req.slice(8,10);
//addition resources count
// 2 bytes
query.header.arcount = req.slice(10, 12);
//assuming one question
//qname is the sequence of domain labels
//qname length is not fixed; it ends 4
//octets from the end of the buffer
query.question.qname = req.slice(12, req.length - 4);
//qtype
query.question.qtype = req.slice(req.length - 4, req.length - 2);
//qclass
query.question.qclass = req.slice(req.length - 2, req.length);
query.question.name = qnameToDomain(query.question.qname);
query.question.type = query.question.qtype[0] * 256 + query.question.qtype[1];
query.question.class = query.question.qclass[0] * 256 + query.question.qclass[1];
return query;
}
function Response(socket, rinfo, query) {
this.socket = socket;
this.rinfo = rinfo;
this.header = {};
//2 bytes
this.header.id = query.header.id; //same as query id
//combined 1 byte
this.header.qr = 1; //this is a response
this.header.opcode = 0; //standard for now TODO: add other types 4-bit!
this.header.aa = 0; //authority... TODO this should be modal
this.header.tc = 0; //truncation
this.header.rd = 1; //recursion asked for
//combined 1 byte
this.header.ra = 0; //no recursion here TODO
this.header.z = 0; // spec says this MUST always be 0. 3bit
this.header.rcode = 0; //TODO add error codes 4 bit.
//2 bytes
this.header.qdcount = 1; //1 question
//2 bytes
this.header.ancount = 0; //number of rrs returned from query
//2 bytes
this.header.nscount = 0;
//2 bytes
this.header.arcount = 0;
this.question = {};
this.question.qname = query.question.qname;
this.question.qtype = query.question.qtype;
this.question.qclass = query.question.qclass;
this.rr = [];
}
Response.prototype.addRR = function(domain, qtype, qclass, ttl, rdlength, rdata) {
var r = {}, address;
r.qname = domainToQname(domain);
r.qtype = qtype;
r.qclass = qclass;
r.ttl = ttl;
// if the fifth argument parses as a dotted-quad IPv4 address, treat it
// as the rdata of a 4-byte A record rather than as a length
if (address = inet_aton(rdlength)) {
r.rdlength = 4;
r.rdata = address;
} else {
r.rdlength = rdlength;
r.rdata = rdata;
}
this.rr.push(r);
this.header.ancount++;
}
Response.prototype.send = function(callback) {
var buffer = this.toBuffer();
this.socket.send(buffer, 0, buffer.length, this.rinfo.port, this.rinfo.address, callback || function() {});
}
Response.prototype.toBuffer = function() {
//calculate len in octets
//NB not calculating rr this is done later
//headers(12) + question(qname + qtype(2) + qclass(2))
//i.e. len = 16 + qname length
//qnames are Buffers so length is already in octets
var qnameLen = this.question.qname.length;
var len = 16 + qnameLen;
var buf = getZeroBuf(len);
this.header.id.copy(buf, 0, 0, 2);
buf[2] = 0x00 | this.header.qr << 7 | this.header.opcode << 3 | this.header.aa << 2 | this.header.tc << 1 | this.header.rd;
buf[3] = 0x00 | this.header.ra << 7 | this.header.z << 4 | this.header.rcode;
numToBuffer(buf, 4, this.header.qdcount, 2);
numToBuffer(buf, 6, this.header.ancount, 2);
numToBuffer(buf, 8, this.header.nscount, 2);
numToBuffer(buf, 10, this.header.arcount, 2); |
this.question.qname.copy(buf, 12, 0, qnameLen);
this.question.qtype.copy(buf, 12+qnameLen, 0, 2);
this.question.qclass.copy(buf, 12+qnameLen+2, 0, 2);
var rrStart = 12+qnameLen+4;
for (var i = 0; i < this.rr.length; i++) {
//TODO figure out if this is actually cheaper than just iterating
//over the rr section up front and counting before creating buf
//
//create a new buffer to hold the request plus the rr
//len of each response is 14 bytes of stuff + qname len
var tmpBuf = getZeroBuf(buf.length + this.rr[i].qname.length + 14);
buf.copy(tmpBuf, 0, 0, buf.length);
this.rr[i].qname.copy(tmpBuf, rrStart, 0, this.rr[i].qname.length);
numToBuffer(tmpBuf, rrStart+this.rr[i].qname.length, this.rr[i].qtype, 2);
numToBuffer(tmpBuf, rrStart+this.rr[i].qname.length+2, this.rr[i].qclass, 2);
numToBuffer(tmpBuf, rrStart+this.rr[i].qname.length+4, this.rr[i].ttl, 4);
numToBuffer(tmpBuf, rrStart+this.rr[i].qname.length+8, this.rr[i].rdlength, 2);
numToBuffer(tmpBuf, rrStart+this.rr[i].qname.length+10, this.rr[i].rdata, this.rr[i].rdlength); // rdlength indicates rdata length
rrStart = rrStart + this.rr[i].qname.length + 14;
buf = tmpBuf;
}
//TODO compression
return buf;
}
function inet_aton(address) {
var parts = address.match(/^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$/);
return parts ? parts[1] * 16777216 + parts[2] * 65536 + parts[3] * 256 + parts[4] * 1 : false;
}
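// Illustrative: inet_aton('127.0.0.1') === 2130706433, while anything that
// is not a dotted quad (e.g. a hostname) yields false -- the check addRR
// relies on above.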
function domainToQname(domain) {
var tokens = domain.split(".");
var len = domain.length + 2;
var qname = new Buffer(len);
var offset = 0;
for (var i = 0; i < tokens.length; i++) {
qname[offset] = tokens[i].length;
offset++;
for (var j = 0; j < tokens[i].length; j++) {
qname[offset] = tokens[i].charCodeAt(j);
offset++;
}
}
qname[offset] = 0;
return qname;
}
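// Illustrative: domainToQname('a.bc') produces the 6-byte label sequence
// [1, 'a', 2, 'b', 'c', 0] -- each label is length-prefixed and the name
// is terminated by a zero octet (RFC 1035, section 3.1).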
function getZeroBuf(len) {
var buf = new Buffer(len);
for (var i = 0; i < buf.length; i++) buf[i] = 0;
return buf;
}
//take a number and make sure it's written to the buffer as
//the correct length of bytes with leading 0 padding where necessary
// takes buffer, offset, number, length in bytes to insert
function numToBuffer(buf, offset, num, len, debug) {
if (typeof num != 'number') {
throw new Error('Num must be a number');
}
for (var i = offset; i < offset + len; i++) {
var shift = 8*((len - 1) - (i - offset));
var insert = (num >> shift) & 255;
buf[i] = insert;
}
return buf;
}
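// Illustrative: numToBuffer(buf, 4, 1, 2) writes the big-endian bytes
// 0x00 0x01 at offsets 4 and 5 -- exactly how the 2-byte count fields in
// the header above are encoded.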
function qnameToDomain(qname) {
var domain = '';
for (var i = 0; i < qname.length; i++) {
if (qname[i] == 0) {
//last char chop trailing .
domain = domain.substring(0, domain.length - 1);
break;
}
var tmpBuf = qname.slice(i+1, i+qname[i]+1);
domain += tmpBuf.toString('binary', 0, tmpBuf.length);
domain += '.';
i = i + qname[i];
}
return domain;
} |
//end header |
adagrad_optimizer.ts | /**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {ENV} from '../environment';
import {keep, tidy} from '../globals';
import {fill, scalar} from '../ops/ops';
import {ConfigDict, Serializable, SerializableConstructor, SerializationMap} from '../serialization';
import {Scalar} from '../tensor';
import {NamedVariableMap} from '../tensor_types';
import {Optimizer} from './optimizer';
import * as optimizer_utils from './optimizer_utils';
/** @doclink Optimizer */
export class AdagradOptimizer extends Optimizer {
static className = 'AdagradOptimizer';
private c: Scalar;
private epsilon: Scalar;
private accumulatedGrads: NamedVariableMap = {};
constructor(
protected learningRate: number, private initialAccumulatorValue = 0.1) {
super();
this.c = keep(scalar(-learningRate));
const epsilon = optimizer_utils.getOptimizerDefaultEpsilonValue();
this.epsilon = keep(scalar(epsilon));
}
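// Sketch of the update rule implemented in applyGradients below: for each
// variable v with gradient g, the accumulator a is updated as a += g^2 and
// then v += c * g / sqrt(a + epsilon), where c = -learningRate -- i.e. the
// per-parameter AdaGrad step v <- v - lr * g / sqrt(a + eps).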
applyGradients(variableGradients: NamedVariableMap) {
for (const variableName in variableGradients) {
const value = ENV.engine.registeredVariables[variableName];
if (this.accumulatedGrads[variableName] == null) {
const trainable = false;
tidy(() => {
this.accumulatedGrads[variableName] =
fill(value.shape, this.initialAccumulatorValue)
.variable(trainable);
});
}
const gradient = variableGradients[variableName];
const accumulatedGrad = this.accumulatedGrads[variableName];
|
const newValue =
this.c
.mul(gradient.div(newAccumulatedGrad.add(this.epsilon).sqrt()))
.add(value);
value.assign(newValue);
});
}
}
dispose() {
this.epsilon.dispose();
this.c.dispose();
if (this.accumulatedGrads != null) {
Object.keys(this.accumulatedGrads)
.forEach(name => this.accumulatedGrads[name].dispose());
}
}
getConfig(): ConfigDict {
return {
learningRate: this.learningRate,
initialAccumulatorValue: this.initialAccumulatorValue,
};
}
static fromConfig<T extends Serializable>(
cls: SerializableConstructor<T>, config: ConfigDict): T {
return new cls(config.learningRate, config.initialAccumulatorValue);
}
}
SerializationMap.register(AdagradOptimizer); | tidy(() => {
const newAccumulatedGrad = accumulatedGrad.add(gradient.square());
this.accumulatedGrads[variableName].assign(newAccumulatedGrad); |
attrs.js | import {functionalMerge, normalMerge, toArrayMerge} from '@form-create/utils/lib/mergeprops';
export const keyAttrs = ['type', 'slot', 'emitPrefix', 'value', 'name', 'native', 'hidden', 'display', 'inject', 'options', 'emit', 'nativeEmit', 'link', 'prefix', 'suffix', 'update', 'sync'];
export const arrayAttrs = ['validate', 'children', 'control'];
export const normalAttrs = ['effect'];
export function | () {
return [...keyAttrs, ...normalMerge, ...toArrayMerge, ...functionalMerge, ...arrayAttrs, ...normalAttrs];
}
| attrs |
top.py | import itertools
import numbers
import numpy as np
import toolz
from .. import base, core, sharedict, utils
from ..compatibility import apply, Mapping
from ..delayed import to_task_dask
from ..optimization import SubgraphCallable
def subs(task, substitution):
""" Create a new task with the values substituted
This is like dask.core.subs, but takes a dict of many substitutions to
perform simultaneously. It is not as concerned with micro performance.
"""
if isinstance(task, dict):
return {k: subs(v, substitution) for k, v in task.items()}
if type(task) in (tuple, list, set):
return type(task)([subs(x, substitution) for x in task])
try:
return substitution[task]
except (KeyError, TypeError):
return task
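# Illustrative: subs(('add', 'x', 'y'), {'x': 1, 'y': 2}) returns
# ('add', 1, 2); keys missing from the substitution (and unhashable leaves,
# which raise TypeError internally) are returned unchanged.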
def index_subs(ind, substitution):
""" A simple subs function that works both on tuples and strings """
if ind is None:
return ind
else:
return tuple([substitution.get(c, c) for c in ind])
def atop_token(i, prefix='_'):
return prefix + '%d' % i
def _top(func, output, output_indices, *arrind_pairs, **kwargs):
""" Create a TOP symbolic mutable mapping, given the inputs to top
This is like the ``top`` function, but rather than construct a dict, it
returns a symbolic TOP object.
See Also
--------
top
TOP
"""
numblocks = kwargs.pop('numblocks')
concatenate = kwargs.pop('concatenate', None)
new_axes = kwargs.pop('new_axes', {})
graph = sharedict.ShareDict()
# Transform indices to canonical elements
# We use terms like .0 and .1 rather than provided index elements
arrind_pairs = list(arrind_pairs)
unique_indices = {i for ii in arrind_pairs[1::2]
if ii is not None
for i in ii} | set(output_indices)
sub = {k: atop_token(i, '.')
for i, k in enumerate(sorted(unique_indices))}
output_indices = index_subs(tuple(output_indices), sub)
arrind_pairs[1::2] = [tuple(a) if a is not None else a
for a in arrind_pairs[1::2]]
arrind_pairs[1::2] = [index_subs(a, sub)
for a in arrind_pairs[1::2]]
new_axes = {index_subs((k,), sub)[0]: v for k, v in new_axes.items()}
# Unpack dask values in non-array arguments
argpairs = list(toolz.partition(2, arrind_pairs))
for i, (arg, ind) in enumerate(argpairs):
if ind is None:
arg2, dsk2 = to_task_dask(arg)
if dsk2:
graph.update(dsk2)
argpairs[i] = (arg2, ind)
# separate argpairs into two separate tuples
inputs = tuple([name for name, _ in argpairs])
inputs_indices = tuple([index for _, index in argpairs])
# Unpack delayed objects in kwargs
if kwargs:
kwargs, dsk_kwargs = to_task_dask(kwargs)
# replace keys in kwargs with _0 tokens
new_keys = list(core.get_dependencies(dsk_kwargs, task=kwargs))
new_tokens = tuple(atop_token(i) for i in range(len(inputs), len(inputs) + len(new_keys)))
sub = dict(zip(new_keys, new_tokens))
inputs = inputs + tuple(new_keys)
inputs_indices = inputs_indices + (None,) * len(new_keys)
kwargs = subs(kwargs, sub)
graph.update(dsk_kwargs)
indices = [(k, v) for k, v in zip(inputs, inputs_indices)]
keys = tuple(map(atop_token, range(len(inputs))))
# Construct local graph
if not kwargs:
dsk = {output: (func,) + keys}
else:
_keys = list(keys)
if new_keys:
_keys = _keys[:-len(new_keys)]
dsk = {output: (apply, func, _keys, kwargs)}
# Construct final output
top = TOP(output, output_indices, dsk, indices,
numblocks=numblocks, concatenate=concatenate, new_axes=new_axes)
graph.update_with_key(top, output)
graph.dependencies = {output: {arg for arg, ind in argpairs if ind is not None}}
return graph
class TOP(Mapping):
""" Tensor Operation
This is a lazily constructed mapping for tensor operation graphs.
This defines a dictionary using an operation and an indexing pattern.
It is built for many operations like elementwise, transpose, tensordot, and
so on. We choose to keep these as symbolic mappings rather than raw
dictionaries because we are able to fuse them during optimization,
sometimes resulting in much lower overhead.
See Also
--------
top
atop
"""
def | (self, output, output_indices, dsk, indices,
numblocks, concatenate=None, new_axes=None):
self.output = output
self.output_indices = tuple(output_indices)
self.dsk = dsk
self.indices = tuple((name, tuple(ind) if ind is not None else ind)
for name, ind in indices)
self.numblocks = numblocks
self.concatenate = concatenate
self.new_axes = new_axes or {}
@property
def _dict(self):
if hasattr(self, '_cached_dict'):
return self._cached_dict
else:
keys = tuple(map(atop_token, range(len(self.indices))))
func = SubgraphCallable(self.dsk, self.output, keys)
self._cached_dict = top(
func,
self.output,
self.output_indices,
*list(toolz.concat(self.indices)),
new_axes=self.new_axes,
numblocks=self.numblocks,
concatenate=self.concatenate
)
return self._cached_dict
def __getitem__(self, key):
return self._dict[key]
def __iter__(self):
return iter(self._dict)
def __len__(self):
return int(np.prod(list(self._out_numblocks().values())))
def _out_numblocks(self):
d = {}
indices = {k: v for k, v in self.indices if v is not None}
for k, v in self.numblocks.items():
for a, b in zip(indices[k], v):
d[a] = max(d.get(a, 0), b)
return {k: v for k, v in d.items() if k in self.output_indices}
def top(func, output, out_indices, *arrind_pairs, **kwargs):
""" Tensor operation
Applies a function, ``func``, across blocks from many different input
dasks. We arrange the pattern with which those blocks interact with sets
of matching indices. E.g.::
top(func, 'z', 'i', 'x', 'i', 'y', 'i')
yield an embarrassingly parallel communication pattern and is read as
$$ z_i = func(x_i, y_i) $$
More complex patterns may emerge, including multiple indices::
top(func, 'z', 'ij', 'x', 'ij', 'y', 'ji')
$$ z_{ij} = func(x_{ij}, y_{ji}) $$
Indices missing in the output but present in the inputs results in many
inputs being sent to one function (see examples).
Examples
--------
Simple embarrassing map operation
>>> inc = lambda x: x + 1
>>> top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
Simple operation on two datasets
>>> add = lambda x, y: x + y
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Operation that flips one of the datasets
>>> addT = lambda x, y: x + y.T # Transpose each chunk
>>> # z_ij ~ x_ij y_ji
>>> # .. .. .. notice swap
>>> top(addT, 'z', 'ij', 'x', 'ij', 'y', 'ji', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (addT, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (addT, ('x', 0, 1), ('y', 1, 0)),
('z', 1, 0): (addT, ('x', 1, 0), ('y', 0, 1)),
('z', 1, 1): (addT, ('x', 1, 1), ('y', 1, 1))}
Dot product with contraction over ``j`` index. Yields list arguments
>>> top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
Pass ``concatenate=True`` to concatenate arrays ahead of time
>>> top(f, 'z', 'i', 'x', 'ij', 'y', 'ij', concatenate=True,
... numblocks={'x': (2, 2), 'y': (2, 2,)}) # doctest: +SKIP
{('z', 0): (f, (concatenate_axes, [('x', 0, 0), ('x', 0, 1)], (1,)),
(concatenate_axes, [('y', 0, 0), ('y', 0, 1)], (1,))),
('z', 1): (f, (concatenate_axes, [('x', 1, 0), ('x', 1, 1)], (1,)),
(concatenate_axes, [('y', 1, 0), ('y', 1, 1)], (1,)))}
Supports Broadcasting rules
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (1, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 1))}
Support keyword arguments with apply
>>> def f(a, b=0): return a + b
>>> top(f, 'z', 'i', 'x', 'i', numblocks={'x': (2,)}, b=10) # doctest: +SKIP
{('z', 0): (apply, f, [('x', 0)], {'b': 10}),
('z', 1): (apply, f, [('x', 1)], {'b': 10})}
Include literals by indexing with ``None``
>>> top(add, 'z', 'i', 'x', 'i', 100, None, numblocks={'x': (2,)}) # doctest: +SKIP
{('z', 0): (add, ('x', 0), 100),
('z', 1): (add, ('x', 1), 100)}
See Also
--------
atop
"""
from .core import broadcast_dimensions, zero_broadcast_dimensions, concatenate_axes
numblocks = kwargs.pop('numblocks')
concatenate = kwargs.pop('concatenate', None)
new_axes = kwargs.pop('new_axes', {})
argpairs = list(toolz.partition(2, arrind_pairs))
assert set(numblocks) == {name for name, ind in argpairs if ind is not None}
all_indices = {x for _, ind in argpairs if ind for x in ind}
dummy_indices = all_indices - set(out_indices)
# Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
dims = broadcast_dimensions(argpairs, numblocks)
for k in new_axes:
dims[k] = 1
# (0, 0), (0, 1), (0, 2), (1, 0), ...
keytups = list(itertools.product(*[range(dims[i]) for i in out_indices]))
# {i: 0, j: 0}, {i: 0, j: 1}, ...
keydicts = [dict(zip(out_indices, tup)) for tup in keytups]
# {j: [1, 2, 3], ...} For j a dummy index of dimension 3
dummies = dict((i, list(range(dims[i]))) for i in dummy_indices)
dsk = {}
# Create argument lists
valtups = []
for kd in keydicts:
args = []
for arg, ind in argpairs:
if ind is None:
args.append(arg)
else:
tups = lol_tuples((arg,), ind, kd, dummies)
if any(nb == 1 for nb in numblocks[arg]):
tups2 = zero_broadcast_dimensions(tups, numblocks[arg])
else:
tups2 = tups
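# Illustrative note (not in the original source): with numblocks['x'] == (1, 2),
# zero_broadcast_dimensions collapses a key such as ('x', 1, 0) to ('x', 0, 0),
# so the single block along a length-1 axis is reused for every output row.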
if concatenate and isinstance(tups2, list):
axes = [n for n, i in enumerate(ind) if i in dummies]
tups2 = (concatenate_axes, tups2, axes)
args.append(tups2)
valtups.append(args)
if not kwargs: # will not be used in an apply, should be a tuple
valtups = [tuple(vt) for vt in valtups]
# Add heads to tuples
keys = [(output,) + kt for kt in keytups]
# Unpack delayed objects in kwargs
if kwargs:
task, dsk2 = to_task_dask(kwargs)
if dsk2:
dsk.update(utils.ensure_dict(dsk2))
kwargs2 = task
else:
kwargs2 = kwargs
vals = [(apply, func, vt, kwargs2) for vt in valtups]
else:
vals = [(func,) + vt for vt in valtups]
dsk.update(dict(zip(keys, vals)))
return dsk
def atop(func, out_ind, *args, **kwargs):
""" Tensor operation: Generalized inner and outer products
A broad class of blocked algorithms and patterns can be specified with a
concise multi-index notation. The ``atop`` function applies an in-memory
function across multiple blocks of multiple inputs in a variety of ways.
Many dask.array operations are special cases of atop including elementwise,
broadcasting, reductions, tensordot, and transpose.
Parameters
----------
func : callable
Function to apply to individual tuples of blocks
out_ind : iterable
Block pattern of the output, something like 'ijk' or (1, 2, 3)
*args : sequence of Array, index pairs
Sequence like (x, 'ij', y, 'jk', z, 'i')
**kwargs : dict
Extra keyword arguments to pass to function
dtype : np.dtype
Datatype of resulting array.
concatenate : bool, keyword only
If true concatenate arrays along dummy indices, else provide lists
adjust_chunks : dict
Dictionary mapping index to function to be applied to chunk sizes
new_axes : dict, keyword only
New indexes and their dimension lengths
Examples
--------
2D embarrassingly parallel operation from two arrays, x, and y.
>>> z = atop(operator.add, 'ij', x, 'ij', y, 'ij', dtype='f8') # z = x + y # doctest: +SKIP
Outer product multiplying x by y, two 1-d vectors
>>> z = atop(operator.mul, 'ij', x, 'i', y, 'j', dtype='f8') # doctest: +SKIP
z = x.T
>>> z = atop(np.transpose, 'ji', x, 'ij', dtype=x.dtype) # doctest: +SKIP
The transpose case above is illustrative because it does the same transposition
both on each in-memory block by calling ``np.transpose`` and on the order
of the blocks themselves, by switching the order of the index ``ij -> ji``.
We can compose these same patterns with more variables and more complex
in-memory functions
z = X + Y.T
>>> z = atop(lambda x, y: x + y.T, 'ij', x, 'ij', y, 'ji', dtype='f8') # doctest: +SKIP
Any index, like ``i``, missing from the output index is interpreted as a
contraction (note that this differs from Einstein convention; repeated
indices do not imply contraction). In the case of a contraction the passed
function should expect an iterable of blocks on any array that holds that
index. To receive arrays concatenated along contracted dimensions instead
pass ``concatenate=True``.
Inner product multiplying x by y, two 1-d vectors
>>> def sequence_dot(x_blocks, y_blocks):
... result = 0
... for x, y in zip(x_blocks, y_blocks):
... result += x.dot(y)
... return result
>>> z = atop(sequence_dot, '', x, 'i', y, 'i', dtype='f8') # doctest: +SKIP
Add new single-chunk dimensions with the ``new_axes=`` keyword, including
the length of the new dimension. New dimensions will always be in a single
chunk.
>>> def f(x):
... return x[:, None] * np.ones((1, 5))
>>> z = atop(f, 'az', x, 'a', new_axes={'z': 5}, dtype=x.dtype) # doctest: +SKIP
If the applied function changes the size of each chunk you can specify this
with an ``adjust_chunks={...}`` dictionary holding a function for each index
that modifies the dimension size in that index.
>>> def double(x):
... return np.concatenate([x, x])
>>> y = atop(double, 'ij', x, 'ij',
... adjust_chunks={'i': lambda n: 2 * n}, dtype=x.dtype) # doctest: +SKIP
Include literals by indexing with None
>>> y = atop(add, 'ij', x, 'ij', 1234, None, dtype=x.dtype) # doctest: +SKIP
See Also
--------
top - dict formulation of this function, contains most logic
"""
out = kwargs.pop('name', None) # May be None at this point
token = kwargs.pop('token', None)
dtype = kwargs.pop('dtype', None)
adjust_chunks = kwargs.pop('adjust_chunks', None)
new_axes = kwargs.get('new_axes', {})
# Input Validation
if len(set(out_ind)) != len(out_ind):
raise ValueError("Repeated elements not allowed in output index",
[k for k, v in toolz.frequencies(out_ind).items() if v > 1])
new = (set(out_ind)
- {a for arg in args[1::2] if arg is not None for a in arg}
- set(new_axes or ()))
if new:
raise ValueError("Unknown dimension", new)
from .core import Array, unify_chunks, normalize_arg
if dtype is None:
raise ValueError("Must specify dtype of output array")
chunkss, arrays = unify_chunks(*args)
for k, v in new_axes.items():
chunkss[k] = (v,)
arginds = list(zip(arrays, args[1::2]))
for arg, ind in arginds:
if hasattr(arg, 'ndim') and hasattr(ind, '__len__') and arg.ndim != len(ind):
raise ValueError("Index string %s does not match array dimension %d"
% (ind, arg.ndim))
numblocks = {a.name: a.numblocks for a, ind in arginds if ind is not None}
argindsstr = list(toolz.concat([(normalize_arg(a) if ind is None else a.name, ind)
for a, ind in arginds]))
# Finish up the name
if not out:
out = '%s-%s' % (token or utils.funcname(func).strip('_'),
base.tokenize(func, out_ind, argindsstr, dtype, **kwargs))
kwargs2 = {k: normalize_arg(v) for k, v in kwargs.items()}
dsk = _top(func, out, out_ind, *argindsstr, numblocks=numblocks, **kwargs2)
dsks = [a.dask for a, ind in arginds if ind is not None]
chunks = [chunkss[i] for i in out_ind]
if adjust_chunks:
for i, ind in enumerate(out_ind):
if ind in adjust_chunks:
if callable(adjust_chunks[ind]):
chunks[i] = tuple(map(adjust_chunks[ind], chunks[i]))
elif isinstance(adjust_chunks[ind], numbers.Integral):
chunks[i] = tuple(adjust_chunks[ind] for _ in chunks[i])
elif isinstance(adjust_chunks[ind], (tuple, list)):
chunks[i] = tuple(adjust_chunks[ind])
else:
raise NotImplementedError(
"adjust_chunks values must be callable, int, or tuple")
chunks = tuple(chunks)
return Array(sharedict.merge((out, dsk), *dsks,
dependencies={out: {a.name for a, ind in arginds if ind is not None}}),
out, chunks, dtype=dtype)
def lol_tuples(head, ind, values, dummies):
""" List of list of tuple keys
Parameters
----------
head : tuple
The known tuple so far
ind : Iterable
An iterable of indices not yet covered
values : dict
Known values for non-dummy indices
dummies : dict
Ranges of values for dummy indices
Examples
--------
>>> lol_tuples(('x',), 'ij', {'i': 1, 'j': 0}, {})
('x', 1, 0)
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ijk', {'i': 1}, {'j': [0, 1, 2], 'k': [0, 1]}) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 1, 0), ('x', 1, 1, 1)],
[('x', 1, 2, 0), ('x', 1, 2, 1)]]
"""
if not ind:
return head
if ind[0] not in dummies:
return lol_tuples(head + (values[ind[0]],), ind[1:], values, dummies)
else:
return [lol_tuples(head + (v,), ind[1:], values, dummies)
for v in dummies[ind[0]]]
def optimize_atop(full_graph, keys=()):
""" High level optimization of stacked TOP layers
For operations that have multiple TOP operations one after the other, like
``x.T + 123``, we can fuse these into a single TOP operation. This happens
before any actual tasks are generated, and so can reduce overhead.
This finds groups of TOP operations that can be safely fused, and then
passes them to ``rewrite_atop`` for rewriting.
Parameters
----------
full_graph: ShareDict
keys: Iterable
The keys of all outputs of all collections.
Used to make sure that we don't fuse a layer needed by an output
Returns
-------
sharedict : ShareDict
See Also
--------
rewrite_atop
"""
keep = {k[0] if type(k) is tuple else k for k in keys}
layers = full_graph.dicts
dependents = core.reverse_dict(full_graph.dependencies)
roots = {k for k in full_graph.dicts
if not dependents.get(k)}
stack = list(roots)
out = {}
dependencies = {}
seen = set()
while stack:
layer = stack.pop()
if layer in seen or layer not in layers:
continue
seen.add(layer)
# Outer loop walks through possible output TOP layers
if isinstance(layers[layer], TOP):
top_layers = {layer}
deps = set(top_layers)
while deps: # we gather as many sub-layers as we can
dep = deps.pop()
if dep not in layers:
stack.append(dep)
continue
if not isinstance(layers[dep], TOP):
stack.append(dep)
continue
if (dep != layer and dep in keep):
stack.append(dep)
continue
if layers[dep].concatenate != layers[layer].concatenate:
stack.append(dep)
continue
# passed everything, proceed
top_layers.add(dep)
# traverse further to this child's children
for d in full_graph.dependencies.get(dep, ()):
# Don't allow reductions to proceed
output_indices = set(layers[dep].output_indices)
input_indices = {i for _, ind in layers[dep].indices if ind for i in ind}
if len(dependents[d]) <= 1 and output_indices.issuperset(input_indices):
deps.add(d)
else:
stack.append(d)
# Merge these TOP layers into one
new_layer = rewrite_atop([layers[l] for l in top_layers])
out[layer] = new_layer
dependencies[layer] = {k for k, v in new_layer.indices if v is not None}
else:
out[layer] = layers[layer]
dependencies[layer] = full_graph.dependencies.get(layer, set())
stack.extend(full_graph.dependencies.get(layer, ()))
return sharedict.ShareDict(out, dependencies)
def rewrite_atop(inputs):
""" Rewrite a stack of atop expressions into a single atop expression
Given a set of TOP layers, combine them into a single layer. The provided
layers are expected to fit well together. That job is handled by
``optimize_atop``
Parameters
----------
inputs : List[TOP]
Returns
-------
top : TOP
See Also
--------
optimize_atop
"""
inputs = {inp.output: inp for inp in inputs}
dependencies = {inp.output: {d for d, v in inp.indices
if v is not None and d in inputs}
for inp in inputs.values()}
dependents = core.reverse_dict(dependencies)
new_index_iter = (c + (str(d) if d else '') # A, B, ... A1, B1, ...
for d in itertools.count()
for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
[root] = [k for k, v in dependents.items() if not v]
# Our final results. These will change during fusion below
indices = list(inputs[root].indices)
new_axes = inputs[root].new_axes
concatenate = inputs[root].concatenate
dsk = dict(inputs[root].dsk)
changed = True
while changed:
changed = False
for i, (dep, ind) in enumerate(indices):
if ind is None:
continue
if dep not in inputs:
continue
changed = True
# Replace _n with dep name in existing tasks
# (inc, _0) -> (inc, 'b')
dsk = {k: subs(v, {atop_token(i): dep}) for k, v in dsk.items()}
# Remove current input from input indices
# [('a', 'i'), ('b', 'i')] -> [('a', 'i')]
_, current_dep_indices = indices.pop(i)
sub = {atop_token(i): atop_token(i - 1) for i in range(i + 1, len(indices) + 1)}
dsk = subs(dsk, sub)
# Change new input_indices to match the given index from the current computation
# [('c', j')] -> [('c', 'i')]
new_indices = inputs[dep].indices
sub = dict(zip(inputs[dep].output_indices, current_dep_indices))
contracted = {x for _, j in new_indices
if j is not None
for x in j
if x not in inputs[dep].output_indices}
extra = dict(zip(contracted, new_index_iter))
sub.update(extra)
new_indices = [(x, index_subs(j, sub)) for x, j in new_indices]
# Update new_axes
for k, v in inputs[dep].new_axes.items():
new_axes[sub[k]] = v
# Bump new inputs up in list
sub = {}
for i, index in enumerate(new_indices):
try:
contains = index in indices
except (ValueError, TypeError):
contains = False
if contains: # use old inputs if available
sub[atop_token(i)] = atop_token(indices.index(index))
else:
sub[atop_token(i)] = atop_token(len(indices))
indices.append(index)
new_dsk = subs(inputs[dep].dsk, sub)
# indices.extend(new_indices)
dsk.update(new_dsk)
indices = [(a, tuple(b) if isinstance(b, list) else b)
for a, b in indices]
# De-duplicate indices like [(a, ij), (b, i), (a, ij)] -> [(a, ij), (b, i)]
# Make sure that we map everything else appropriately as we remove inputs
new_indices = []
seen = {}
sub = {} # like {_0: _0, _1: _0, _2: _1}
for i, x in enumerate(indices):
if x[1] is not None and x in seen:
sub[i] = seen[x]
else:
if x[1] is not None:
seen[x] = len(new_indices)
sub[i] = len(new_indices)
new_indices.append(x)
sub = {atop_token(k): atop_token(v) for k, v in sub.items()}
dsk = {k: subs(v, sub) for k, v in dsk.items()}
indices_check = {k for k, v in indices if v is not None}
numblocks = toolz.merge([inp.numblocks for inp in inputs.values()])
numblocks = {k: v for k, v in numblocks.items()
if v is None or k in indices_check}
out = TOP(root, inputs[root].output_indices, dsk, new_indices,
numblocks=numblocks, new_axes=new_axes, concatenate=concatenate)
return out
saas.js
$(function () {
// pagination variables
var pageNum = getParameter('pageNum');
if(pageNum == "" || pageNum == null || pageNum == undefined) {
pageNum = 1;
}
notice(pageNum);
$("#searchWord").keyup(function(event) {
if (event.keyCode === 13) {
$("#button-addon2").click();
}
});
$("#button-addon2").click(function () {
notice(pageNum);
});
});
function notice(pageNum) {
var html = "";
var totalNum = "";
var searchType = $("#inputGroupSelect02").val();
var searchWord = $("#searchWord").val();
$.ajax({
url: '/api/fetchQuality?pageNum=' + pageNum + '&searchType=' + searchType + '&searchWord=' + encodeURIComponent(searchWord) + '&type=SaaS',
type: 'GET',
dataType: 'JSON',
success: function (response) {
console.log(response);
if(response.totalCount > 0) {
$("#total").text(response.totalCount);
for (var i = 0; i < response.list.length; i++) {
var ahref = "location.href='/page/details?wr_id=" + response.list[i].wr_id + "'";
html += '<tr onclick="' + ahref + '">'
+ '<td>'
+ '<div class="row">'
+ '<div class="col-12 col-md-3"><img src="/assets/images/icon/nipa.jpg" class="w-100"></div>'
+ '<div class="col-12 col-md-9">'
+ '<div class="row">'
+ '<div class="col-12 col-md-6">'
+ '<p><b>URL</b><span class="text-secondary px-3">|</span><span>' + response.list[i].wr_link1 + '</span></p>'
+ '<p><b>단체명</b><span class="text-secondary px-3">|</span><span>' + response.list[i].wr_subject + '</span></p>'
+ '<p><b>서비스명</b><span class="text-secondary px-3">|</span><span>' + response.list[i].wr_1 + '</span></p>'
+ '</div>'
+ '<div class="col-12 col-md-6">'
+ '<p><b>발급일자</b><span class="text-secondary px-3">|</span><span>' + response.list[i].wr_last + '</span></p>'
+ '<p><b>품질성능 확인</b><span class="text-secondary px-3">|</span><span class="text-danger">' + response.list[i].wr_14 + '</span></p>'
+ '</div>'
+ '<div class="col-12 border-top py-2"><div class="ellipsis-multis">' + response.list[i].wr_content.replace(/(\r\n|\n|\r)/gm, "<br />") + '</div></div>'
+ '</div>'
+ '</div>'
+ '</div>'
+ '</td>'
+ '</tr>';
}
totalNum = Math.ceil(response.totalCount / 10);
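// Worked example: totalCount = 15 gives Math.ceil(15 / 10) = 2 pages,
// totalCount = 20 gives Math.ceil(20 / 10) = 2 pages (10 items per page).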
$("#content").empty();
$("#content").append(html);
$('#show_paginator').bootpag({
total: totalNum,
page: pageNum,
maxVisible: 5
}).on('page', function(event, num) {
location.href='/page/saas?pageNum='+num;
});
}else {
html ='<tr><th colspan="5" scope="row"><div class="bbs-none d-flex justify-content-center align-items-center">게시물이 없습니다.</div></th></tr>';
$("#content").empty();
$("#content").append(html);
}
}
});
}
state.py
# -*- coding: utf-8 -*-
class State :
"""
Classe définissant un état caractérisée par :
- un identifiant
- un booleen pour savoir si c'est un état initial
- un booleen pour savoir si c'est un état final
- un label utilisé pour les constructions
ou il faut memoriser d'ou vient l'etat construit
"""
def __init__ (self, id, init, fin, label=None) :
""" int x bool x bool x str -> State
constructeur d'état
"""
self.id = id
self.init = init
self.fin = fin
if label is None :
self.label = str(self.id)
else :
self.label = label
def __repr__(self) :
""" -> str
renvoie une description de l'état sous la forme d'une chaîne
de caractères contenant son label puis (init) si c'est un état
initial puis (fin) si c'est un état final
elle permet d'utiliser print pour les états
"""
# ret : str
ret = str(self.label)
if self.init :
ret = ret + "(init)"
if self.fin :
ret = ret+ "(fin)"
return ret
def insertPrefix(self, prefid, preflab=None):
""" int x str ->
met à jour l'identifiant et le label de l'état en les
préfixant avec la chaîne de caractères pref
"""
if self.id < 0 :
tempid = - self.id
else :
tempid = self.id
tempid2 = 10**len(str(tempid))*prefid + tempid
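# e.g. prefid = 3 and id = 42 give 10**len('42') * 3 + 42 = 342,
# i.e. the prefix is glued onto the front of the decimal digits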
if self.id < 0 :
self.id = - tempid2
else :
self.id = tempid2
if preflab is None :
self.label = str(prefid) + "_" + str(self.label)
else :
self.label = str(preflab) + "_" + str(self.label)
def __eq__(self, other) :
""" Val -> bool
returns True if the state is equal to other, False otherwise
it allows == to work on states
"""
return type(self) == type(other) and self.id == other.id
def __ne__(self, other) :
""" Val -> bool
rend le booléen vrai si l'état est différent de other, faux sinon
elle permet que != fonctionne pour les états
"""
return not self.__eq__(other)
def __hash__(self) :
""" -> int
rend un identifiant unique (utile pour les tables de hachage)
elle permet que les états puissent appartenir à des ensembles
"""
if type(self.id) == int :
return self.id
# s : str
s=str(self.id)
# res : str
res=''.join(str(ord(c)) for c in s)
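# e.g. id = 'ab' maps to '97' + '98', i.e. int('9798')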
return int(res)
@staticmethod
def isInitialIn(list) :
""" list[State] -> bool
rend vrai si l'un des états de list est un état initial, faux sinon
"""
# s : State
for s in list :
if s.init :
return True
return False
@staticmethod
def isFinalIn(list) :
""" list[State] -> bool
rend vrai si l'un des états de list est un état final, faux sinon
"""
for s in list :
if s.fin :
return True
return False
full.go
// Package full implements a full Tendermint consensus node.
package full
import (
"bytes"
"context"
"fmt"
"math/rand"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/prometheus/client_golang/prometheus"
flag "github.com/spf13/pflag"
"github.com/spf13/viper"
tmabcitypes "github.com/tendermint/tendermint/abci/types"
tmconfig "github.com/tendermint/tendermint/config"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
tmlight "github.com/tendermint/tendermint/light"
tmmempool "github.com/tendermint/tendermint/mempool"
tmnode "github.com/tendermint/tendermint/node"
tmp2p "github.com/tendermint/tendermint/p2p"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmproxy "github.com/tendermint/tendermint/proxy"
tmcli "github.com/tendermint/tendermint/rpc/client/local"
tmrpctypes "github.com/tendermint/tendermint/rpc/core/types"
tmstate "github.com/tendermint/tendermint/state"
tmstatesync "github.com/tendermint/tendermint/statesync"
tmtypes "github.com/tendermint/tendermint/types"
tmdb "github.com/tendermint/tm-db"
beaconAPI "github.com/oasisprotocol/oasis-core/go/beacon/api"
"github.com/oasisprotocol/oasis-core/go/common/cbor"
"github.com/oasisprotocol/oasis-core/go/common/crypto/hash"
"github.com/oasisprotocol/oasis-core/go/common/crypto/signature"
"github.com/oasisprotocol/oasis-core/go/common/errors"
"github.com/oasisprotocol/oasis-core/go/common/identity"
"github.com/oasisprotocol/oasis-core/go/common/logging"
"github.com/oasisprotocol/oasis-core/go/common/node"
"github.com/oasisprotocol/oasis-core/go/common/pubsub"
cmservice "github.com/oasisprotocol/oasis-core/go/common/service"
"github.com/oasisprotocol/oasis-core/go/common/version"
consensusAPI "github.com/oasisprotocol/oasis-core/go/consensus/api"
"github.com/oasisprotocol/oasis-core/go/consensus/api/transaction"
"github.com/oasisprotocol/oasis-core/go/consensus/api/transaction/results"
"github.com/oasisprotocol/oasis-core/go/consensus/metrics"
"github.com/oasisprotocol/oasis-core/go/consensus/tendermint/abci"
"github.com/oasisprotocol/oasis-core/go/consensus/tendermint/api"
"github.com/oasisprotocol/oasis-core/go/consensus/tendermint/apps/supplementarysanity"
tmbeacon "github.com/oasisprotocol/oasis-core/go/consensus/tendermint/beacon"
tmcommon "github.com/oasisprotocol/oasis-core/go/consensus/tendermint/common"
"github.com/oasisprotocol/oasis-core/go/consensus/tendermint/crypto"
"github.com/oasisprotocol/oasis-core/go/consensus/tendermint/db"
tmgovernance "github.com/oasisprotocol/oasis-core/go/consensus/tendermint/governance"
tmkeymanager "github.com/oasisprotocol/oasis-core/go/consensus/tendermint/keymanager"
"github.com/oasisprotocol/oasis-core/go/consensus/tendermint/light"
tmregistry "github.com/oasisprotocol/oasis-core/go/consensus/tendermint/registry"
tmroothash "github.com/oasisprotocol/oasis-core/go/consensus/tendermint/roothash"
tmscheduler "github.com/oasisprotocol/oasis-core/go/consensus/tendermint/scheduler"
tmstaking "github.com/oasisprotocol/oasis-core/go/consensus/tendermint/staking"
genesisAPI "github.com/oasisprotocol/oasis-core/go/genesis/api"
governanceAPI "github.com/oasisprotocol/oasis-core/go/governance/api"
keymanagerAPI "github.com/oasisprotocol/oasis-core/go/keymanager/api"
cmbackground "github.com/oasisprotocol/oasis-core/go/oasis-node/cmd/common/background"
cmflags "github.com/oasisprotocol/oasis-core/go/oasis-node/cmd/common/flags"
cmmetrics "github.com/oasisprotocol/oasis-core/go/oasis-node/cmd/common/metrics"
"github.com/oasisprotocol/oasis-core/go/registry"
registryAPI "github.com/oasisprotocol/oasis-core/go/registry/api"
"github.com/oasisprotocol/oasis-core/go/roothash"
roothashAPI "github.com/oasisprotocol/oasis-core/go/roothash/api"
schedulerAPI "github.com/oasisprotocol/oasis-core/go/scheduler/api"
stakingAPI "github.com/oasisprotocol/oasis-core/go/staking/api"
upgradeAPI "github.com/oasisprotocol/oasis-core/go/upgrade/api"
)
const (
// CfgABCIPruneStrategy configures the ABCI state pruning strategy.
CfgABCIPruneStrategy = "consensus.tendermint.abci.prune.strategy"
// CfgABCIPruneNumKept configures the amount of kept heights if pruning is enabled.
CfgABCIPruneNumKept = "consensus.tendermint.abci.prune.num_kept"
// CfgCheckpointerDisabled disables the ABCI state checkpointer.
CfgCheckpointerDisabled = "consensus.tendermint.checkpointer.disabled"
// CfgCheckpointerCheckInterval configures the ABCI state checkpointing check interval.
CfgCheckpointerCheckInterval = "consensus.tendermint.checkpointer.check_interval"
// CfgSentryUpstreamAddress defines nodes for which we act as a sentry.
CfgSentryUpstreamAddress = "consensus.tendermint.sentry.upstream_address"
// CfgP2PPersistentPeer configures tendermint's persistent peer(s).
CfgP2PPersistentPeer = "consensus.tendermint.p2p.persistent_peer"
// CfgP2PPersistenPeersMaxDialPeriod configures tendermint's persistent peers max dial period.
CfgP2PPersistenPeersMaxDialPeriod = "consensus.tendermint.p2p.persistent_peers_max_dial_period"
// CfgP2PDisablePeerExchange disables tendermint's peer-exchange (Pex) reactor.
CfgP2PDisablePeerExchange = "consensus.tendermint.p2p.disable_peer_exchange"
// CfgP2PUnconditionalPeerIDs configures tendermint's unconditional peer(s).
CfgP2PUnconditionalPeerIDs = "consensus.tendermint.p2p.unconditional_peer_ids"
// CfgDebugUnsafeReplayRecoverCorruptedWAL enables the debug and unsafe
// automatic corrupted WAL recovery during replay.
CfgDebugUnsafeReplayRecoverCorruptedWAL = "consensus.tendermint.debug.unsafe_replay_recover_corrupted_wal"
// CfgMinGasPrice configures the minimum gas price for this validator.
CfgMinGasPrice = "consensus.tendermint.min_gas_price"
// CfgSupplementarySanityEnabled is the supplementary sanity enabled flag.
CfgSupplementarySanityEnabled = "consensus.tendermint.supplementarysanity.enabled"
// CfgSupplementarySanityInterval configures the supplementary sanity check interval.
CfgSupplementarySanityInterval = "consensus.tendermint.supplementarysanity.interval"
// CfgConsensusStateSyncEnabled enables consensus state sync.
CfgConsensusStateSyncEnabled = "consensus.tendermint.state_sync.enabled"
// CfgConsensusStateSyncConsensusNode specifies nodes exposing public consensus services which
// are used to sync a light client.
CfgConsensusStateSyncConsensusNode = "consensus.tendermint.state_sync.consensus_node"
// CfgConsensusStateSyncTrustPeriod is the light client trust period.
CfgConsensusStateSyncTrustPeriod = "consensus.tendermint.state_sync.trust_period"
// CfgConsensusStateSyncTrustHeight is the known trusted height for the light client.
CfgConsensusStateSyncTrustHeight = "consensus.tendermint.state_sync.trust_height"
// CfgConsensusStateSyncTrustHash is the known trusted block header hash for the light client.
CfgConsensusStateSyncTrustHash = "consensus.tendermint.state_sync.trust_hash"
// CfgUpgradeStopDelay is the average amount of time to delay shutting down the node on upgrade.
CfgUpgradeStopDelay = "consensus.tendermint.upgrade.stop_delay"
)
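// Illustrative usage sketch (the command line below is an assumption, not
// taken from this file): bootstrapping a node via state sync with the flags
// defined above, assuming the standard oasis-node flag wiring:
//
//   oasis-node ... \
//     --consensus.tendermint.state_sync.enabled \
//     --consensus.tendermint.state_sync.consensus_node ID@host:port \
//     --consensus.tendermint.state_sync.trust_height <height> \
//     --consensus.tendermint.state_sync.trust_hash <block-header-hash>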
const (
// Time difference threshold used when considering if the node is done with
// initial syncing. If the difference is greater than the specified threshold
// the node is considered not yet synced.
// NOTE: this is only used during the initial sync.
syncWorkerLastBlockTimeDiffThreshold = 1 * time.Minute
minUpgradeStopWaitPeriod = 5 * time.Second
// tmSubscriberID is the subscriber identifier used for all internal Tendermint pubsub
// subscriptions. If any other subscriber IDs need to be derived they will be under this prefix.
tmSubscriberID = "oasis-core"
)
var (
_ api.Backend = (*fullService)(nil)
labelTendermint = prometheus.Labels{"backend": "tendermint"}
// Flags has the configuration flags.
Flags = flag.NewFlagSet("", flag.ContinueOnError)
)
// fullService implements a full Tendermint node.
type fullService struct { // nolint: maligned
sync.Mutex
cmservice.BaseBackgroundService
ctx context.Context
svcMgr *cmbackground.ServiceManager
upgrader upgradeAPI.Backend
mux *abci.ApplicationServer
node *tmnode.Node
client *tmcli.Local
blockNotifier *pubsub.Broker
failMonitor *failMonitor
stateStore tmstate.Store
beacon beaconAPI.Backend
governance governanceAPI.Backend
keymanager keymanagerAPI.Backend
registry registryAPI.Backend
roothash roothashAPI.Backend
scheduler schedulerAPI.Backend
staking stakingAPI.Backend
submissionMgr consensusAPI.SubmissionManager
serviceClients []api.ServiceClient
serviceClientsWg sync.WaitGroup
genesis *genesisAPI.Document
genesisProvider genesisAPI.Provider
identity *identity.Identity
dataDir string
isInitialized, isStarted bool
startedCh chan struct{}
syncedCh chan struct{}
quitCh chan struct{}
startFn func() error
stopOnce sync.Once
nextSubscriberID uint64
}
func (t *fullService) initialized() bool {
t.Lock()
defer t.Unlock()
return t.isInitialized
}
func (t *fullService) started() bool {
t.Lock()
defer t.Unlock()
return t.isStarted
}
// Implements service.BackgroundService.
func (t *fullService) Start() error {
if t.started() {
return fmt.Errorf("tendermint: service already started")
}
switch t.initialized() {
case true:
if err := t.mux.Start(); err != nil {
return err
}
if err := t.startFn(); err != nil {
return err
}
if err := t.node.Start(); err != nil {
return fmt.Errorf("tendermint: failed to start service: %w", err)
}
// Make sure the quit channel is closed when the node shuts down.
go func() {
select {
case <-t.quitCh:
case <-t.node.Quit():
select {
case <-t.quitCh:
default:
close(t.quitCh)
}
}
}()
// Start event dispatchers for all the service clients.
t.serviceClientsWg.Add(len(t.serviceClients))
for _, svc := range t.serviceClients {
go t.serviceClientWorker(t.ctx, svc)
}
// Start sync checker.
go t.syncWorker()
// Start block notifier.
go t.blockNotifierWorker()
// Optionally start metrics updater.
if cmmetrics.Enabled() {
go t.metrics()
}
case false:
close(t.syncedCh)
}
t.Lock()
t.isStarted = true
t.Unlock()
close(t.startedCh)
return nil
}
// Implements service.BackgroundService.
func (t *fullService) Quit() <-chan struct{} {
return t.quitCh
}
// Implements service.BackgroundService.
func (t *fullService) Cleanup() {
t.serviceClientsWg.Wait()
t.svcMgr.Cleanup()
}
// Implements service.BackgroundService.
func (t *fullService) Stop() {
if !t.initialized() || !t.started() {
return
}
t.stopOnce.Do(func() {
t.failMonitor.markCleanShutdown()
if err := t.node.Stop(); err != nil {
t.Logger.Error("Error on stopping node", err)
}
t.svcMgr.Stop()
t.mux.Stop()
})
}
func (t *fullService) Started() <-chan struct{} {
return t.startedCh
}
func (t *fullService) SupportedFeatures() consensusAPI.FeatureMask {
return consensusAPI.FeatureServices | consensusAPI.FeatureFullNode
}
func (t *fullService) Synced() <-chan struct{} {
return t.syncedCh
}
func (t *fullService) GetAddresses() ([]node.ConsensusAddress, error) {
u, err := tmcommon.GetExternalAddress()
if err != nil {
return nil, err
}
var addr node.ConsensusAddress
if err = addr.Address.UnmarshalText([]byte(u.Host)); err != nil {
return nil, fmt.Errorf("tendermint: failed to parse external address host: %w", err)
}
addr.ID = t.identity.P2PSigner.Public()
return []node.ConsensusAddress{addr}, nil
}
func (t *fullService) StateToGenesis(ctx context.Context, blockHeight int64) (*genesisAPI.Document, error) {
blk, err := t.GetTendermintBlock(ctx, blockHeight)
if err != nil {
t.Logger.Error("failed to get tendermint block",
"err", err,
"block_height", blockHeight,
)
return nil, err
}
if blk == nil {
return nil, consensusAPI.ErrNoCommittedBlocks
}
blockHeight = blk.Header.Height
// Get initial genesis doc.
genesisDoc, err := t.GetGenesisDocument(ctx)
if err != nil {
t.Logger.Error("failed getting genesis document",
"err", err,
)
return nil, err
}
// Call StateToGenesis on all backends and merge the results together.
beaconGenesis, err := t.beacon.StateToGenesis(ctx, blockHeight)
if err != nil {
t.Logger.Error("beacon StateToGenesis failure",
"err", err,
"block_height", blockHeight,
)
return nil, err
}
registryGenesis, err := t.registry.StateToGenesis(ctx, blockHeight)
if err != nil {
t.Logger.Error("registry StateToGenesis failure",
"err", err,
"block_height", blockHeight,
)
return nil, err
}
roothashGenesis, err := t.roothash.StateToGenesis(ctx, blockHeight)
if err != nil {
t.Logger.Error("roothash StateToGenesis failure",
"err", err,
"block_height", blockHeight,
)
return nil, err
}
stakingGenesis, err := t.staking.StateToGenesis(ctx, blockHeight)
if err != nil {
t.Logger.Error("staking StateToGenesis failure",
"err", err,
"block_height", blockHeight,
)
return nil, err
}
keymanagerGenesis, err := t.keymanager.StateToGenesis(ctx, blockHeight)
if err != nil {
t.Logger.Error("keymanager StateToGenesis failure",
"err", err,
"block_height", blockHeight,
)
return nil, err
}
schedulerGenesis, err := t.scheduler.StateToGenesis(ctx, blockHeight)
if err != nil {
t.Logger.Error("scheduler StateToGenesis failure",
"err", err,
"block_height", blockHeight,
)
return nil, err
}
governanceGenesis, err := t.governance.StateToGenesis(ctx, blockHeight)
if err != nil {
t.Logger.Error("governance StateToGenesis failure",
"err", err,
"block_height", blockHeight,
)
return nil, err
}
return &genesisAPI.Document{
Height: blockHeight,
ChainID: genesisDoc.ChainID,
HaltEpoch: genesisDoc.HaltEpoch,
Time: blk.Header.Time,
Beacon: *beaconGenesis,
Registry: *registryGenesis,
RootHash: *roothashGenesis,
Staking: *stakingGenesis,
Governance: *governanceGenesis,
KeyManager: *keymanagerGenesis,
Scheduler: *schedulerGenesis,
Consensus: genesisDoc.Consensus,
}, nil
}
func (t *fullService) GetGenesisDocument(ctx context.Context) (*genesisAPI.Document, error) {
return t.genesis, nil
}
func (t *fullService) GetChainContext(ctx context.Context) (string, error) {
return t.genesis.ChainContext(), nil
}
func (t *fullService) RegisterHaltHook(hook consensusAPI.HaltHook) {
if !t.initialized() {
return
}
t.mux.RegisterHaltHook(hook)
}
func (t *fullService) SubmitTx(ctx context.Context, tx *transaction.SignedTransaction) error {
// Subscribe to the transaction being included in a block.
data := cbor.Marshal(tx)
query := tmtypes.EventQueryTxFor(data)
subID := t.newSubscriberID()
txSub, err := t.subscribe(subID, query)
if err != nil {
return err
}
if ptrSub, ok := txSub.(*tendermintPubsubBuffer).tmSubscription.(*tmpubsub.Subscription); ok && ptrSub == nil {
t.Logger.Debug("broadcastTx: service has shut down. Cancel our context to recover")
<-ctx.Done()
return ctx.Err()
}
defer t.unsubscribe(subID, query) // nolint: errcheck
// Subscribe to the transaction becoming invalid.
txHash := hash.NewFromBytes(data)
recheckCh, recheckSub, err := t.mux.WatchInvalidatedTx(txHash)
if err != nil {
return err
}
defer recheckSub.Close()
// First try to broadcast.
if err := t.broadcastTxRaw(data); err != nil {
return err
}
// Wait for the transaction to be included in a block.
select {
case v := <-recheckCh:
return v
case v := <-txSub.Out():
if result := v.Data().(tmtypes.EventDataTx).Result; !result.IsOK() {
return errors.FromCode(result.GetCodespace(), result.GetCode(), result.GetLog())
}
return nil
case <-txSub.Cancelled():
return context.Canceled
case <-ctx.Done():
return ctx.Err()
}
}
func (t *fullService) broadcastTxRaw(data []byte) error {
// We could use t.client.BroadcastTxSync but that is annoying as it
// doesn't give you the right fields when CheckTx fails.
mp := t.node.Mempool()
// Submit the transaction to mempool and wait for response.
ch := make(chan *tmabcitypes.Response, 1)
err := mp.CheckTx(tmtypes.Tx(data), func(rsp *tmabcitypes.Response) {
ch <- rsp
close(ch)
}, tmmempool.TxInfo{})
switch err {
case nil:
case tmmempool.ErrTxInCache:
// Transaction already in the mempool or was recently there.
return consensusAPI.ErrDuplicateTx
default:
return fmt.Errorf("tendermint: failed to submit to local mempool: %w", err)
}
rsp := <-ch
if result := rsp.GetCheckTx(); !result.IsOK() {
return errors.FromCode(result.GetCodespace(), result.GetCode(), result.GetLog())
}
return nil
}
func (t *fullService) newSubscriberID() string {
return fmt.Sprintf("%s/subscriber-%d", tmSubscriberID, atomic.AddUint64(&t.nextSubscriberID, 1))
}
func (t *fullService) SubmitEvidence(ctx context.Context, evidence *consensusAPI.Evidence) error {
var protoEv tmproto.Evidence
if err := protoEv.Unmarshal(evidence.Meta); err != nil {
return fmt.Errorf("tendermint: malformed evidence while unmarshalling: %w", err)
}
ev, err := tmtypes.EvidenceFromProto(&protoEv)
if err != nil {
return fmt.Errorf("tendermint: malformed evidence while converting: %w", err)
}
if _, err := t.client.BroadcastEvidence(ctx, ev); err != nil {
return fmt.Errorf("tendermint: broadcast evidence failed: %w", err)
}
return nil
}
func (t *fullService) EstimateGas(ctx context.Context, req *consensusAPI.EstimateGasRequest) (transaction.Gas, error) {
return t.mux.EstimateGas(req.Signer, req.Transaction)
}
func (t *fullService) subscribe(subscriber string, query tmpubsub.Query) (tmtypes.Subscription, error) {
// Note: The tendermint documentation claims that using SubscribeUnbuffered can
// freeze the server; the buffered Subscribe, however, can drop events and
// force-unsubscribe the channel if processing takes too long.
subFn := func() (tmtypes.Subscription, error) {
sub, err := t.node.EventBus().SubscribeUnbuffered(t.ctx, subscriber, query)
if err != nil {
return nil, err
}
// Oh yes, this can actually return a nil subscription even though the
// error was also nil if the node is just shutting down.
if sub == (*tmpubsub.Subscription)(nil) {
return nil, context.Canceled
}
return newTendermintPubsubBuffer(sub), nil
}
if t.started() {
return subFn()
}
// The node doesn't exist until it's started, since creating the node
// triggers replay, InitChain, etc.
t.Logger.Debug("Subscribe: node not available yet, blocking",
"subscriber", subscriber,
"query", query,
)
// XXX/yawning: As far as I can tell just blocking here is safe, as
// every single consumer of the API subscribes from a goroutine.
select {
case <-t.startedCh:
case <-t.ctx.Done():
return nil, t.ctx.Err()
}
return subFn()
}
func (t *fullService) unsubscribe(subscriber string, query tmpubsub.Query) error {
if t.started() {
return t.node.EventBus().Unsubscribe(t.ctx, subscriber, query)
}
return fmt.Errorf("tendermint: unsubscribe called with no backing service")
}
func (t *fullService) RegisterApplication(app api.Application) error {
return t.mux.Register(app)
}
func (t *fullService) SetTransactionAuthHandler(handler api.TransactionAuthHandler) error {
return t.mux.SetTransactionAuthHandler(handler)
}
func (t *fullService) TransactionAuthHandler() consensusAPI.TransactionAuthHandler {
return t.mux.TransactionAuthHandler()
}
func (t *fullService) SubmissionManager() consensusAPI.SubmissionManager {
return t.submissionMgr
}
func (t *fullService) Beacon() beaconAPI.Backend {
return t.beacon
}
func (t *fullService) KeyManager() keymanagerAPI.Backend {
return t.keymanager
}
func (t *fullService) Registry() registryAPI.Backend {
return t.registry
}
func (t *fullService) RootHash() roothashAPI.Backend {
return t.roothash
}
func (t *fullService) Staking() stakingAPI.Backend {
return t.staking
}
func (t *fullService) Scheduler() schedulerAPI.Backend {
return t.scheduler
}
func (t *fullService) Governance() governanceAPI.Backend {
return t.governance
}
func (t *fullService) GetBlock(ctx context.Context, height int64) (*consensusAPI.Block, error) {
blk, err := t.GetTendermintBlock(ctx, height)
if err != nil {
return nil, err
}
if blk == nil {
return nil, consensusAPI.ErrNoCommittedBlocks
}
return api.NewBlock(blk), nil
}
func (t *fullService) GetSignerNonce(ctx context.Context, req *consensusAPI.GetSignerNonceRequest) (uint64, error) {
return t.mux.TransactionAuthHandler().GetSignerNonce(ctx, req)
}
func (t *fullService) GetTransactions(ctx context.Context, height int64) ([][]byte, error) {
blk, err := t.GetTendermintBlock(ctx, height)
if err != nil {
return nil, err
}
if blk == nil {
return nil, consensusAPI.ErrNoCommittedBlocks
}
txs := make([][]byte, 0, len(blk.Data.Txs))
for _, v := range blk.Data.Txs {
txs = append(txs, v[:])
}
return txs, nil
}
func (t *fullService) GetTransactionsWithResults(ctx context.Context, height int64) (*consensusAPI.TransactionsWithResults, error) {
var txsWithResults consensusAPI.TransactionsWithResults
blk, err := t.GetTendermintBlock(ctx, height)
if err != nil {
return nil, err
}
if blk == nil {
return nil, consensusAPI.ErrNoCommittedBlocks
}
for _, tx := range blk.Data.Txs {
txsWithResults.Transactions = append(txsWithResults.Transactions, tx[:])
}
res, err := t.GetBlockResults(ctx, blk.Height)
if err != nil {
return nil, err
}
for txIdx, rs := range res.TxsResults {
// Transaction result.
result := &results.Result{
Error: results.Error{
Module: rs.GetCodespace(),
Code: rs.GetCode(),
Message: rs.GetLog(),
},
}
// Transaction staking events.
stakingEvents, err := tmstaking.EventsFromTendermint(
txsWithResults.Transactions[txIdx],
blk.Height,
rs.Events,
)
if err != nil {
return nil, err
}
for _, e := range stakingEvents {
result.Events = append(result.Events, &results.Event{Staking: e})
}
// Transaction registry events.
registryEvents, _, err := tmregistry.EventsFromTendermint(
txsWithResults.Transactions[txIdx],
blk.Height,
rs.Events,
)
if err != nil {
return nil, err
}
for _, e := range registryEvents {
result.Events = append(result.Events, &results.Event{Registry: e})
}
// Transaction roothash events.
roothashEvents, err := tmroothash.EventsFromTendermint(
txsWithResults.Transactions[txIdx],
blk.Height,
rs.Events,
)
if err != nil {
return nil, err
}
for _, e := range roothashEvents {
result.Events = append(result.Events, &results.Event{RootHash: e})
}
// Transaction governance events.
governanceEvents, err := tmgovernance.EventsFromTendermint(
txsWithResults.Transactions[txIdx],
blk.Height,
rs.Events,
)
if err != nil {
return nil, err
}
for _, e := range governanceEvents {
result.Events = append(result.Events, &results.Event{Governance: e})
}
txsWithResults.Results = append(txsWithResults.Results, result)
}
return &txsWithResults, nil
}
func (t *fullService) GetUnconfirmedTransactions(ctx context.Context) ([][]byte, error) {
mempoolTxs := t.node.Mempool().ReapMaxTxs(-1)
txs := make([][]byte, 0, len(mempoolTxs))
for _, v := range mempoolTxs {
txs = append(txs, v[:])
}
return txs, nil
}
func (t *fullService) GetStatus(ctx context.Context) (*consensusAPI.Status, error) {
status := &consensusAPI.Status{
Version: version.ConsensusProtocol,
Backend: api.BackendName,
Features: t.SupportedFeatures(),
}
status.ChainContext = t.genesis.ChainContext()
status.GenesisHeight = t.genesis.Height
if t.started() {
// Only attempt to fetch blocks in case the consensus service has started as otherwise
// requests will block.
genBlk, err := t.GetBlock(ctx, t.genesis.Height)
switch err {
case nil:
status.GenesisHash = genBlk.Hash
default:
// We may not be able to fetch the genesis block in case it has been pruned.
}
lastRetainedHeight, err := t.GetLastRetainedVersion(ctx)
if err != nil {
return nil, err
}
// Some pruning configurations return 0 instead of a valid block height. Clamp those to the genesis height.
if lastRetainedHeight < t.genesis.Height {
lastRetainedHeight = t.genesis.Height
}
status.LastRetainedHeight = lastRetainedHeight
lastRetainedBlock, err := t.GetBlock(ctx, lastRetainedHeight)
switch err {
case nil:
status.LastRetainedHash = lastRetainedBlock.Hash
default:
// Before we commit the first block, we can't load it from GetBlock. Don't give its hash in this case.
}
// Latest block.
latestBlk, err := t.GetBlock(ctx, consensusAPI.HeightLatest)
switch err {
case nil:
status.LatestHeight = latestBlk.Height
status.LatestHash = latestBlk.Hash
status.LatestTime = latestBlk.Time
status.LatestStateRoot = latestBlk.StateRoot
var epoch beaconAPI.EpochTime
epoch, err = t.beacon.GetEpoch(ctx, status.LatestHeight)
if err != nil {
return nil, fmt.Errorf("failed to fetch epoch: %w", err)
}
status.LatestEpoch = epoch
case consensusAPI.ErrNoCommittedBlocks:
// No committed blocks yet.
default:
return nil, fmt.Errorf("failed to fetch current block: %w", err)
}
// List of consensus peers.
tmpeers := t.node.Switch().Peers().List()
peers := make([]string, 0, len(tmpeers))
for _, tmpeer := range tmpeers {
p := string(tmpeer.ID()) + "@" + tmpeer.RemoteAddr().String()
peers = append(peers, p)
}
status.NodePeers = peers
// Check if the local node is in the validator set for the latest (uncommitted) block.
valSetHeight := status.LatestHeight + 1
if valSetHeight < status.GenesisHeight {
valSetHeight = status.GenesisHeight
}
vals, err := t.stateStore.LoadValidators(valSetHeight)
if err != nil {
return nil, fmt.Errorf("failed to load validator set: %w", err)
}
consensusPk := t.identity.ConsensusSigner.Public()
consensusAddr := []byte(crypto.PublicKeyToTendermint(&consensusPk).Address())
status.IsValidator = vals.HasAddress(consensusAddr)
}
return status, nil
}
func (t *fullService) WatchBlocks(ctx context.Context) (<-chan *consensusAPI.Block, pubsub.ClosableSubscription, error) {
ch, sub := t.WatchTendermintBlocks()
mapCh := make(chan *consensusAPI.Block)
go func() {
defer close(mapCh)
for {
select {
case tmBlk, ok := <-ch:
if !ok {
return
}
mapCh <- api.NewBlock(tmBlk)
case <-ctx.Done():
return
}
}
}()
return mapCh, sub, nil
}
func (t *fullService) ensureStarted(ctx context.Context) error {
// Make sure that the Tendermint service has started so that we
// have the client interface available.
select {
case <-t.startedCh:
case <-t.ctx.Done():
return t.ctx.Err()
case <-ctx.Done():
return ctx.Err()
}
return nil
}
func (t *fullService) initialize() error {
t.Lock()
defer t.Unlock()
if t.isInitialized {
return nil
}
if err := t.lazyInit(); err != nil {
return err
}
// Apply the genesis public key blacklist.
for _, v := range t.genesis.Consensus.Parameters.PublicKeyBlacklist {
if err := v.Blacklist(); err != nil {
t.Logger.Error("initialize: failed to blacklist key",
"err", err,
"pk", v,
)
return err
}
}
// Initialize the beacon/epochtime backend.
var (
err error
scBeacon tmbeacon.ServiceClient
)
if scBeacon, err = tmbeacon.New(t.ctx, t); err != nil {
t.Logger.Error("initialize: failed to initialize beapoch backend",
"err", err,
)
return err
}
t.beacon = scBeacon
t.serviceClients = append(t.serviceClients, scBeacon)
if err = t.mux.SetEpochtime(t.beacon); err != nil {
return err
}
// Initialize the rest of backends.
var scKeyManager tmkeymanager.ServiceClient
if scKeyManager, err = tmkeymanager.New(t.ctx, t); err != nil {
t.Logger.Error("initialize: failed to initialize keymanager backend",
"err", err,
)
return err
}
t.keymanager = scKeyManager
t.serviceClients = append(t.serviceClients, scKeyManager)
var scRegistry tmregistry.ServiceClient
if scRegistry, err = tmregistry.New(t.ctx, t); err != nil {
t.Logger.Error("initialize: failed to initialize registry backend",
"err", err,
)
return err
}
t.registry = scRegistry
if cmmetrics.Enabled() {
t.svcMgr.RegisterCleanupOnly(registry.NewMetricsUpdater(t.ctx, t.registry), "registry metrics updater")
}
t.serviceClients = append(t.serviceClients, scRegistry)
t.svcMgr.RegisterCleanupOnly(t.registry, "registry backend")
var scStaking tmstaking.ServiceClient
if scStaking, err = tmstaking.New(t.ctx, t); err != nil {
t.Logger.Error("staking: failed to initialize staking backend",
"err", err,
)
return err
}
t.staking = scStaking
t.serviceClients = append(t.serviceClients, scStaking)
t.svcMgr.RegisterCleanupOnly(t.staking, "staking backend")
var scScheduler tmscheduler.ServiceClient
if scScheduler, err = tmscheduler.New(t.ctx, t); err != nil {
t.Logger.Error("scheduler: failed to initialize scheduler backend",
"err", err,
)
return err
}
t.scheduler = scScheduler
t.serviceClients = append(t.serviceClients, scScheduler)
t.svcMgr.RegisterCleanupOnly(t.scheduler, "scheduler backend")
var scRootHash tmroothash.ServiceClient
if scRootHash, err = tmroothash.New(t.ctx, t.dataDir, t); err != nil {
t.Logger.Error("roothash: failed to initialize roothash backend",
"err", err,
)
return err
}
t.roothash = roothash.NewMetricsWrapper(scRootHash)
t.serviceClients = append(t.serviceClients, scRootHash)
t.svcMgr.RegisterCleanupOnly(t.roothash, "roothash backend")
var scGovernance tmgovernance.ServiceClient
if scGovernance, err = tmgovernance.New(t.ctx, t); err != nil {
t.Logger.Error("governance: failed to initialize governance backend",
"err", err,
)
return err
}
t.governance = scGovernance
t.serviceClients = append(t.serviceClients, scGovernance)
t.svcMgr.RegisterCleanupOnly(t.governance, "governance backend")
// Enable supplementary sanity checks when enabled.
if viper.GetBool(CfgSupplementarySanityEnabled) {
ssa := supplementarysanity.New(viper.GetUint64(CfgSupplementarySanityInterval))
if err = t.RegisterApplication(ssa); err != nil {
return fmt.Errorf("failed to register supplementary sanity check app: %w", err)
}
}
return nil
}
func (t *fullService) GetLastRetainedVersion(ctx context.Context) (int64, error) {
if err := t.ensureStarted(ctx); err != nil {
return -1, err
}
return t.node.BlockStore().Base(), nil
}
func (t *fullService) heightToTendermintHeight(height int64) (int64, error) {
var tmHeight int64
if height == consensusAPI.HeightLatest {
// Do not let Tendermint determine the latest height (e.g., by passing nil) as that
// completely ignores ABCI processing so it can return a block for which local state does
// not yet exist. Use our mux notion of latest height instead.
tmHeight = t.mux.State().BlockHeight()
if tmHeight == 0 {
// No committed blocks yet.
return 0, consensusAPI.ErrNoCommittedBlocks
}
} else {
tmHeight = height
}
return tmHeight, nil
}
func (t *fullService) GetTendermintBlock(ctx context.Context, height int64) (*tmtypes.Block, error) {
if err := t.ensureStarted(ctx); err != nil {
return nil, err
}
tmHeight, err := t.heightToTendermintHeight(height)
switch err {
case nil:
// Continues below.
case consensusAPI.ErrNoCommittedBlocks:
// No committed blocks yet.
return nil, nil
default:
return nil, err
}
result, err := t.client.Block(ctx, &tmHeight)
if err != nil {
return nil, fmt.Errorf("tendermint: block query failed: %w", err)
}
return result.Block, nil
}
func (t *fullService) GetBlockResults(ctx context.Context, height int64) (*tmrpctypes.ResultBlockResults, error) {
if t.client == nil {
panic("client not available yet")
}
if err := t.ensureStarted(ctx); err != nil {
return nil, err
}
tmHeight, err := t.heightToTendermintHeight(height)
if err != nil {
return nil, err
}
result, err := t.client.BlockResults(ctx, &tmHeight)
if err != nil {
return nil, fmt.Errorf("tendermint: block results query failed: %w", err)
}
return result, nil
}
func (t *fullService) WatchTendermintBlocks() (<-chan *tmtypes.Block, *pubsub.Subscription) {
typedCh := make(chan *tmtypes.Block)
sub := t.blockNotifier.Subscribe()
sub.Unwrap(typedCh)
return typedCh, sub
}
func (t *fullService) ConsensusKey() signature.PublicKey {
return t.identity.ConsensusSigner.Public()
}
func (t *fullService) lazyInit() error {
if t.isInitialized {
return nil
}
var err error
// Create Tendermint application mux.
var pruneCfg abci.PruneConfig
pruneStrat := viper.GetString(CfgABCIPruneStrategy)
if err = pruneCfg.Strategy.FromString(pruneStrat); err != nil {
return err
}
pruneCfg.NumKept = viper.GetUint64(CfgABCIPruneNumKept)
appConfig := &abci.ApplicationConfig{
DataDir: filepath.Join(t.dataDir, tmcommon.StateDir),
StorageBackend: db.GetBackendName(),
Pruning: pruneCfg,
HaltEpochHeight: t.genesis.HaltEpoch,
MinGasPrice: viper.GetUint64(CfgMinGasPrice),
OwnTxSigner: t.identity.NodeSigner.Public(),
DisableCheckpointer: viper.GetBool(CfgCheckpointerDisabled),
CheckpointerCheckInterval: viper.GetDuration(CfgCheckpointerCheckInterval),
InitialHeight: uint64(t.genesis.Height),
}
t.mux, err = abci.NewApplicationServer(t.ctx, t.upgrader, appConfig)
if err != nil {
return err
}
// Tendermint needs the on-disk directories to be present when
// launched like this, so create the relevant sub-directories
// under the node DataDir.
tendermintDataDir := filepath.Join(t.dataDir, tmcommon.StateDir)
if err = tmcommon.InitDataDir(tendermintDataDir); err != nil {
return err
}
// Create Tendermint node.
tenderConfig := tmconfig.DefaultConfig()
_ = viper.Unmarshal(&tenderConfig)
tenderConfig.SetRoot(tendermintDataDir)
timeoutCommit := t.genesis.Consensus.Parameters.TimeoutCommit
emptyBlockInterval := t.genesis.Consensus.Parameters.EmptyBlockInterval
tenderConfig.Consensus.TimeoutCommit = timeoutCommit
tenderConfig.Consensus.SkipTimeoutCommit = t.genesis.Consensus.Parameters.SkipTimeoutCommit
tenderConfig.Consensus.CreateEmptyBlocks = true
tenderConfig.Consensus.CreateEmptyBlocksInterval = emptyBlockInterval
tenderConfig.Consensus.DebugUnsafeReplayRecoverCorruptedWAL = viper.GetBool(CfgDebugUnsafeReplayRecoverCorruptedWAL) && cmflags.DebugDontBlameOasis()
tenderConfig.Instrumentation.Prometheus = true
tenderConfig.Instrumentation.PrometheusListenAddr = ""
tenderConfig.TxIndex.Indexer = "null"
tenderConfig.P2P.ListenAddress = viper.GetString(tmcommon.CfgCoreListenAddress)
tenderConfig.P2P.ExternalAddress = viper.GetString(tmcommon.CfgCoreExternalAddress)
tenderConfig.P2P.PexReactor = !viper.GetBool(CfgP2PDisablePeerExchange)
tenderConfig.P2P.MaxNumInboundPeers = viper.GetInt(tmcommon.CfgP2PMaxNumInboundPeers)
tenderConfig.P2P.MaxNumOutboundPeers = viper.GetInt(tmcommon.CfgP2PMaxNumOutboundPeers)
tenderConfig.P2P.SendRate = viper.GetInt64(tmcommon.CfgP2PSendRate)
tenderConfig.P2P.RecvRate = viper.GetInt64(tmcommon.CfgP2PRecvRate)
// Persistent peers need to be lowercase as p2p/transport.go:MultiplexTransport.upgrade()
// uses a case sensitive string comparison to validate public keys.
// Since persistent peers is expected to be in comma-delimited ID@host:port format,
// lowercasing the whole string is ok.
tenderConfig.P2P.PersistentPeers = strings.ToLower(strings.Join(viper.GetStringSlice(CfgP2PPersistentPeer), ","))
tenderConfig.P2P.PersistentPeersMaxDialPeriod = viper.GetDuration(CfgP2PPersistenPeersMaxDialPeriod)
// Unconditional peer IDs need to be lowercase as p2p/transport.go:MultiplexTransport.upgrade()
// uses a case sensitive string comparison to validate public keys.
// Since unconditional peer IDs are expected to be in comma-delimited ID format,
// lowercasing the whole string is ok.
tenderConfig.P2P.UnconditionalPeerIDs = strings.ToLower(strings.Join(viper.GetStringSlice(CfgP2PUnconditionalPeerIDs), ","))
// Seed IDs need to be lowercase as p2p/transport.go:MultiplexTransport.upgrade()
// uses a case sensitive string comparison to validate public keys.
// Since Seeds is expected to be in comma-delimited ID@host:port format,
// lowercasing the whole string is ok.
tenderConfig.P2P.Seeds = strings.ToLower(strings.Join(viper.GetStringSlice(tmcommon.CfgP2PSeed), ","))
tenderConfig.P2P.AddrBookStrict = !(viper.GetBool(tmcommon.CfgDebugP2PAddrBookLenient) && cmflags.DebugDontBlameOasis())
tenderConfig.P2P.AllowDuplicateIP = viper.GetBool(tmcommon.CfgDebugP2PAllowDuplicateIP) && cmflags.DebugDontBlameOasis()
tenderConfig.RPC.ListenAddress = ""
sentryUpstreamAddrs := viper.GetStringSlice(CfgSentryUpstreamAddress)
if len(sentryUpstreamAddrs) > 0 {
t.Logger.Info("Acting as a tendermint sentry", "addrs", sentryUpstreamAddrs)
// Append upstream addresses to persistent, private and unconditional peers.
tenderConfig.P2P.PersistentPeers += "," + strings.ToLower(strings.Join(sentryUpstreamAddrs, ","))
var sentryUpstreamIDs []string
for _, addr := range sentryUpstreamAddrs {
parts := strings.Split(addr, "@")
if len(parts) != 2 {
return fmt.Errorf("malformed sentry upstream address: %s", addr)
}
sentryUpstreamIDs = append(sentryUpstreamIDs, parts[0])
}
// Convert upstream node IDs to lowercase (like other IDs) since
// Tendermint stores them in a map and uses a case sensitive string
// comparison to check ID equality.
sentryUpstreamIDsStr := strings.ToLower(strings.Join(sentryUpstreamIDs, ","))
tenderConfig.P2P.PrivatePeerIDs += "," + sentryUpstreamIDsStr
tenderConfig.P2P.UnconditionalPeerIDs += "," + sentryUpstreamIDsStr
}
if !tenderConfig.P2P.PexReactor {
t.Logger.Info("pex reactor disabled",
logging.LogEvent, api.LogEventPeerExchangeDisabled,
)
}
tendermintPV, err := crypto.LoadOrGeneratePrivVal(tendermintDataDir, t.identity.ConsensusSigner)
if err != nil {
return err
}
tmGenDoc, err := api.GetTendermintGenesisDocument(t.genesisProvider)
if err != nil {
t.Logger.Error("failed to obtain genesis document",
"err", err,
)
return err
}
tendermintGenesisProvider := func() (*tmtypes.GenesisDoc, error) {
return tmGenDoc, nil
}
dbProvider, err := db.GetProvider()
if err != nil {
t.Logger.Error("failed to obtain database provider",
"err", err,
)
return err
}
// HACK: Wrap the provider so we can extract the state database handle. This is required because
// Tendermint does not expose a way to access the state database and we need it to bypass some
// stupid things like pagination on the in-process "client".
wrapDbProvider := func(dbCtx *tmnode.DBContext) (tmdb.DB, error) {
db, derr := dbProvider(dbCtx)
if derr != nil {
return nil, derr
}
switch dbCtx.ID {
case "state":
// Tendermint state database.
t.stateStore = tmstate.NewStore(db)
default:
}
return db, nil
}
// Configure state sync if enabled.
var stateProvider tmstatesync.StateProvider
if viper.GetBool(CfgConsensusStateSyncEnabled) {
t.Logger.Info("state sync enabled")
// Enable state sync in the configuration.
tenderConfig.StateSync.Enable = true
tenderConfig.StateSync.TrustHash = viper.GetString(CfgConsensusStateSyncTrustHash)
// Create new state sync state provider.
cfg := light.ClientConfig{
GenesisDocument: tmGenDoc,
TrustOptions: tmlight.TrustOptions{
Period: viper.GetDuration(CfgConsensusStateSyncTrustPeriod),
Height: int64(viper.GetUint64(CfgConsensusStateSyncTrustHeight)),
Hash: tenderConfig.StateSync.TrustHashBytes(),
},
}
for _, rawAddr := range viper.GetStringSlice(CfgConsensusStateSyncConsensusNode) {
var addr node.TLSAddress
if err = addr.UnmarshalText([]byte(rawAddr)); err != nil {
return fmt.Errorf("failed to parse state sync consensus node address (%s): %w", rawAddr, err)
}
cfg.ConsensusNodes = append(cfg.ConsensusNodes, addr)
}
if stateProvider, err = newStateProvider(t.ctx, cfg); err != nil {
t.Logger.Error("failed to create state sync state provider",
"err", err,
)
return fmt.Errorf("failed to create state sync state provider: %w", err)
}
}
// HACK: tmnode.NewNode() triggers block replay and/or ABCI chain
// initialization, instead of t.node.Start(). This is a problem
// because at the time that lazyInit() is called, none of the ABCI
// applications are registered.
//
// Defer actually initializing the node till after everything
// else is setup.
t.startFn = func() (err error) {
defer func() {
// The node constructor can panic early in case an error occurs during block replay as
// the fail monitor is not yet initialized in that case. Propagate the error.
if p := recover(); p != nil {
switch pt := p.(type) {
case error:
err = pt
default:
err = fmt.Errorf("%v", pt)
}
}
}()
t.node, err = tmnode.NewNode(tenderConfig,
tendermintPV,
&tmp2p.NodeKey{PrivKey: crypto.SignerToTendermint(t.identity.P2PSigner)},
tmproxy.NewLocalClientCreator(t.mux.Mux()),
tendermintGenesisProvider,
wrapDbProvider,
tmnode.DefaultMetricsProvider(tenderConfig.Instrumentation),
tmcommon.NewLogAdapter(!viper.GetBool(tmcommon.CfgLogDebug)),
tmnode.StateProvider(stateProvider),
)
if err != nil {
return fmt.Errorf("tendermint: failed to create node: %w", err)
}
if t.stateStore == nil {
// Sanity check for the above wrapDbProvider hack in case the DB provider changes.
return fmt.Errorf("tendermint: internal error: state database not set")
}
t.client = tmcli.New(t.node)
t.failMonitor = newFailMonitor(t.ctx, t.Logger, t.node.ConsensusState().Wait)
// Register a halt hook that handles upgrades gracefully.
t.RegisterHaltHook(func(ctx context.Context, blockHeight int64, epoch beaconAPI.EpochTime, err error) {
if !errors.Is(err, upgradeAPI.ErrStopForUpgrade) {
return
}
// Mark this as a clean shutdown and request the node to stop gracefully.
t.failMonitor.markCleanShutdown()
// Wait before stopping to give time for P2P messages to propagate. Sleep for
// minUpgradeStopWaitPeriod or the configured commit timeout, whichever is longer.
t.Logger.Info("waiting a bit before stopping the node for upgrade")
waitPeriod := minUpgradeStopWaitPeriod
if tc := t.genesis.Consensus.Parameters.TimeoutCommit; tc > waitPeriod {
waitPeriod = tc
}
time.Sleep(waitPeriod)
go func() {
// Sleep another period so there is some time between when consensus shuts down and
// when all the other services start shutting down.
//
// Randomize the period so that not all nodes shut down at the same time.
delay := getRandomValueFromInterval(0.5, rand.Float64(), viper.GetDuration(CfgUpgradeStopDelay))
time.Sleep(delay)
t.Logger.Info("stopping the node for upgrade")
t.Stop()
// Close the quit channel early to force the node to stop. This is needed because
// the Tendermint node will otherwise never quit.
close(t.quitCh)
}()
})
return nil
}
t.isInitialized = true
return nil
}
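// The upgrade shutdown above staggers restarts with getRandomValueFromInterval,
// whose implementation is not shown in this excerpt. Below is a minimal,
// standalone sketch of the jitter helper it presumably resembles; the formula
// and signature here are assumptions, not the project's code.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// getRandomValueFromInterval returns a duration drawn uniformly from
// [value*(1-jitterFactor), value*(1+jitterFactor)], so that nodes sharing the
// same configured delay do not all stop at the same instant.
func getRandomValueFromInterval(jitterFactor, random float64, value time.Duration) time.Duration {
	delta := jitterFactor * float64(value)
	min := float64(value) - delta
	max := float64(value) + delta
	return time.Duration(min + random*(max-min))
}

func main() {
	delay := getRandomValueFromInterval(0.5, rand.Float64(), 60*time.Second)
	fmt.Println("stopping in", delay) // somewhere between 30s and 90s
}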
func (t *fullService) syncWorker() {
checkSyncFn := func() (isSyncing bool, err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("tendermint: node disappeared, terminated?")
}
}()
return t.node.ConsensusReactor().WaitSync(), nil
}
for {
select {
case <-t.node.Quit():
return
case <-time.After(1 * time.Second):
isFastSyncing, err := checkSyncFn()
if err != nil {
t.Logger.Error("Failed to poll FastSync",
"err", err,
)
return
}
if !isFastSyncing {
// Check latest block time.
tmBlock, err := t.GetTendermintBlock(t.ctx, consensusAPI.HeightLatest)
if err != nil {
t.Logger.Error("Failed to get tendermint block",
"err", err,
)
return
}
now := time.Now()
// No committed blocks or latest block within threshold.
if tmBlock == nil || now.Sub(tmBlock.Header.Time) < syncWorkerLastBlockTimeDiffThreshold {
t.Logger.Info("Tendermint Node finished initial sync")
close(t.syncedCh)
return
}
t.Logger.Debug("Node still syncing",
"currentTime", now,
"latestBlockTime", tmBlock.Time,
"diff", now.Sub(tmBlock.Time),
)
}
}
}
}
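// checkSyncFn above converts a panic from a torn-down node into an error via
// a deferred recover. A standalone sketch of that pattern (the panicking
// callback is illustrative):
package main

import "fmt"

// safeCall invokes fn and turns any panic into a returned error instead of
// crashing the calling goroutine.
func safeCall(fn func() bool) (result bool, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("call panicked: %v", r)
		}
	}()
	return fn(), nil
}

func main() {
	_, err := safeCall(func() bool { panic("node disappeared") })
	fmt.Println(err) // call panicked: node disappeared
}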
func (t *fullService) blockNotifierWorker() {
sub, err := t.node.EventBus().SubscribeUnbuffered(t.ctx, tmSubscriberID, tmtypes.EventQueryNewBlock)
if err != nil {
t.Logger.Error("failed to subscribe to new block events",
"err", err,
)
return
}
// Oh yes, this can actually return a nil subscription even though the error was also
// nil if the node is just shutting down.
if sub == (*tmpubsub.Subscription)(nil) {
return
}
defer t.node.EventBus().Unsubscribe(t.ctx, tmSubscriberID, tmtypes.EventQueryNewBlock) // nolint: errcheck
for {
select {
// Should not return on t.ctx.Done()/t.node.Quit() as that could lead to a deadlock.
case <-sub.Cancelled():
return
case v := <-sub.Out():
ev := v.Data().(tmtypes.EventDataNewBlock)
t.blockNotifier.Broadcast(ev.Block)
}
}
}
// metrics updates oasis_consensus metrics by checking last accepted block info.
func (t *fullService) metrics() {
ch, sub := t.WatchTendermintBlocks()
defer sub.Close()
// Tendermint uses specific public key encoding.
pubKey := t.identity.ConsensusSigner.Public()
myAddr := []byte(crypto.PublicKeyToTendermint(&pubKey).Address())
for {
var blk *tmtypes.Block
select {
case <-t.node.Quit():
return
case blk = <-ch:
}
// Was block proposed by our node.
if bytes.Equal(myAddr, blk.ProposerAddress) {
metrics.ProposedBlocks.With(labelTendermint).Inc()
}
// Was block voted for by our node. Ignore if there was no previous block.
if blk.LastCommit != nil {
for _, sig := range blk.LastCommit.Signatures {
if sig.Absent() || sig.BlockIDFlag == tmtypes.BlockIDFlagNil {
// Vote is missing, ignore.
continue
}
if bytes.Equal(myAddr, sig.ValidatorAddress) {
metrics.SignedBlocks.With(labelTendermint).Inc()
break
}
}
}
}
}
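// metrics.ProposedBlocks and metrics.SignedBlocks above behave like standard
// prometheus counter vectors. A self-contained sketch of that pattern; the
// metric name, help text, and label set are illustrative assumptions about
// how the project's metrics package is wired.
package main

import "github.com/prometheus/client_golang/prometheus"

var (
	labelTendermint = prometheus.Labels{"backend": "tendermint"}

	proposedBlocks = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "oasis_consensus_proposed_blocks",
			Help: "Number of blocks proposed by this node.",
		},
		[]string{"backend"},
	)
)

func main() {
	prometheus.MustRegister(proposedBlocks)
	// Equivalent of metrics.ProposedBlocks.With(labelTendermint).Inc().
	proposedBlocks.With(labelTendermint).Inc()
}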
// New creates a new Tendermint consensus backend.
func New(
ctx context.Context,
dataDir string,
identity *identity.Identity,
upgrader upgradeAPI.Backend,
genesisProvider genesisAPI.Provider,
) (consensusAPI.Backend, error) {
// Retrieve the genesis document early so that it is possible to
// use it while initializing other things.
genesisDoc, err := genesisProvider.GetGenesisDocument()
if err != nil {
return nil, fmt.Errorf("tendermint: failed to get genesis doc: %w", err)
}
// Make sure that the consensus backend specified in the genesis
// document is the correct one.
if genesisDoc.Consensus.Backend != api.BackendName {
return nil, fmt.Errorf("tendermint: genesis document contains incorrect consensus backend: %s",
genesisDoc.Consensus.Backend,
)
}
t := &fullService{
BaseBackgroundService: *cmservice.NewBaseBackgroundService("tendermint"),
svcMgr: cmbackground.NewServiceManager(logging.GetLogger("tendermint/servicemanager")),
upgrader: upgrader,
blockNotifier: pubsub.NewBroker(false),
identity: identity,
genesis: genesisDoc,
genesisProvider: genesisProvider,
ctx: ctx,
dataDir: dataDir,
startedCh: make(chan struct{}),
syncedCh: make(chan struct{}),
quitCh: make(chan struct{}),
}
t.Logger.Info("starting a full consensus node")
// Create the submission manager.
pd, err := consensusAPI.NewStaticPriceDiscovery(viper.GetUint64(tmcommon.CfgSubmissionGasPrice))
if err != nil {
return nil, fmt.Errorf("tendermint: failed to create submission manager: %w", err)
}
t.submissionMgr = consensusAPI.NewSubmissionManager(t, pd, viper.GetUint64(tmcommon.CfgSubmissionMaxFee))
return t, t.initialize()
}
func init() {
Flags.String(CfgABCIPruneStrategy, abci.PruneDefault, "ABCI state pruning strategy")
Flags.Uint64(CfgABCIPruneNumKept, 3600, "ABCI state versions kept (when applicable)")
Flags.Bool(CfgCheckpointerDisabled, false, "Disable the ABCI state checkpointer")
Flags.Duration(CfgCheckpointerCheckInterval, 1*time.Minute, "ABCI state checkpointer check interval")
Flags.StringSlice(CfgSentryUpstreamAddress, []string{}, "Tendermint nodes for which we act as sentry of the form ID@ip:port")
Flags.StringSlice(CfgP2PPersistentPeer, []string{}, "Tendermint persistent peer(s) of the form ID@ip:port")
Flags.StringSlice(CfgP2PUnconditionalPeerIDs, []string{}, "Tendermint unconditional peer IDs")
Flags.Bool(CfgP2PDisablePeerExchange, false, "Disable Tendermint's peer-exchange reactor")
Flags.Duration(CfgP2PPersistenPeersMaxDialPeriod, 0*time.Second, "Tendermint max timeout when redialing a persistent peer (default: unlimited)")
Flags.Uint64(CfgMinGasPrice, 0, "minimum gas price")
Flags.Bool(CfgDebugUnsafeReplayRecoverCorruptedWAL, false, "Enable automatic recovery from corrupted WAL during replay (UNSAFE).")
Flags.Bool(CfgSupplementarySanityEnabled, false, "enable supplementary sanity checks (slows down consensus)")
Flags.Uint64(CfgSupplementarySanityInterval, 10, "supplementary sanity check interval (in blocks)")
// State sync.
Flags.Bool(CfgConsensusStateSyncEnabled, false, "enable state sync")
Flags.StringSlice(CfgConsensusStateSyncConsensusNode, []string{}, "state sync: consensus node to use for syncing the light client")
Flags.Duration(CfgConsensusStateSyncTrustPeriod, 24*time.Hour, "state sync: light client trust period")
Flags.Uint64(CfgConsensusStateSyncTrustHeight, 0, "state sync: light client trusted height")
Flags.String(CfgConsensusStateSyncTrustHash, "", "state sync: light client trusted consensus header hash")
Flags.Duration(CfgUpgradeStopDelay, 60*time.Second, "average amount of time to delay shutting down the node on upgrade")
_ = Flags.MarkHidden(CfgDebugUnsafeReplayRecoverCorruptedWAL)
_ = Flags.MarkHidden(CfgSupplementarySanityEnabled)
_ = Flags.MarkHidden(CfgSupplementarySanityInterval)
_ = viper.BindPFlags(Flags)
Flags.AddFlagSet(db.Flags)
}
if err != nil {
return nil, fmt.Errorf("failed to get last retained height: %w", err)
}
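// The init() above registers flags on a pflag FlagSet and binds them to
// viper, which is what lets the rest of the package read configuration
// through viper.Get*. A minimal standalone sketch of that binding pattern
// (flag name and default value are illustrative):
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
	"github.com/spf13/viper"
)

var exampleFlags = flag.NewFlagSet("consensus", flag.ContinueOnError)

func main() {
	exampleFlags.String("example.listen_address", "tcp://0.0.0.0:26656", "example listen address")
	// After binding, the flag value (or any viper-level override) is
	// available through viper getters under the same key.
	_ = viper.BindPFlags(exampleFlags)
	_ = exampleFlags.Parse([]string{"--example.listen_address", "tcp://127.0.0.1:26656"})
	fmt.Println(viper.GetString("example.listen_address"))
}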
resource_thunder_fw_radius_server.go | package thunder
//Thunder resource FwRadiusServer
import (
"context"
"fmt"
"strconv"
"util"
go_thunder "github.com/go_thunder/thunder"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func resourceFwRadiusServer() *schema.Resource {
return &schema.Resource{
CreateContext: resourceFwRadiusServerCreate,
UpdateContext: resourceFwRadiusServerUpdate,
ReadContext: resourceFwRadiusServerRead,
DeleteContext: resourceFwRadiusServerDelete,
Schema: map[string]*schema.Schema{
"accounting_start": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
"attribute_name": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
"vrid": {
Type: schema.TypeInt,
Optional: true,
Description: "",
},
"remote": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"ip_list": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"ip_list_name": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
"ip_list_encrypted": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
"ip_list_secret_string": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
"ip_list_secret": {
Type: schema.TypeInt,
Optional: true,
Description: "",
},
},
},
},
},
},
},
"uuid": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
"encrypted": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
"accounting_interim_update": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
"secret": {
Type: schema.TypeInt,
Optional: true,
Description: "",
},
"sampling_enable": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"counters1": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
},
},
},
"accounting_stop": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
"custom_attribute_name": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
"attribute": {
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"prefix_number": {
Type: schema.TypeInt,
Optional: true,
Description: "",
},
"prefix_length": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
"name": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
"prefix_vendor": {
Type: schema.TypeInt,
Optional: true,
Description: "",
},
"number": {
Type: schema.TypeInt,
Optional: true,
Description: "",
},
"value": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
"custom_vendor": {
Type: schema.TypeInt,
Optional: true,
Description: "",
},
"custom_number": {
Type: schema.TypeInt,
Optional: true,
Description: "",
},
"vendor": {
Type: schema.TypeInt,
Optional: true,
Description: "",
},
"attribute_value": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
},
},
},
"listen_port": {
Type: schema.TypeInt,
Optional: true,
Description: "",
},
"accounting_on": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
"secret_string": {
Type: schema.TypeString,
Optional: true,
Description: "",
},
},
}
}
func resourceFwRadiusServerCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
logger := util.GetLoggerInstance()
client := meta.(Thunder)
var diags diag.Diagnostics
if client.Host != "" {
logger.Println("[INFO] Creating FwRadiusServer (Inside resourceFwRadiusServerCreate) ")
data := dataToFwRadiusServer(d)
logger.Println("[INFO] received formatted data from method data to FwRadiusServer --")
d.SetId(strconv.Itoa(1))
err := go_thunder.PostFwRadiusServer(client.Token, data, client.Host)
if err != nil {
return diag.FromErr(err)
}
return resourceFwRadiusServerRead(ctx, d, meta)
}
return diags
}
func resourceFwRadiusServerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
logger := util.GetLoggerInstance()
client := meta.(Thunder)
var diags diag.Diagnostics
logger.Println("[INFO] Reading FwRadiusServer (Inside resourceFwRadiusServerRead)")
if client.Host != "" {
name := d.Id()
logger.Println("[INFO] Fetching service Read" + name)
data, err := go_thunder.GetFwRadiusServer(client.Token, client.Host)
if err != nil {
return diag.FromErr(err)
}
if data == nil {
logger.Println("[INFO] No data found " + name)
d.SetId("")
return nil
}
return diags
}
return nil
}
func resourceFwRadiusServerUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
return resourceFwRadiusServerRead(ctx, d, meta)
}
func resourceFwRadiusServerDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
return resourceFwRadiusServerRead(ctx, d, meta)
}
func dataToFwRadiusServer(d *schema.ResourceData) go_thunder.FwRadiusServer {
var vc go_thunder.FwRadiusServer
var c go_thunder.FwRadiusServerInstance
c.AccountingStart = d.Get("accounting_start").(string)
c.AttributeName = d.Get("attribute_name").(string)
c.Vrid = d.Get("vrid").(int)
var obj1 go_thunder.FwRadiusServerRemote
prefix := "remote.0."
IpListCount := d.Get(prefix + "ip_list.#").(int)
obj1.IPListName = make([]go_thunder.FwRadiusServerIPList, 0, IpListCount)
for i := 0; i < IpListCount; i++ {
var obj1_1 go_thunder.FwRadiusServerIPList
prefix1 := prefix + fmt.Sprintf("ip_list.%d.", i)
obj1_1.IPListName = d.Get(prefix1 + "ip_list_name").(string)
obj1_1.IPListEncrypted = d.Get(prefix1 + "ip_list_encrypted").(string)
obj1_1.IPListSecretString = d.Get(prefix1 + "ip_list_secret_string").(string)
obj1_1.IPListSecret = d.Get(prefix1 + "ip_list_secret").(int)
obj1.IPListName = append(obj1.IPListName, obj1_1)
}
c.IPList = obj1
c.Encrypted = d.Get("encrypted").(string)
c.AccountingInterimUpdate = d.Get("accounting_interim_update").(string)
c.Secret = d.Get("secret").(int)
SamplingEnableCount := d.Get("sampling_enable.#").(int)
c.Counters1 = make([]go_thunder.FwRadiusServerSamplingEnable, 0, SamplingEnableCount)
for i := 0; i < SamplingEnableCount; i++ {
var obj1 go_thunder.FwRadiusServerSamplingEnable
prefix2 := fmt.Sprintf("sampling_enable.%d.", i)
obj1.Counters1 = d.Get(prefix2 + "counters1").(string)
c.Counters1 = append(c.Counters1, obj1)
}
c.AccountingStop = d.Get("accounting_stop").(string)
c.CustomAttributeName = d.Get("custom_attribute_name").(string)
AttributeCount := d.Get("attribute.#").(int)
c.PrefixNumber = make([]go_thunder.FwRadiusServerAttribute, 0, AttributeCount)
for i := 0; i < AttributeCount; i++ {
var obj1 go_thunder.FwRadiusServerAttribute
prefix3 := fmt.Sprintf("attribute.%d.", i)
obj1.PrefixNumber = d.Get(prefix3 + "prefix_number").(int)
obj1.PrefixLength = d.Get(prefix3 + "prefix_length").(string)
obj1.Name = d.Get(prefix3 + "name").(string)
obj1.PrefixVendor = d.Get(prefix3 + "prefix_vendor").(int)
obj1.Number = d.Get(prefix3 + "number").(int)
obj1.Value = d.Get(prefix3 + "value").(string)
obj1.CustomVendor = d.Get(prefix3 + "custom_vendor").(int)
obj1.CustomNumber = d.Get(prefix3 + "custom_number").(int)
obj1.Vendor = d.Get(prefix3 + "vendor").(int)
obj1.AttributeValue = d.Get(prefix3 + "attribute_value").(string)
c.PrefixNumber = append(c.PrefixNumber, obj1)
}
c.ListenPort = d.Get("listen_port").(int)
c.AccountingOn = d.Get("accounting_on").(string)
c.SecretString = d.Get("secret_string").(string)
vc.AccountingStart = c
return vc
}
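// dataToFwRadiusServer walks nested blocks through flattened keys such as
// "remote.0.ip_list.#" and "sampling_enable.0.counters1". A sketch of
// exercising that addressing with the SDK's test helper; the raw values and
// the assertion are illustrative, and the field names follow the code above.
package thunder

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func TestDataToFwRadiusServer(t *testing.T) {
	// schema.TestResourceDataRaw builds a *schema.ResourceData from a raw
	// config map, which is enough to drive the d.Get lookups above.
	raw := map[string]interface{}{
		"accounting_start": "enable",
		"listen_port":      1813,
	}
	d := schema.TestResourceDataRaw(t, resourceFwRadiusServer().Schema, raw)
	got := dataToFwRadiusServer(d)
	if got.AccountingStart.AccountingStart != "enable" {
		t.Fatalf("unexpected accounting_start: %+v", got.AccountingStart)
	}
}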
reducemin.py | # SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class ReduceMin(Base):
@staticmethod
def export_do_not_keepdims() -> None:
shape = [3, 2, 2]
axes = [1]
keepdims = 0
node = onnx.helper.make_node(
'ReduceMin',
inputs=['data'],
outputs=['reduced'],
axes=axes,
keepdims=keepdims)
data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)
reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)
#print(reduced)
#[[5., 1.]
# [30., 1.]
# [55., 1.]]
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_do_not_keepdims_example')
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_do_not_keepdims_random')
@staticmethod
def export_keepdims() -> None:
shape = [3, 2, 2]
axes = [1]
keepdims = 1
node = onnx.helper.make_node(
'ReduceMin', inputs=['data'],
outputs=['reduced'],
axes=axes,
keepdims=keepdims)
data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)
reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)
#print(reduced)
#[[[5., 1.]]
# [[30., 1.]]
# [[55., 1.]]]
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_keepdims_example')
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_keepdims_random')
@staticmethod
def export_default_axes_keepdims() -> None:
shape = [3, 2, 2]
axes = None
keepdims = 1
node = onnx.helper.make_node(
'ReduceMin',
inputs=['data'],
outputs=['reduced'],
keepdims=keepdims)
data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)
reduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)
#print(reduced)
#[[[1.]]]
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_default_axes_keepdims_example')
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_default_axes_keepdims_random')
@staticmethod
def export_negative_axes_keepdims() -> None:
shape = [3, 2, 2]
axes = [-2]
keepdims = 1
node = onnx.helper.make_node(
'ReduceMin', inputs=['data'],
outputs=['reduced'],
axes=axes,
keepdims=keepdims)
data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)
reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)
# print(reduced)
#[[[5., 1.]]
# [[30., 1.]]
# [[55., 1.]]]
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_negative_axes_keepdims_example')
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)
expect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_negative_axes_keepdims_random')
event_template.py | """
@brief Event Template class
Instances of this class describe a specific event type. For example: AF_ASSERT_0
or cmdSeq_CS_CmdStarted
@date Created July 2, 2018
@author R. Joseph Paetz
@bug No known bugs
"""
from fprime.common.models.serialize import type_base
from fprime.common.models.serialize.type_exceptions import TypeMismatchException
from fprime_gds.common.utils.event_severity import EventSeverity
from . import data_template
class EventTemplate(data_template.DataTemplate):
"""Class to create event templates to describe specific event types"""
def __init__(
self, event_id, name, component, args, severity, format_str, description=None
):
"""
Constructor
Args:
event_id: The ID of the event being described
name: event name as a string
component: Component that produces the Event
args: List of arguments in tuple form. Each tuple should be:
(arg name, arg description, arg obj). Where arg obj is an
object of a type derived from the class Base Type. Arg
description may be None.
severity: event severity as an EventSeverity Enum
format_str: Format string for the event's arguments
description: (Optional) Event Description
"""
super().__init__()
# Make sure correct types are passed
if not isinstance(event_id, int):
raise TypeMismatchException(int, type(event_id))
if not isinstance(name, str):
raise TypeMismatchException(str, type(name))
if not isinstance(component, str):
raise TypeMismatchException(str, type(component))
if not isinstance(format_str, str):
raise TypeMismatchException(str, type(format_str))
if not isinstance(args, list):
raise TypeMismatchException(list, type(args))
for (arg_name, arg_desc, arg_type) in args:
if not isinstance(arg_name, str):
raise TypeMismatchException(str, type(arg_name))
if arg_desc is not None and not isinstance(arg_desc, str):
raise TypeMismatchException(str, type(arg_desc))
if not isinstance(arg_type, type_base.BaseType):
raise TypeMismatchException(type_base.BaseType, type(arg_type))
if description is not None and not isinstance(description, str):
raise TypeMismatchException(str, type(description))
if not isinstance(severity, EventSeverity):
raise TypeMismatchException("EventSeverity", type(severity))
# Initialize event internal variables
self.id = event_id
self.name = name
self.comp_name = component
self.args = args
self.severity = severity
self.format_str = format_str
self.description = description
def get_full_name(self):
"""
Get the full name of this event
Returns:
The full name (component.event) for this event
"""
return "{}.{}".format(self.comp_name, self.name)
def get_id(self):
return self.id
def get_name(self):
return self.name
def get_comp_name(self):
return self.comp_name
def get_severity(self):
"""
Returns the event's severity as an EventSeverity Enum.
Returns:
The event's severity as an EventSeverity Enum
"""
return self.severity
def get_format_str(self):
return self.format_str
def get_description(self):
return self.description
def get_args(self):
"""
Returns a list of argument information
Returns:
A list of tuples where each tuple represents an argument. Each tuple
in the form: (arg name, arg description, arg obj). Where arg obj is
an object of a type derived from the class Base Type. Arg
description may be None.
"""
return self.args
if __name__ == "__main__":
pass
| """
Constructor
Args:
event_id: The ID of the event being described
name: event name as a string
component: Component that produces the Event
args: List of arguments in tuple form. Each tuple should be:
(arg name, arg description, arg obj). Where arg obj is an
object of a type derived from the class Base Type. Arg
description may be None.
severity: event severity as an EventSeverity Enum
format_str: Format string for the event's arguments
description: (Optional) Event Description
"""
super().__init__()
# Make sure correct types are passed
if not isinstance(event_id, int):
raise TypeMismatchException(int, type(event_id))
if not isinstance(name, str):
raise TypeMismatchException(str, type(name))
if not isinstance(component, str):
raise TypeMismatchException(str, type(component))
if not isinstance(format_str, str):
raise TypeMismatchException(str, type(format_str))
if not isinstance(args, list):
raise TypeMismatchException(list, type(args))
for (arg_name, arg_desc, arg_type) in args:
if not isinstance(arg_name, str):
raise TypeMismatchException(str, type(arg_name))
if arg_desc is not None and not isinstance(arg_desc, str):
raise TypeMismatchException(str, type(arg_desc))
if not isinstance(arg_type, type_base.BaseType):
raise TypeMismatchException(type_base.BaseType, type(arg_type))
if description is not None and not isinstance(description, str):
raise TypeMismatchException(str, type(description))
if not isinstance(severity, EventSeverity):
raise TypeMismatchException("EventSeverity", type(severity))
# Initialize event internal variables
self.id = event_id
self.name = name
self.comp_name = component
self.args = args
self.severity = severity
self.format_str = format_str
self.description = description |
dynamoDBWalkthrough.ts | import inquirer from 'inquirer';
import path from 'path';
const TransformPackage = require('graphql-transformer-core');
const { ResourceDoesNotExistError } = require('amplify-cli-core');
export async function askDynamoDBQuestions(context: any, currentProjectOnly = false): Promise<{ resourceName: string }> {
const dynamoDbTypeQuestion = {
type: 'list',
name: 'dynamoDbType',
message: 'Choose a DynamoDB data source option',
choices: [
{
name: 'Use DynamoDB table configured in the current Amplify project',
value: 'currentProject',
},
{
name: 'Create a new DynamoDB table',
value: 'newResource',
},
],
};
for (let count = 0; count < 2; count++) {
// give the developer a chance to go back and select a valid response
const dynamoDbTypeAnswer = currentProjectOnly ? { dynamoDbType: 'currentProject' } : await inquirer.prompt([dynamoDbTypeQuestion]);
switch (dynamoDbTypeAnswer.dynamoDbType) {
case 'currentProject': {
const storageResources = context.amplify.getProjectDetails().amplifyMeta.storage;
const dynamoDbProjectResources: any[] = [];
if (!storageResources) {
context.print.error('There are no DynamoDB resources configured in your project currently');
break;
}
Object.keys(storageResources).forEach(resourceName => {
if (storageResources[resourceName].service === 'DynamoDB') {
dynamoDbProjectResources.push(resourceName);
}
});
if (dynamoDbProjectResources.length === 0) {
context.print.error('There are no DynamoDB resources configured in your project currently');
break;
}
const dynamoResourceQuestion = {
type: 'list',
name: 'dynamoDbResources',
message: 'Choose from one of the already configured DynamoDB tables',
choices: dynamoDbProjectResources,
};
const dynamoResourceAnswer = await inquirer.prompt([dynamoResourceQuestion]);
return { resourceName: dynamoResourceAnswer.dynamoDbResources as string };
}
case 'newResource': {
let add;
try {
({ add } = require('amplify-category-storage'));
} catch (e) {
context.print.error('Storage plugin is not installed in the CLI. You must install it to use this feature.');
break;
}
return add(context, 'awscloudformation', 'DynamoDB').then((resourceName: any) => {
context.print.success('Successfully added DynamoDb table locally');
return { resourceName };
});
}
default:
context.print.error('Invalid option selected');
}
}
throw new Error('Invalid option selected');
}
export async function getTableParameters(context: any, dynamoAnswers: any): Promise<any> {
if (dynamoAnswers.Arn) {
// Looking for table parameters on DynamoDB public API
const hashKey = dynamoAnswers.KeySchema.find((attr: any) => attr.KeyType === 'HASH') || {};
const hashType = dynamoAnswers.AttributeDefinitions.find((attr: any) => attr.AttributeName === hashKey.AttributeName) || {};
const rangeKey = dynamoAnswers.KeySchema.find((attr: any) => attr.KeyType === 'RANGE') || {};
const rangeType = dynamoAnswers.AttributeDefinitions.find((attr: any) => attr.AttributeName === rangeKey.AttributeName) || {};
return {
tableName: dynamoAnswers.TableName,
partitionKeyName: hashKey.AttributeName,
partitionKeyType: hashType.AttributeType,
sortKeyName: rangeKey.AttributeName,
sortKeyType: rangeType.AttributeType,
};
} // Looking for table parameters on local configuration
const projectBackendDirPath = context.amplify.pathManager.getBackendDirPath();
const resourceDirPath = path.join(projectBackendDirPath, 'storage', dynamoAnswers.resourceName);
const parametersFilePath = path.join(resourceDirPath, 'parameters.json');
let parameters;
try {
parameters = context.amplify.readJsonFile(parametersFilePath);
} catch (e) {
parameters = {};
}
return parameters;
}
export async function askAPICategoryDynamoDBQuestions(context: any) {
const { allResources } = await context.amplify.getResourceStatus();
const appSyncResources = allResources.filter((resource: any) => resource.service === 'AppSync');
let targetResourceName: any;
if (appSyncResources.length === 0) {
const errMessage = `
No AppSync resources have been configured in the API category.
Please use "amplify add api" command to create a new appsync resource`;
context.print.error(errMessage);
context.usageData.emitError(new ResourceDoesNotExistError(errMessage));
process.exit(0);
} else if (appSyncResources.length === 1) {
targetResourceName = appSyncResources[0].resourceName;
context.print.success(`Selected resource ${targetResourceName}`);
} else {
const resourceNameQuestion = {
type: 'list',
name: 'dynamoDbAPIResourceName',
message: 'Choose an API resource to associate with',
choices: appSyncResources.map((resource: any) => resource.resourceName),
};
const answer = await inquirer.prompt([resourceNameQuestion]);
targetResourceName = answer.dynamoDbAPIResourceName;
}
const backendDir = context.amplify.pathManager.getBackendDirPath();
const resourceDirPath = path.join(backendDir, 'api', targetResourceName);
const project = await TransformPackage.readProjectConfiguration(resourceDirPath);
const directiveMap = TransformPackage.collectDirectivesByTypeNames(project.schema);
const modelNames = Object.keys(directiveMap.types).filter(typeName => directiveMap.types[typeName].includes('model'));
let targetModelNames: string[] = [];
if (modelNames.length === 0) {
throw Error('Unable to find graphql model info.');
} else if (modelNames.length === 1) {
const [modelName] = modelNames;
context.print.success(`Selected @model ${modelName}`);
targetModelNames = modelNames;
} else {
while (targetModelNames.length === 0) {
const modelNameQuestion = {
type: 'checkbox',
name: 'graphqlAPIModelName',
message: 'Choose the graphql @model(s)',
choices: modelNames,
};
const modelNameAnswer = await inquirer.prompt([modelNameQuestion]);
targetModelNames = modelNameAnswer.graphqlAPIModelName as string[];
if (targetModelNames.length === 0) {
context.print.info('You need to select at least one @model');
}
}
}
const triggerEventSourceMappings = targetModelNames.map(modelName => {
const streamArnParamRef = {
'Fn::ImportValue': {
'Fn::Sub': [`\${api${targetResourceName}GraphQLAPIIdOutput}`, 'GetAtt', `${modelName}Table`, 'StreamArn'].join(':'),
},
};
return {
modelName,
batchSize: 100,
startingPosition: 'LATEST',
eventSourceArn: streamArnParamRef,
functionTemplateName: 'trigger-dynamodb.js',
triggerPolicies: [
{
Effect: 'Allow',
Action: ['dynamodb:DescribeStream', 'dynamodb:GetRecords', 'dynamodb:GetShardIterator', 'dynamodb:ListStreams'],
Resource: streamArnParamRef,
},
],
};
});
return {
triggerEventSourceMappings,
dependsOn: [
{
category: 'api',
resourceName: targetResourceName,
attributes: ['GraphQLAPIIdOutput', 'GraphQLAPIEndpointOutput'],
},
],
};
}
dm_control_env.py | from dm_control import suite
from dm_control.suite.wrappers import pixels
from dm_env.specs import Array, BoundedArray
import numpy as np
from collections import namedtuple, OrderedDict
from rlpyt.utils.collections import namedarraytuple
from rlpyt.envs.base import Env, EnvStep, EnvSpaces
from rlpyt.spaces.box import Box
from rlpyt.spaces.composite import Composite
from rlpyt.utils.quick_args import save__init__args
from rlpyt.samplers.collections import TrajInfo
State = None
def convert_dm_control_to_rlpyt_space(dm_control_space):
"""Recursively convert dm_control_space into gym space.
Note: Need to check the following cases of the input type, in the following
order:
(1) BoundedArray
(2) Array
(3) OrderedDict.
- Generally, dm_control observation_specs are OrderedDict with other spaces
(e.g. Array) nested in it.
- Generally, dm_control action_specs are of type `BoundedArray`.
To handle dm_control observation_specs as inputs, we check the following
input types in order to enable recursive calling on each nested item.
"""
if isinstance(dm_control_space, BoundedArray):
rlpyt_box = Box(
low=dm_control_space.minimum,
high=dm_control_space.maximum,
shape=None,
dtype=dm_control_space.dtype)
assert rlpyt_box.shape == dm_control_space.shape, (
(rlpyt_box.shape, dm_control_space.shape))
return rlpyt_box
elif isinstance(dm_control_space, Array):
if isinstance(dm_control_space, BoundedArray):
raise ValueError("The order of the if-statements matters.")
return Box(
low=-float("inf"),
high=float("inf"),
shape=dm_control_space.shape,
dtype=dm_control_space.dtype)
elif isinstance(dm_control_space, OrderedDict):
global State
if State is None:
State = namedtuple('State', list(dm_control_space.keys()))
return Composite([convert_dm_control_to_rlpyt_space(value)
for value in dm_control_space.values()], State)
else:
raise ValueError(dm_control_space)
EnvInfo = None
Observation = None
def init_namedtuples(info_keys=None, state_keys=None):
global EnvInfo, Observation, State
if info_keys is None:
info_keys = ['traj_done']
if state_keys is None:
state_keys = ['pixels']
EnvInfo = namedtuple('EnvInfo', info_keys)
Observation = namedarraytuple('Observation', state_keys)
State = namedtuple('State', state_keys)
class DMControlEnv(Env):
def __init__(self,
domain,
task,
frame_skip=1,
normalize=False,
pixel_wrapper_kwargs=None,
task_kwargs={},
environment_kwargs={},
max_path_length=1200,
):
save__init__args(locals(), underscore=True)
env = suite.load(domain_name=domain,
task_name=task,
task_kwargs=task_kwargs,
environment_kwargs=environment_kwargs)
if normalize:
np.testing.assert_equal(env.action_spec().minimum, -1)
np.testing.assert_equal(env.action_spec().maximum, 1)
if pixel_wrapper_kwargs is not None:
env = pixels.Wrapper(env, **pixel_wrapper_kwargs)
self._env = env
self._observation_keys = tuple(env.observation_spec().keys())
observation_space = convert_dm_control_to_rlpyt_space(
env.observation_spec())
self._observation_space = observation_space
action_space = convert_dm_control_to_rlpyt_space(env.action_spec())
if len(action_space.shape) > 1:
raise NotImplementedError(
"Shape of the action space ({}) is not flat, make sure to"
" check the implemenation.".format(action_space))
self._action_space = action_space
self._step_count = 0
def reset(self):
self._step_count = 0
time_step = self._env.reset()
observation = self._filter_observation(time_step.observation)
global Observation
if Observation is None:
Observation = namedarraytuple("Observation", list(observation.keys()))
observation = Observation(**{k: v for k, v in observation.items()
if k in self._observation_keys})
return observation
def step(self, action):
time_step = self._env.step(action)
reward = time_step.reward
terminal = time_step.last()
info = time_step.info
info.update({
key: value
for key, value in time_step.observation.items()
if key not in self._observation_keys
})
observation = self._filter_observation(time_step.observation)
self._step_count += 1
info['traj_done'] = self._step_count >= self._max_path_length
global EnvInfo
if EnvInfo is None:
EnvInfo = namedtuple("EnvInfo", list(info.keys()))
info = EnvInfo(**{k: v for k, v in info.items() if k in EnvInfo._fields})
global Observation
if Observation is None:
Observation = namedarraytuple("Observation", list(observation.keys()))
observation = Observation(**{k: v.copy() for k, v in observation.items()
if k in self._observation_keys})
return EnvStep(observation, reward, terminal, info)
def render(self, *args, mode='rgb_array', width=256, height=256,
camera_id=0, **kwargs):
if mode == 'human':
raise NotImplementedError(
"TODO(Alacarter): Figure out how to not continuously launch"
" viewers if one is already open."
" See: https://github.com/deepmind/dm_control/issues/39.")
elif mode == 'rgb_array':
return self._env.physics.render(width=width, height=height,
camera_id=camera_id, **kwargs)
raise NotImplementedError(mode)
def get_obs(self):
obs = self._env.task.get_observation(self._env.physics)
obs['pixels'] = self._env.physics.render(**self._env._render_kwargs)
obs = self._filter_observation(obs)
obs = Observation(**{k: v for k, v in obs.items()
if k in self._observation_keys})
return obs
def get_state(self, ignore_step=True):
if ignore_step:
return self._env.physics.get_state()
return self._env.physics.get_state(), self._step_count
def set_state(self, state, ignore_step=True):
if ignore_step:
self._env.physics.set_state(state)
self._env.step(np.zeros(self.action_space.shape))
else:
self._env.physics.set_state(state[0])
self._env.step(np.zeros(self.action_space.shape))
self._step_count = state[1]
def get_geoms(self):
return self._env.task.get_geoms(self._env.physics)
@property
def spaces(self):
return EnvSpaces(
observation=self._observation_space,
action=self._action_space,
)
###########################################################################
# Helpers
def _filter_observation(self, observation):
observation = type(observation)([
(name, value)
for name, value in observation.items()
if name in self._observation_keys
])
return observation
###########################################################################
# Properties
default_collector_test.go | package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestMetricFactory(t *testing.T) {
SUT := NewDefaultCollector("test")
SUT.SetDescriptor("test_metric", "", nil)
metric := SUT.MakeGaugeMetric("test_metric", 1)
assert.Equal(t, SUT.GetDescriptor("test_metric"), metric.Desc())
}
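// NewDefaultCollector is project-specific, but the descriptor/metric pairing
// the test asserts maps directly onto client_golang primitives. A minimal
// standalone sketch of the same relationship (the metric name is illustrative):
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A Desc identifies a metric family; const metrics built from it report
	// the same *prometheus.Desc via Desc(), which is what the assertion
	// above relies on.
	desc := prometheus.NewDesc("test_metric", "example gauge", nil, nil)
	metric := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1)
	fmt.Println(metric.Desc() == desc) // true
}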
webhook.go | // Copyright 2017 Drone.IO Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gogs
import (
"crypto/sha256"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/jenkins-x/go-scm/pkg/hmac"
"github.com/jenkins-x/go-scm/scm"
)
type webhookService struct {
client *wrapper
}
func (s *webhookService) Parse(req *http.Request, fn scm.SecretFunc) (scm.Webhook, error) {
data, err := ioutil.ReadAll(
io.LimitReader(req.Body, 10000000),
)
if err != nil {
return nil, err
}
var hook scm.Webhook
event := req.Header.Get("X-Gogs-Event")
switch event {
case "push":
hook, err = s.parsePushHook(data)
case "create":
hook, err = s.parseCreateHook(data)
case "delete":
hook, err = s.parseDeleteHook(data)
case "issues":
hook, err = s.parseIssueHook(data)
case "issue_comment":
hook, err = s.parseIssueCommentHook(data)
case "pull_request":
hook, err = s.parsePullRequestHook(data)
default:
return nil, scm.UnknownWebhook{event}
}
if err != nil {
return nil, err
}
// get the gogs signature key to verify the payload
// signature. If no key is provided, no validation
// is performed.
key, err := fn(hook)
if err != nil {
return hook, err
} else if key == "" {
return hook, nil
}
sig := req.Header.Get("X-Gogs-Signature")
if sig == "" |
if !hmac.Validate(sha256.New, data, []byte(key), sig) {
return hook, scm.ErrSignatureInvalid
}
return hook, nil
}
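// hmac.Validate comes from the jenkins-x helper package and is not shown in
// this excerpt. A standard-library sketch of what such payload validation
// typically does; the hex-encoded signature format is an assumption.
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// validateSignature recomputes the HMAC of the payload with the shared secret
// and compares it to the received signature in constant time.
func validateSignature(payload, key []byte, signature string) bool {
	mac := hmac.New(sha256.New, key)
	mac.Write(payload) // hash.Hash writes never fail
	expected := hex.EncodeToString(mac.Sum(nil))
	return hmac.Equal([]byte(expected), []byte(signature))
}

func main() {
	payload := []byte(`{"ref":"refs/heads/main"}`)
	key := []byte("webhook-secret")
	mac := hmac.New(sha256.New, key)
	mac.Write(payload)
	sig := hex.EncodeToString(mac.Sum(nil))
	fmt.Println(validateSignature(payload, key, sig)) // true
}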
func (s *webhookService) parsePushHook(data []byte) (scm.Webhook, error) {
dst := new(pushHook)
err := json.Unmarshal(data, dst)
return convertPushHook(dst), err
}
func (s *webhookService) parseCreateHook(data []byte) (scm.Webhook, error) {
dst := new(createHook)
err := json.Unmarshal(data, dst)
switch dst.RefType {
case "tag":
return convertTagHook(dst, scm.ActionCreate), err
case "branch":
return convertBranchHook(dst, scm.ActionCreate), err
default:
return nil, scm.UnknownWebhook{dst.RefType}
}
}
func (s *webhookService) parseDeleteHook(data []byte) (scm.Webhook, error) {
dst := new(createHook)
err := json.Unmarshal(data, dst)
switch dst.RefType {
case "tag":
return convertTagHook(dst, scm.ActionDelete), err
case "branch":
return convertBranchHook(dst, scm.ActionDelete), err
default:
return nil, scm.UnknownWebhook{dst.RefType}
}
}
func (s *webhookService) parseIssueHook(data []byte) (scm.Webhook, error) {
dst := new(issueHook)
err := json.Unmarshal(data, dst)
return convertIssueHook(dst), err
}
func (s *webhookService) parseIssueCommentHook(data []byte) (scm.Webhook, error) {
dst := new(issueHook)
err := json.Unmarshal(data, dst)
if dst.Issue.PullRequest != nil {
return convertPullRequestCommentHook(dst), err
}
return convertIssueCommentHook(dst), err
}
func (s *webhookService) parsePullRequestHook(data []byte) (scm.Webhook, error) {
dst := new(pullRequestHook)
err := json.Unmarshal(data, dst)
return convertPullRequestHook(dst), err
}
//
// native data structures
//
type (
// gogs push webhook payload
pushHook struct {
Ref string `json:"ref"`
Before string `json:"before"`
After string `json:"after"`
Compare string `json:"compare_url"`
Commits []commit `json:"commits"`
Repository repository `json:"repository"`
Pusher user `json:"pusher"`
Sender user `json:"sender"`
}
// gogs create webhook payload
createHook struct {
Ref string `json:"ref"`
RefType string `json:"ref_type"`
DefaultBranch string `json:"default_branch"`
Repository repository `json:"repository"`
Sender user `json:"sender"`
}
// gogs issue webhook payload
issueHook struct {
Action string `json:"action"`
Issue issue `json:"issue"`
Comment issueComment `json:"comment"`
Repository repository `json:"repository"`
Sender user `json:"sender"`
}
// gogs pull request webhook payload
pullRequestHook struct {
Action string `json:"action"`
Number int `json:"number"`
PullRequest pullRequest `json:"pull_request"`
Repository repository `json:"repository"`
Sender user `json:"sender"`
}
)
//
// native data structure conversion
//
func convertTagHook(dst *createHook, action scm.Action) *scm.TagHook {
return &scm.TagHook{
Action: action,
Ref: scm.Reference{
Name: dst.Ref,
},
Repo: *convertRepository(&dst.Repository),
Sender: *convertUser(&dst.Sender),
}
}
func convertBranchHook(dst *createHook, action scm.Action) *scm.BranchHook {
return &scm.BranchHook{
Action: action,
Ref: scm.Reference{
Name: dst.Ref,
},
Repo: *convertRepository(&dst.Repository),
Sender: *convertUser(&dst.Sender),
}
}
func convertPushHook(dst *pushHook) *scm.PushHook {
return &scm.PushHook{
Ref: scm.ExpandRef(dst.Ref, "refs/heads/"),
Commit: scm.Commit{
Sha: dst.After,
Message: dst.Commits[0].Message,
Link: dst.Compare,
Author: scm.Signature{
Login: dst.Commits[0].Author.Username,
Email: dst.Commits[0].Author.Email,
Name: dst.Commits[0].Author.Name,
Date: dst.Commits[0].Timestamp,
},
Committer: scm.Signature{
Login: dst.Commits[0].Committer.Username,
Email: dst.Commits[0].Committer.Email,
Name: dst.Commits[0].Committer.Name,
Date: dst.Commits[0].Timestamp,
},
},
Repo: *convertRepository(&dst.Repository),
Sender: *convertUser(&dst.Sender),
}
}
func convertPullRequestHook(dst *pullRequestHook) *scm.PullRequestHook {
return &scm.PullRequestHook{
Action: convertAction(dst.Action),
PullRequest: scm.PullRequest{
Number: dst.PullRequest.Number,
Title: dst.PullRequest.Title,
Body: dst.PullRequest.Body,
Closed: dst.PullRequest.State == "closed",
Author: scm.User{
Login: dst.PullRequest.User.Login,
Email: dst.PullRequest.User.Email,
Avatar: dst.PullRequest.User.Avatar,
},
Merged: dst.PullRequest.Merged,
// Created: nil,
// Updated: nil,
Source: dst.PullRequest.HeadBranch,
Target: dst.PullRequest.BaseBranch,
Link: dst.PullRequest.HTMLURL,
Fork: dst.PullRequest.HeadRepo.FullName,
Ref: fmt.Sprintf("refs/pull/%d/head", dst.PullRequest.Number),
// Sha: "",
},
Repo: *convertRepository(&dst.Repository),
Sender: *convertUser(&dst.Sender),
}
}
func convertPullRequestCommentHook(dst *issueHook) *scm.PullRequestCommentHook {
return &scm.PullRequestCommentHook{
Action: convertAction(dst.Action),
PullRequest: *convertPullRequestFromIssue(&dst.Issue),
Comment: *convertIssueComment(&dst.Comment),
Repo: *convertRepository(&dst.Repository),
Sender: *convertUser(&dst.Sender),
}
}
func convertIssueHook(dst *issueHook) *scm.IssueHook {
return &scm.IssueHook{
Action: convertAction(dst.Action),
Issue: *convertIssue(&dst.Issue),
Repo: *convertRepository(&dst.Repository),
Sender: *convertUser(&dst.Sender),
}
}
func convertIssueCommentHook(dst *issueHook) *scm.IssueCommentHook {
return &scm.IssueCommentHook{
Action: convertAction(dst.Action),
Issue: *convertIssue(&dst.Issue),
Comment: *convertIssueComment(&dst.Comment),
Repo: *convertRepository(&dst.Repository),
Sender: *convertUser(&dst.Sender),
}
}
func convertAction(src string) (action scm.Action) {
switch src {
case "create", "created":
return scm.ActionCreate
case "delete", "deleted":
return scm.ActionDelete
case "update", "updated", "edit", "edited":
return scm.ActionUpdate
case "open", "opened":
return scm.ActionOpen
case "reopen", "reopened":
return scm.ActionReopen
case "close", "closed":
return scm.ActionClose
case "label", "labeled":
return scm.ActionLabel
case "unlabel", "unlabeled":
return scm.ActionUnlabel
case "merge", "merged":
return scm.ActionMerge
case "synchronize", "synchronized":
return scm.ActionSync
default:
return
}
}
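// convertAction falls back to the zero scm.Action for unrecognized strings.
// A table-driven sketch of checking that mapping; it assumes scm.Action's
// zero value represents "unknown", as the default branch implies.
package gogs

import (
	"testing"

	"github.com/jenkins-x/go-scm/scm"
)

func TestConvertAction(t *testing.T) {
	tests := map[string]scm.Action{
		"opened":      scm.ActionOpen,
		"synchronize": scm.ActionSync,
		"bogus":       scm.Action(0),
	}
	for in, want := range tests {
		if got := convertAction(in); got != want {
			t.Errorf("convertAction(%q) = %v, want %v", in, got, want)
		}
	}
}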
Tag.entity.ts | import {
Column,
CreateDateColumn,
Entity,
ManyToMany,
PrimaryGeneratedColumn,
} from 'typeorm';
import { Course } from './course.entity';
@Entity('tags')
export class Tag {
@PrimaryGeneratedColumn()
id: number;
@Column()
name: string;
@ManyToMany(
() => Course,
(course: Course) => course.tags,
)
courses: Course[];
@CreateDateColumn()
createdAt: Date;
}
ca.go | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package crypto
import (
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"io/ioutil"
"math/big"
"time"
"github.com/pingcap/errors"
)
var serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128)
// CertificateAuthority holds the CA of a cluster
type CertificateAuthority struct {
ClusterName string
Cert *x509.Certificate
Key PrivKey
}
// NewCA generates a new CertificateAuthority object
func NewCA(clsName string) (*CertificateAuthority, error) {
currTime := time.Now().UTC()
// generate a random serial number for the new ca
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, err
}
caTemplate := &x509.Certificate{
SerialNumber: serialNumber,
// NOTE: not adding the cluster name to the cert subject to avoid potential issues
// when we implement the cluster renaming feature. We may consider adding this back
// if we find a proper way of renaming a TLS-enabled cluster.
// Adding the cluster name to the cert subject may be helpful when diagnosing
// problems where a process is trying to connect to a component from another cluster.
Subject: pkix.Name{
Organization: []string{pkixOrganization},
OrganizationalUnit: []string{pkixOrganizationalUnit /*, clsName */},
},
NotBefore: currTime,
NotAfter: currTime.Add(time.Hour * 24 * 365 * 50), // TODO: support ca cert rotate
IsCA: true, // must be true
KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{
x509.ExtKeyUsageClientAuth,
x509.ExtKeyUsageServerAuth,
},
BasicConstraintsValid: true,
}
priv, err := NewKeyPair(KeyTypeRSA, KeySchemeRSASSAPSSSHA256)
if err != nil {
return nil, err
}
caBytes, err := x509.CreateCertificate(rand.Reader, caTemplate, caTemplate, priv.Public().Key(), priv.Signer())
if err != nil {
return nil, err
}
caCert, err := x509.ParseCertificate(caBytes)
if err != nil {
return nil, err
}
return &CertificateAuthority{
ClusterName: clsName,
Cert: caCert,
Key: priv,
}, nil
}
// Sign signs a CSR with the CA
func (ca *CertificateAuthority) Sign(csrBytes []byte) ([]byte, error) {
csr, err := x509.ParseCertificateRequest(csrBytes)
if err != nil {
return nil, err
}
if err := csr.CheckSignature(); err != nil {
return nil, err
}
currTime := time.Now().UTC()
if !currTime.Before(ca.Cert.NotAfter) {
return nil, errors.Errorf("the signer has expired: NotAfter=%v", ca.Cert.NotAfter)
}
// generate a random serial number for the new cert
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, err
}
template := &x509.Certificate{
Signature: csr.Signature,
SignatureAlgorithm: csr.SignatureAlgorithm,
PublicKey: csr.PublicKey,
PublicKeyAlgorithm: csr.PublicKeyAlgorithm,
SerialNumber: serialNumber,
Issuer: ca.Cert.Issuer,
Subject: csr.Subject,
DNSNames: csr.DNSNames,
IPAddresses: csr.IPAddresses,
EmailAddresses: csr.EmailAddresses,
URIs: csr.URIs,
NotBefore: currTime,
NotAfter: currTime.Add(time.Hour * 24 * 365 * 10),
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
ExtKeyUsage: []x509.ExtKeyUsage{
x509.ExtKeyUsageClientAuth,
x509.ExtKeyUsageServerAuth,
},
Extensions: csr.Extensions,
ExtraExtensions: csr.ExtraExtensions,
}
return x509.CreateCertificate(rand.Reader, template, ca.Cert, csr.PublicKey, ca.Key.Signer())
}
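// Sign consumes raw DER CSR bytes. A standalone sketch of producing such a
// request with the standard library; the key size and subject are
// illustrative, and newCSR itself is a hypothetical helper, not part of the
// package above.
package crypto

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
)

// newCSR generates a throwaway RSA key and a DER-encoded certificate signing
// request that CertificateAuthority.Sign can consume.
func newCSR(commonName string) ([]byte, *rsa.PrivateKey, error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, nil, err
	}
	template := &x509.CertificateRequest{
		Subject:  pkix.Name{CommonName: commonName},
		DNSNames: []string{commonName},
	}
	csr, err := x509.CreateCertificateRequest(rand.Reader, template, key)
	if err != nil {
		return nil, nil, err
	}
	return csr, key, nil
}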
// ReadCA reads an existing CA certificate from disk
func ReadCA(clsName, certPath, keyPath string) (*CertificateAuthority, error) {
// read private key
rawKey, err := ioutil.ReadFile(keyPath)
if err != nil {
return nil, errors.Annotatef(err, "error reading CA private key for %s", clsName)
}
keyPem, _ := pem.Decode(rawKey)
if keyPem == nil {
return nil, errors.Errorf("error decoding CA private key for %s", clsName)
}
var privKey PrivKey
switch keyPem.Type {
case "RSA PRIVATE KEY":
pk, err := x509.ParsePKCS1PrivateKey(keyPem.Bytes)
if err != nil {
return nil, errors.Annotatef(err, "error decoding CA private key for %s", clsName)
}
privKey = &RSAPrivKey{key: pk}
default:
return nil, errors.Errorf("the CA private key type \"%s\" is not supported", keyPem.Type)
}
// read certificate
rawCert, err := ioutil.ReadFile(certPath)
if err != nil {
return nil, errors.Annotatef(err, "error reading CA certificate for %s", clsName)
}
certPem, _ := pem.Decode(rawCert)
if certPem == nil {
return nil, errors.Errorf("error decoding CA certificate for %s", clsName)
}
if certPem.Type != "CERTIFICATE" {
return nil, errors.Errorf("the CA certificate type \"%s\" is not valid", certPem.Type)
}
cert, err := x509.ParseCertificate(certPem.Bytes)
if err != nil {
return nil, errors.Annotatef(err, "error decoding CA certificate for %s", clsName)
}
return &CertificateAuthority{
ClusterName: clsName,
Cert: cert,
Key: privKey,
}, nil
}
scikit_learn.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for using the Scikit-Learn API with Keras models."""
# pylint: disable=g-classes-have-attributes
import copy
import types
import numpy as np
from keras import losses
from keras.models import Sequential
from keras.utils.generic_utils import has_arg
from keras.utils.np_utils import to_categorical
from tensorflow.python.util.tf_export import keras_export
class BaseWrapper(object):
"""Base class for the Keras scikit-learn wrapper.
Warning: This class should not be used directly.
Use descendant classes instead.
Args:
build_fn: callable function or class instance
**sk_params: model parameters & fitting parameters
The `build_fn` should construct, compile and return a Keras model, which
will then be used to fit/predict. One of the following
three values could be passed to `build_fn`:
1. A function
2. An instance of a class that implements the `__call__` method
3. None. This means you implement a class that inherits from either
`KerasClassifier` or `KerasRegressor`. The `__call__` method of the
present class will then be treated as the default `build_fn`.
`sk_params` takes both model parameters and fitting parameters. Legal model
parameters are the arguments of `build_fn`. Note that like all other
estimators in scikit-learn, `build_fn` should provide default values for
its arguments, so that you could create the estimator without passing any
values to `sk_params`.
`sk_params` could also accept parameters for calling `fit`, `predict`,
`predict_proba`, and `score` methods (e.g., `epochs`, `batch_size`).
fitting (predicting) parameters are selected in the following order:
1. Values passed to the dictionary arguments of
`fit`, `predict`, `predict_proba`, and `score` methods
2. Values passed to `sk_params`
3. The default values of the `keras.models.Sequential`
`fit`, `predict`, `predict_proba` and `score` methods
When using scikit-learn's `grid_search` API, legal tunable parameters are
those you could pass to `sk_params`, including fitting parameters.
In other words, you could use `grid_search` to search for the best
`batch_size` or `epochs` as well as the model parameters.
"""
def __init__(self, build_fn=None, **sk_params):
self.build_fn = build_fn
self.sk_params = sk_params
self.check_params(sk_params)
def check_params(self, params):
"""Checks for user typos in `params`.
Args:
params: dictionary; the parameters to be checked
Raises:
ValueError: if any member of `params` is not a valid argument.
"""
legal_params_fns = [
Sequential.fit, Sequential.predict, Sequential.predict_classes,
Sequential.evaluate
]
if self.build_fn is None:
legal_params_fns.append(self.__call__)
elif (not isinstance(self.build_fn, types.FunctionType) and
not isinstance(self.build_fn, types.MethodType)):
legal_params_fns.append(self.build_fn.__call__)
else:
legal_params_fns.append(self.build_fn)
for params_name in params:
for fn in legal_params_fns:
if has_arg(fn, params_name):
break
else:
if params_name != 'nb_epoch':
raise ValueError('{} is not a legal parameter'.format(params_name))
def get_params(self, **params): # pylint: disable=unused-argument
"""Gets parameters for this estimator.
Args:
**params: ignored (exists for API compatibility).
Returns:
Dictionary of parameter names mapped to their values.
"""
res = self.sk_params.copy()
res.update({'build_fn': self.build_fn})
return res
def set_params(self, **params):
"""Sets the parameters of this estimator.
Args:
**params: Dictionary of parameter names mapped to their values.
Returns:
self
"""
self.check_params(params)
self.sk_params.update(params)
return self
def fit(self, x, y, **kwargs):
"""Constructs a new model with `build_fn` & fit the model to `(x, y)`.
Args:
x : array-like, shape `(n_samples, n_features)`
Training samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.fit`
Returns:
history : object
details about the training history at each epoch.
"""
if self.build_fn is None:
self.model = self.__call__(**self.filter_sk_params(self.__call__))
elif (not isinstance(self.build_fn, types.FunctionType) and
not isinstance(self.build_fn, types.MethodType)):
self.model = self.build_fn(
**self.filter_sk_params(self.build_fn.__call__))
else:
self.model = self.build_fn(**self.filter_sk_params(self.build_fn))
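# One-hot encode integer labels when the loss expects 2-D targets.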
if (losses.is_categorical_crossentropy(self.model.loss) and
len(y.shape) != 2):
y = to_categorical(y)
fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit))
fit_args.update(kwargs)
history = self.model.fit(x, y, **fit_args)
return history
def filter_sk_params(self, fn, override=None):
"""Filters `sk_params` and returns those in `fn`'s arguments.
Args:
fn : arbitrary function
override: dictionary, values to override `sk_params`
Returns:
res : dictionary containing variables
in both `sk_params` and `fn`'s arguments.
"""
override = override or {}
res = {}
for name, value in self.sk_params.items():
if has_arg(fn, name):
res.update({name: value})
res.update(override)
return res
@keras_export('keras.wrappers.scikit_learn.KerasClassifier')
class KerasClassifier(BaseWrapper):
"""Implementation of the scikit-learn classifier API for Keras.
"""
def fit(self, x, y, **kwargs):
"""Constructs a new model with `build_fn` & fit the model to `(x, y)`.
Args:
x : array-like, shape `(n_samples, n_features)`
Training samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.fit`
Returns:
history : object
details about the training history at each epoch.
Raises:
ValueError: In case of invalid shape for `y` argument.
"""
y = np.array(y)
if len(y.shape) == 2 and y.shape[1] > 1:
self.classes_ = np.arange(y.shape[1])
elif (len(y.shape) == 2 and y.shape[1] == 1) or len(y.shape) == 1:
self.classes_ = np.unique(y)
y = np.searchsorted(self.classes_, y)
else:
raise ValueError('Invalid shape for y: ' + str(y.shape))
self.n_classes_ = len(self.classes_)
return super(KerasClassifier, self).fit(x, y, **kwargs)
def predict(self, x, **kwargs):
"""Returns the class predictions for the given test data.
Args:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
**kwargs: dictionary arguments
Legal arguments are the arguments
of `Sequential.predict_classes`.
Returns:
preds: array-like, shape `(n_samples,)`
Class predictions.
"""
kwargs = self.filter_sk_params(Sequential.predict_classes, kwargs)
classes = self.model.predict_classes(x, **kwargs)
return self.classes_[classes]
def predict_proba(self, x, **kwargs):
"""Returns class probability estimates for the given test data.
Args:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
**kwargs: dictionary arguments
Legal arguments are the arguments
of `Sequential.predict_proba`.
Returns:
proba: array-like, shape `(n_samples, n_outputs)`
Class probability estimates.
In the case of binary classification,
to match the scikit-learn API,
will return an array of shape `(n_samples, 2)`
(instead of `(n_samples, 1)` as in Keras).
"""
kwargs = self.filter_sk_params(Sequential.predict_proba, kwargs)
probs = self.model.predict(x, **kwargs)
# check if binary classification
if probs.shape[1] == 1:
# first column is probability of class 0 and second is of class 1
probs = np.hstack([1 - probs, probs])
return probs
def score(self, x, y, **kwargs):
"""Returns the mean accuracy on the given test data and labels.
Args:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y: array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.evaluate`.
Returns:
score: float
Mean accuracy of predictions on `x` wrt. `y`.
Raises:
ValueError: If the underlying model isn't configured to
compute accuracy. You should pass `metrics=["accuracy"]` to
the `.compile()` method of the model.
"""
y = np.searchsorted(self.classes_, y)
kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)
loss_name = self.model.loss
if hasattr(loss_name, '__name__'):
loss_name = loss_name.__name__
if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:
y = to_categorical(y)
outputs = self.model.evaluate(x, y, **kwargs)
if not isinstance(outputs, list):
outputs = [outputs]
for name, output in zip(self.model.metrics_names, outputs):
if name in ['accuracy', 'acc']:
return output
raise ValueError('The model is not configured to compute accuracy. '
'You should pass `metrics=["accuracy"]` to '
'the `model.compile()` method.')
@keras_export('keras.wrappers.scikit_learn.KerasRegressor')
class KerasRegressor(BaseWrapper):
"""Implementation of the scikit-learn regressor API for Keras.
"""
def predict(self, x, **kwargs):
"""Returns predictions for the given test data.
Args:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.predict`.
Returns:
preds: array-like, shape `(n_samples,)`
Predictions.
"""
kwargs = self.filter_sk_params(Sequential.predict, kwargs)
return np.squeeze(self.model.predict(x, **kwargs))
def score(self, x, y, **kwargs):
"""Returns the mean loss on the given test data and labels.
Args:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y: array-like, shape `(n_samples,)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.evaluate`.
Returns:
score: float
Mean loss of predictions on `x` wrt. `y`.
"""
kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)
loss = self.model.evaluate(x, y, **kwargs)
if isinstance(loss, list):
return -loss[0]
return -loss
vmi_networking.go | /*
* This file is part of the kubevirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2018 Red Hat, Inc.
*
*/
package network
import (
"context"
"fmt"
"net"
"strconv"
"strings"
"time"
expect "github.com/google/goexpect"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
k8sv1 "k8s.io/api/core/v1"
v12 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v13 "k8s.io/apimachinery/pkg/apis/meta/v1"
netutils "k8s.io/utils/net"
"k8s.io/utils/pointer"
"kubevirt.io/kubevirt/tests/util"
v1 "kubevirt.io/client-go/api/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
"kubevirt.io/kubevirt/pkg/virt-controller/services"
"kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api"
"kubevirt.io/kubevirt/tests"
"kubevirt.io/kubevirt/tests/console"
cd "kubevirt.io/kubevirt/tests/containerdisk"
"kubevirt.io/kubevirt/tests/flags"
"kubevirt.io/kubevirt/tests/libnet"
"kubevirt.io/kubevirt/tests/libvmi"
)
var _ = SIGDescribe("[Serial][rfe_id:694][crit:medium][vendor:[email protected]][level:component]Networking", func() {
var err error
var virtClient kubecli.KubevirtClient
var currentConfiguration v1.KubeVirtConfiguration
var inboundVMI *v1.VirtualMachineInstance
var inboundVMIWithPodNetworkSet *v1.VirtualMachineInstance
var inboundVMIWithCustomMacAddress *v1.VirtualMachineInstance
var outboundVMI *v1.VirtualMachineInstance
const (
testPort = 1500
LibvirtDirectMigrationPort = 49152
LibvirtBlockMigrationPort = 49153
)
tests.BeforeAll(func() {
virtClient, err = kubecli.GetKubevirtClient()
util.PanicOnError(err)
kv := util.GetCurrentKv(virtClient)
currentConfiguration = kv.Spec.Configuration
})
checkMacAddress := func(vmi *v1.VirtualMachineInstance, expectedMacAddress string) {
err := console.SafeExpectBatch(vmi, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "cat /sys/class/net/eth0/address\n"},
&expect.BExp{R: expectedMacAddress},
}, 15)
Expect(err).ToNot(HaveOccurred())
}
checkNetworkVendor := func(vmi *v1.VirtualMachineInstance, expectedVendor string) {
err := console.SafeExpectBatch(vmi, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "cat /sys/class/net/eth0/device/vendor\n"},
&expect.BExp{R: expectedVendor},
}, 15)
Expect(err).ToNot(HaveOccurred())
}
checkLearningState := func(vmi *v1.VirtualMachineInstance, expectedValue string) {
output := tests.RunCommandOnVmiPod(vmi, []string{"cat", "/sys/class/net/eth0-nic/brport/learning"})
ExpectWithOffset(1, strings.TrimSpace(output)).To(Equal(expectedValue))
}
setBridgeEnabled := func(enable bool) {
if currentConfiguration.NetworkConfiguration == nil {
currentConfiguration.NetworkConfiguration = &v1.NetworkConfiguration{}
}
currentConfiguration.NetworkConfiguration.PermitBridgeInterfaceOnPodNetwork = pointer.BoolPtr(enable)
kv := tests.UpdateKubeVirtConfigValueAndWait(currentConfiguration)
currentConfiguration = kv.Spec.Configuration
}
Describe("Multiple virtual machines connectivity using bridge binding interface", func() {
tests.BeforeAll(func() {
tests.BeforeTestCleanup()
inboundVMI = libvmi.NewCirros()
outboundVMI = libvmi.NewCirros()
inboundVMIWithPodNetworkSet = libvmi.NewCirros(
libvmi.WithInterface(*v1.DefaultBridgeNetworkInterface()),
libvmi.WithNetwork(v1.DefaultPodNetwork()))
inboundVMIWithCustomMacAddress = libvmi.NewCirros(
libvmi.WithInterface(*libvmi.InterfaceWithMac(v1.DefaultBridgeNetworkInterface(), "de:ad:00:00:be:af")),
libvmi.WithNetwork(v1.DefaultPodNetwork()))
// Create VMIs
for _, networkVMI := range []*v1.VirtualMachineInstance{inboundVMI, outboundVMI, inboundVMIWithPodNetworkSet, inboundVMIWithCustomMacAddress} {
vmi, err := virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(networkVMI)
Expect(err).ToNot(HaveOccurred())
*networkVMI = *vmi
}
// Wait for VMIs to become ready
inboundVMI = tests.WaitUntilVMIReady(inboundVMI, libnet.WithIPv6(console.LoginToCirros))
outboundVMI = tests.WaitUntilVMIReady(outboundVMI, libnet.WithIPv6(console.LoginToCirros))
inboundVMIWithPodNetworkSet = tests.WaitUntilVMIReady(inboundVMIWithPodNetworkSet, libnet.WithIPv6(console.LoginToCirros))
inboundVMIWithCustomMacAddress = tests.WaitUntilVMIReady(inboundVMIWithCustomMacAddress, libnet.WithIPv6(console.LoginToCirros))
tests.StartTCPServer(inboundVMI, testPort)
})
table.DescribeTable("should be able to reach", func(vmiRef **v1.VirtualMachineInstance) {
var cmdCheck, addrShow, addr string
if vmiRef == nil {
addr = "kubevirt.io"
} else {
vmi := *vmiRef
if vmiHasCustomMacAddress(vmi) {
tests.SkipIfOpenShift("Custom MAC addresses on pod networks are not supported")
}
addr = vmi.Status.Interfaces[0].IP
}
payloadSize := 0
ipHeaderSize := 28 // IPv4 specific
vmiPod := tests.GetRunningPodByVirtualMachineInstance(outboundVMI, util.NamespaceTestDefault)
var mtu int
for _, ifaceName := range []string{"k6t-eth0", "tap0"} {
By(fmt.Sprintf("checking %s MTU inside the pod", ifaceName))
output, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"cat", fmt.Sprintf("/sys/class/net/%s/mtu", ifaceName)},
)
log.Log.Infof("%s mtu is %v", ifaceName, output)
Expect(err).ToNot(HaveOccurred())
output = strings.TrimSuffix(output, "\n")
mtu, err = strconv.Atoi(output)
Expect(err).ToNot(HaveOccurred())
Expect(mtu > 1000).To(BeTrue())
payloadSize = mtu - ipHeaderSize
}
expectedMtuString := fmt.Sprintf("mtu %d", mtu)
By("checking eth0 MTU inside the VirtualMachineInstance")
Expect(libnet.WithIPv6(console.LoginToCirros)(outboundVMI)).To(Succeed())
addrShow = "ip address show eth0\n"
Expect(console.SafeExpectBatch(outboundVMI, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: addrShow},
&expect.BExp{R: fmt.Sprintf(".*%s.*\n", expectedMtuString)},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: console.RetValue("0")},
}, 180)).To(Succeed())
By("checking the VirtualMachineInstance can send MTU sized frames to another VirtualMachineInstance")
// NOTE: VirtualMachineInstance is not directly accessible from inside the pod because
// we transferred its IP address under DHCP server control, so the
// only thing we can validate is connectivity between VMIs
//
// NOTE: cirros ping doesn't support -M do, which could otherwise be used
// to validate end-to-end connectivity with the Don't Fragment flag set
cmdCheck = fmt.Sprintf("ping %s -c 1 -w 5 -s %d\n", addr, payloadSize)
err = console.SafeExpectBatch(outboundVMI, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: cmdCheck},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: console.RetValue("0")},
}, 180)
Expect(err).ToNot(HaveOccurred())
By("checking the VirtualMachineInstance can fetch via HTTP")
err = console.SafeExpectBatch(outboundVMI, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "curl --silent http://kubevirt.io > /dev/null\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: console.RetValue("0")},
}, 15)
Expect(err).ToNot(HaveOccurred())
},
table.Entry("[test_id:1539]the Inbound VirtualMachineInstance", &inboundVMI),
table.Entry("[test_id:1540]the Inbound VirtualMachineInstance with pod network connectivity explicitly set", &inboundVMIWithPodNetworkSet),
table.Entry("[test_id:1541]the Inbound VirtualMachineInstance with custom MAC address", &inboundVMIWithCustomMacAddress),
table.Entry("[test_id:1542]the internet", nil),
)
table.DescribeTable("should be reachable via the propagated IP from a Pod", func(op v12.NodeSelectorOperator, hostNetwork bool) {
ip := inboundVMI.Status.Interfaces[0].IP
// TODO: skip the v12.NodeSelectorOpNotIn entries when the cluster has only one node
nodes, err := virtClient.CoreV1().Nodes().List(context.Background(), v13.ListOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(nodes.Items).ToNot(BeEmpty())
if len(nodes.Items) == 1 && op == v12.NodeSelectorOpNotIn {
Skip("Skip network test that requires multiple nodes when only one node is present.")
}
job := tests.NewHelloWorldJob(ip, strconv.Itoa(testPort))
job.Spec.Template.Spec.Affinity = &v12.Affinity{
NodeAffinity: &v12.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v12.NodeSelector{
NodeSelectorTerms: []v12.NodeSelectorTerm{
{
MatchExpressions: []v12.NodeSelectorRequirement{
{Key: "kubernetes.io/hostname", Operator: op, Values: []string{inboundVMI.Status.NodeName}},
},
},
},
},
},
}
job.Spec.Template.Spec.HostNetwork = hostNetwork
job, err = virtClient.BatchV1().Jobs(inboundVMI.ObjectMeta.Namespace).Create(context.Background(), job, metav1.CreateOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(tests.WaitForJobToSucceed(job, 90*time.Second)).To(Succeed())
},
table.Entry("[test_id:1543]on the same node from Pod", v12.NodeSelectorOpIn, false),
table.Entry("[test_id:1544]on a different node from Pod", v12.NodeSelectorOpNotIn, false),
table.Entry("[test_id:1545]on the same node from Node", v12.NodeSelectorOpIn, true),
table.Entry("[test_id:1546]on a different node from Node", v12.NodeSelectorOpNotIn, true),
)
Context("VirtualMachineInstance with default interface model", func() {
// Unless an explicit interface model is specified, the default interface model is virtio.
It("[test_id:1550]should expose the right device type to the guest", func() {
By("checking the device vendor in /sys/class")
// Taken from https://wiki.osdev.org/Virtio#Technical_Details
virtio_vid := "0x1af4"
for _, networkVMI := range []*v1.VirtualMachineInstance{inboundVMI, outboundVMI} {
// as defined in https://vendev.org/pci/ven_1af4/
checkNetworkVendor(networkVMI, virtio_vid)
}
})
It("[test_id:1551]should reject the creation of virtual machine with unsupported interface model", func() {
// Create a virtual machine with an unsupported interface model
customIfVMI := NewRandomVMIWithInvalidNetworkInterface()
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(customIfVMI)
Expect(err).To(HaveOccurred())
})
})
})
Context("VirtualMachineInstance with custom interface model", func() {
BeforeEach(func() {
tests.BeforeTestCleanup()
})
It("[test_id:1770]should expose the right device type to the guest", func() {
By("checking the device vendor in /sys/class")
// Create a machine with e1000 interface model
e1000VMI := tests.NewRandomVMIWithe1000NetworkInterface()
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(e1000VMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(e1000VMI, console.LoginToAlpine)
// as defined in https://vendev.org/pci/ven_8086/
checkNetworkVendor(e1000VMI, "0x8086")
})
})
Context("VirtualMachineInstance with custom MAC address", func() {
BeforeEach(func() {
tests.BeforeTestCleanup()
})
It("[test_id:1771]should configure custom MAC address", func() {
By("checking eth0 MAC address")
deadbeafVMI := tests.NewRandomVMIWithCustomMacAddress()
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(deadbeafVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(deadbeafVMI, console.LoginToAlpine)
checkMacAddress(deadbeafVMI, deadbeafVMI.Spec.Domain.Devices.Interfaces[0].MacAddress)
})
})
Context("VirtualMachineInstance with custom MAC address in non-conventional format", func() {
BeforeEach(func() {
tests.BeforeTestCleanup()
})
It("[test_id:1772]should configure custom MAC address", func() {
By("checking eth0 MAC address")
beafdeadVMI := tests.NewRandomVMIWithCustomMacAddress()
beafdeadVMI.Spec.Domain.Devices.Interfaces[0].MacAddress = "BE-AF-00-00-DE-AD"
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(beafdeadVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(beafdeadVMI, console.LoginToAlpine)
checkMacAddress(beafdeadVMI, "be:af:00:00:de:ad")
})
})
Context("VirtualMachineInstance with invalid MAC address", func() {
BeforeEach(func() {
tests.BeforeTestCleanup()
})
It("[test_id:700]should failed to start with invalid MAC address", func() {
By("Start VMI")
beafdeadVMI := tests.NewRandomVMIWithCustomMacAddress()
beafdeadVMI.Spec.Domain.Devices.Interfaces[0].MacAddress = "de:00c:00c:00:00:de:abc"
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(beafdeadVMI)
Expect(err).To(HaveOccurred())
testErr := err.(*errors.StatusError)
Expect(testErr.ErrStatus.Reason).To(BeEquivalentTo("Invalid"))
})
})
Context("VirtualMachineInstance with custom MAC address and slirp interface", func() {
setPermitSlirpInterface := func(enable bool) {
if currentConfiguration.NetworkConfiguration == nil {
currentConfiguration.NetworkConfiguration = &v1.NetworkConfiguration{}
}
currentConfiguration.NetworkConfiguration.PermitSlirpInterface = pointer.BoolPtr(enable)
kv := tests.UpdateKubeVirtConfigValueAndWait(currentConfiguration)
currentConfiguration = kv.Spec.Configuration
}
BeforeEach(func() {
tests.BeforeTestCleanup()
setPermitSlirpInterface(true)
})
AfterEach(func() {
setPermitSlirpInterface(false)
})
It("[test_id:1773]should configure custom MAC address", func() {
By("checking eth0 MAC address")
deadbeafVMI := tests.NewRandomVMIWithSlirpInterfaceEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskAlpine), "#!/bin/bash\necho 'hello'\n", []v1.Port{})
deadbeafVMI.Spec.Domain.Devices.Interfaces[0].MacAddress = "de:ad:00:00:be:af"
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(deadbeafVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(deadbeafVMI, console.LoginToAlpine)
checkMacAddress(deadbeafVMI, deadbeafVMI.Spec.Domain.Devices.Interfaces[0].MacAddress)
})
})
Context("VirtualMachineInstance with disabled automatic attachment of interfaces", func() {
BeforeEach(func() {
tests.BeforeTestCleanup()
})
It("[test_id:1774]should not configure any external interfaces", func() {
By("checking loopback is the only guest interface")
autoAttach := false
detachedVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n")
// Remove the masquerade interface to use the default bridge one
detachedVMI.Spec.Domain.Devices.Interfaces = nil
detachedVMI.Spec.Networks = nil
detachedVMI.Spec.Domain.Devices.AutoattachPodInterface = &autoAttach
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(detachedVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(detachedVMI, libnet.WithIPv6(console.LoginToCirros))
err := console.SafeExpectBatch(detachedVMI, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "ls /sys/class/net/ | wc -l\n"},
&expect.BExp{R: "1"},
}, 15)
Expect(err).ToNot(HaveOccurred())
})
It("[test_id:1775]should not request a tun device", func() {
By("Creating random VirtualMachineInstance")
autoAttach := false
vmi := tests.NewRandomVMIWithEphemeralDisk(cd.ContainerDiskFor(cd.ContainerDiskAlpine))
// Remove the masquerade interface to use the default bridge one
vmi.Spec.Domain.Devices.Interfaces = nil
vmi.Spec.Networks = nil
vmi.Spec.Domain.Devices.AutoattachPodInterface = &autoAttach
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(vmi)
Expect(err).ToNot(HaveOccurred())
waitUntilVMIReady(vmi, console.LoginToAlpine)
By("Checking that the pod did not request a tun device")
virtClient, err := kubecli.GetKubevirtClient()
Expect(err).ToNot(HaveOccurred())
By("Looking up pod using VMI's label")
pods, err := virtClient.CoreV1().Pods(util.NamespaceTestDefault).List(context.Background(), tests.UnfinishedVMIPodSelector(vmi))
Expect(err).ToNot(HaveOccurred())
Expect(pods.Items).NotTo(BeEmpty())
pod := pods.Items[0]
foundContainer := false
for _, container := range pod.Spec.Containers {
if container.Name == "compute" {
foundContainer = true
_, ok := container.Resources.Requests[services.TunDevice]
Expect(ok).To(BeFalse())
_, ok = container.Resources.Limits[services.TunDevice]
Expect(ok).To(BeFalse())
caps := container.SecurityContext.Capabilities
Expect(caps.Add).To(Not(ContainElement(k8sv1.Capability("NET_ADMIN"))), "Compute container should not have NET_ADMIN capability")
Expect(caps.Drop).To(ContainElement(k8sv1.Capability("NET_RAW")), "Compute container should drop NET_RAW capability")
}
}
Expect(foundContainer).To(BeTrue(), "Did not find 'compute' container in pod")
})
})
Context("VirtualMachineInstance with custom PCI address", func() {
BeforeEach(func() {
tests.BeforeTestCleanup()
})
checkPciAddress := func(vmi *v1.VirtualMachineInstance, expectedPciAddress string) {
err := console.SafeExpectBatch(vmi, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "grep INTERFACE /sys/bus/pci/devices/" + expectedPciAddress + "/*/net/eth0/uevent|awk -F= '{ print $2 }'\n"},
&expect.BExp{R: "eth0"},
}, 15)
Expect(err).ToNot(HaveOccurred())
}
It("[test_id:1776]should configure custom Pci address", func() {
By("checking eth0 Pci address")
testVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n")
tests.AddExplicitPodNetworkInterface(testVMI)
testVMI.Spec.Domain.Devices.Interfaces[0].PciAddress = "0000:81:00.1"
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(testVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(testVMI, libnet.WithIPv6(console.LoginToCirros))
checkPciAddress(testVMI, testVMI.Spec.Domain.Devices.Interfaces[0].PciAddress)
})
})
Context("VirtualMachineInstance with learning disabled on pod interface", func() {
BeforeEach(func() {
tests.BeforeTestCleanup()
})
It("[test_id:1777]should disable learning on pod iface", func() {
By("checking learning flag")
learningDisabledVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskAlpine), "#!/bin/bash\necho 'hello'\n")
// Remove the masquerade interface to use the default bridge one
learningDisabledVMI.Spec.Domain.Devices.Interfaces = nil
learningDisabledVMI.Spec.Networks = nil
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(learningDisabledVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(learningDisabledVMI, console.LoginToAlpine)
checkLearningState(learningDisabledVMI, "0")
})
})
Context("VirtualMachineInstance with dhcp options", func() {
BeforeEach(func() {
tests.BeforeTestCleanup()
})
It("[test_id:1778]should offer extra dhcp options to pod iface", func() {
dhcpVMI := tests.NewRandomVMIWithEphemeralDisk(cd.ContainerDiskFor(cd.ContainerDiskFedoraTestTooling))
tests.AddExplicitPodNetworkInterface(dhcpVMI)
dhcpVMI.Spec.Domain.Resources.Requests[k8sv1.ResourceName("memory")] = resource.MustParse("1024M")
dhcpVMI.Spec.Domain.Devices.Interfaces[0].DHCPOptions = &v1.DHCPOptions{
BootFileName: "config",
TFTPServerName: "tftp.kubevirt.io",
NTPServers: []string{"127.0.0.1", "127.0.0.2"},
PrivateOptions: []v1.DHCPPrivateOptions{{Option: 240, Value: "private.options.kubevirt.io"}},
}
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(dhcpVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(dhcpVMI, console.LoginToFedora)
err = console.SafeExpectBatch(dhcpVMI, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "dhclient -1 -r -d eth0\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "dhclient -1 -sf /usr/bin/env --request-options subnet-mask,broadcast-address,time-offset,routers,domain-search,domain-name,domain-name-servers,host-name,nis-domain,nis-servers,ntp-servers,interface-mtu,tftp-server-name,bootfile-name eth0 | tee /dhcp-env\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "grep -q 'new_tftp_server_name=tftp.kubevirt.io' /dhcp-env; echo $?\n"},
&expect.BExp{R: console.RetValue("0")},
&expect.BSnd{S: "grep -q 'new_bootfile_name=config' /dhcp-env; echo $?\n"},
&expect.BExp{R: console.RetValue("0")},
&expect.BSnd{S: "grep -q 'new_ntp_servers=127.0.0.1 127.0.0.2' /dhcp-env; echo $?\n"},
&expect.BExp{R: console.RetValue("0")},
&expect.BSnd{S: "grep -q 'new_unknown_240=private.options.kubevirt.io' /dhcp-env; echo $?\n"},
&expect.BExp{R: console.RetValue("0")},
}, 15)
Expect(err).ToNot(HaveOccurred())
})
})
Context("VirtualMachineInstance with custom dns", func() {
BeforeEach(func() {
tests.BeforeTestCleanup()
})
It("[test_id:1779]should have custom resolv.conf", func() {
userData := "#cloud-config\n"
dnsVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), userData)
dnsVMI.Spec.DNSPolicy = "None"
dnsVMI.Spec.DNSConfig = &k8sv1.PodDNSConfig{
Nameservers: []string{"8.8.8.8", "4.2.2.1"},
Searches: []string{"example.com"},
}
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(dnsVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(dnsVMI, libnet.WithIPv6(console.LoginToCirros))
err = console.SafeExpectBatch(dnsVMI, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "cat /etc/resolv.conf\n"},
&expect.BExp{R: "search example.com"},
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "cat /etc/resolv.conf\n"},
&expect.BExp{R: "nameserver 8.8.8.8"},
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "cat /etc/resolv.conf\n"},
&expect.BExp{R: "nameserver 4.2.2.1"},
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
}, 15)
Expect(err).ToNot(HaveOccurred())
})
})
Context("VirtualMachineInstance with masquerade binding mechanism", func() {
BeforeEach(func() {
tests.BeforeTestCleanup()
})
masqueradeVMI := func(ports []v1.Port, ipv4NetworkCIDR string) *v1.VirtualMachineInstance {
containerImage := cd.ContainerDiskFor(cd.ContainerDiskCirros)
userData := "#!/bin/bash\necho 'hello'\n"
vmi := tests.NewRandomVMIWithEphemeralDiskAndUserdata(containerImage, userData)
vmi.Spec.Domain.Devices.Interfaces = []v1.Interface{{Name: "default", Ports: ports, InterfaceBindingMethod: v1.InterfaceBindingMethod{Masquerade: &v1.InterfaceMasquerade{}}}}
net := v1.DefaultPodNetwork()
if ipv4NetworkCIDR != "" {
net.NetworkSource.Pod.VMNetworkCIDR = ipv4NetworkCIDR
}
vmi.Spec.Networks = []v1.Network{*net}
return vmi
}
fedoraMasqueradeVMI := func(ports []v1.Port, ipv6NetworkCIDR string) (*v1.VirtualMachineInstance, error) {
networkData, err := libnet.NewNetworkData(
libnet.WithEthernet("eth0",
libnet.WithDHCP4Enabled(),
libnet.WithDHCP6Enabled(),
),
)
if err != nil {
return nil, err
}
net := v1.DefaultPodNetwork()
net.Pod.VMIPv6NetworkCIDR = ipv6NetworkCIDR
vmi := libvmi.NewFedora(
libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding(ports...)),
libvmi.WithNetwork(net),
libvmi.WithCloudInitNoCloudNetworkData(networkData, false),
)
return vmi, nil
}
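// configureIpv6 brings up IPv6 inside the guest for masquerade binding:
// it requests an address via DHCPv6, then installs the VM subnet route and
// a default route via the gateway (the first address of the network CIDR).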
configureIpv6 := func(vmi *v1.VirtualMachineInstance, networkCIDR string) error {
if networkCIDR == "" {
networkCIDR = api.DefaultVMIpv6CIDR
}
err := console.RunCommand(vmi, "dhclient -6 eth0", 30*time.Second)
if err != nil {
return err
}
err = console.RunCommand(vmi, "ip -6 route add "+networkCIDR+" dev eth0", 5*time.Second)
if err != nil {
return err
}
gateway := gatewayIPFromCIDR(networkCIDR)
err = console.RunCommand(vmi, "ip -6 route add default via "+gateway, 5*time.Second)
if err != nil {
return err
}
return nil
}
portsUsedByLiveMigration := func() []v1.Port {
return []v1.Port{
{Port: LibvirtDirectMigrationPort},
{Port: LibvirtBlockMigrationPort},
}
}
Context("[Conformance][test_id:1780][label:masquerade_binding_connectivity]should allow regular network connection", func() {
verifyClientServerConnectivity := func(clientVMI *v1.VirtualMachineInstance, serverVMI *v1.VirtualMachineInstance, tcpPort int, ipFamily k8sv1.IPFamily) error {
serverIP := libnet.GetVmiPrimaryIpByFamily(serverVMI, ipFamily)
err := libnet.PingFromVMConsole(clientVMI, serverIP)
if err != nil {
return err
}
By("Connecting from the client vm")
err = console.SafeExpectBatch(clientVMI, createExpectConnectToServer(serverIP, tcpPort, true), 30)
if err != nil {
return err
}
By("Rejecting the connection from the client to unregistered port")
err = console.SafeExpectBatch(clientVMI, createExpectConnectToServer(serverIP, tcpPort+1, false), 30)
if err != nil {
return err
}
return nil
}
table.DescribeTable("ipv4", func(ports []v1.Port, tcpPort int, networkCIDR string) {
var clientVMI *v1.VirtualMachineInstance
var serverVMI *v1.VirtualMachineInstance
clientVMI = masqueradeVMI([]v1.Port{}, networkCIDR)
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(clientVMI)
Expect(err).ToNot(HaveOccurred())
clientVMI = tests.WaitUntilVMIReady(clientVMI, console.LoginToCirros)
serverVMI = masqueradeVMI(ports, networkCIDR)
serverVMI.Labels = map[string]string{"expose": "server"}
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(serverVMI)
Expect(err).ToNot(HaveOccurred())
serverVMI = tests.WaitUntilVMIReady(serverVMI, console.LoginToCirros)
Expect(serverVMI.Status.Interfaces).To(HaveLen(1))
Expect(serverVMI.Status.Interfaces[0].IPs).NotTo(BeEmpty())
By("starting a tcp server")
tests.StartTCPServer(serverVMI, tcpPort)
if networkCIDR == "" {
networkCIDR = api.DefaultVMCIDR
}
By("Checking ping (IPv4) to gateway")
ipAddr := gatewayIPFromCIDR(networkCIDR)
Expect(libnet.PingFromVMConsole(serverVMI, ipAddr)).To(Succeed())
Expect(verifyClientServerConnectivity(clientVMI, serverVMI, tcpPort, k8sv1.IPv4Protocol)).To(Succeed())
},
table.Entry("with a specific port number [IPv4]", []v1.Port{{Name: "http", Port: 8080}}, 8080, ""),
table.Entry("with a specific port used by live migration", portsUsedByLiveMigration(), LibvirtDirectMigrationPort, ""),
table.Entry("without a specific port number [IPv4]", []v1.Port{}, 8080, ""),
table.Entry("with custom CIDR [IPv4]", []v1.Port{}, 8080, "10.10.10.0/24"),
)
It("[outside_connectivity]should be able to reach the outside world [IPv4]", func() {
ipv4Address := "8.8.8.8"
if flags.IPV4ConnectivityCheckAddress != "" {
ipv4Address = flags.IPV4ConnectivityCheckAddress
}
dns := "google.com"
if flags.ConnectivityCheckDNS != "" {
dns = flags.ConnectivityCheckDNS
}
vmi := masqueradeVMI([]v1.Port{}, "")
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(vmi)
Expect(err).ToNot(HaveOccurred())
vmi = tests.WaitUntilVMIReady(vmi, console.LoginToCirros)
By("Checking ping (IPv4)")
Expect(libnet.PingFromVMConsole(vmi, ipv4Address, "-c 5", "-w 15")).To(Succeed())
Expect(libnet.PingFromVMConsole(vmi, dns, "-c 5", "-w 15")).To(Succeed())
})
table.DescribeTable("IPv6", func(ports []v1.Port, tcpPort int, networkCIDR string) {
libnet.SkipWhenNotDualStackCluster(virtClient)
var serverVMI *v1.VirtualMachineInstance
var clientVMI *v1.VirtualMachineInstance
clientVMI, err = fedoraMasqueradeVMI([]v1.Port{}, networkCIDR)
Expect(err).ToNot(HaveOccurred())
clientVMI, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(clientVMI)
Expect(err).ToNot(HaveOccurred())
clientVMI = tests.WaitUntilVMIReady(clientVMI, console.LoginToFedora)
Expect(configureIpv6(clientVMI, networkCIDR)).To(Succeed(), "failed to configure ipv6 on client vmi")
serverVMI, err = fedoraMasqueradeVMI(ports, networkCIDR)
Expect(err).ToNot(HaveOccurred())
serverVMI.Labels = map[string]string{"expose": "server"}
serverVMI, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(serverVMI)
Expect(err).ToNot(HaveOccurred())
serverVMI = tests.WaitUntilVMIReady(serverVMI, console.LoginToFedora)
Expect(configureIpv6(serverVMI, networkCIDR)).To(Succeed(), "failed to configure ipv6 on server vmi")
Expect(serverVMI.Status.Interfaces).To(HaveLen(1))
Expect(serverVMI.Status.Interfaces[0].IPs).NotTo(BeEmpty())
By("starting a http server")
tests.StartPythonHttpServer(serverVMI, tcpPort)
Expect(verifyClientServerConnectivity(clientVMI, serverVMI, tcpPort, k8sv1.IPv6Protocol)).To(Succeed())
},
table.Entry("with a specific port number [IPv6]", []v1.Port{{Name: "http", Port: 8080}}, 8080, ""),
table.Entry("with a specific port used by live migration", portsUsedByLiveMigration(), LibvirtDirectMigrationPort, ""),
table.Entry("without a specific port number [IPv6]", []v1.Port{}, 8080, ""),
table.Entry("with custom CIDR [IPv6]", []v1.Port{}, 8080, "fd10:10:10::/120"),
)
It("[outside_connectivity]should be able to reach the outside world [IPv6]", func() {
// Cluster nodes subnet (docker network gateway)
// Docker network subnet cidr definition:
// https://github.com/kubevirt/project-infra/blob/master/github/ci/shared-deployments/files/docker-daemon-mirror.conf#L5
ipv6Address := "2001:db8:1::1"
if flags.IPV6ConnectivityCheckAddress != "" {
ipv6Address = flags.IPV6ConnectivityCheckAddress
}
vmi, err := fedoraMasqueradeVMI([]v1.Port{}, "")
Expect(err).ToNot(HaveOccurred())
vmi, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(vmi)
Expect(err).ToNot(HaveOccurred())
vmi = tests.WaitUntilVMIReady(vmi, console.LoginToFedora)
Expect(configureIpv6(vmi, api.DefaultVMIpv6CIDR)).To(Succeed(), "failed to configure ipv6 on vmi")
By("Checking ping (IPv6) from vmi to cluster nodes gateway")
Expect(libnet.PingFromVMConsole(vmi, ipv6Address)).To(Succeed())
})
})
When("performing migration", func() {
var vmi *v1.VirtualMachineInstance
ping := func(ipAddr string) error {
return libnet.PingFromVMConsole(vmi, ipAddr, "-c 1", "-w 2")
}
getVirtHandlerPod := func() (*k8sv1.Pod, error) {
node := vmi.Status.NodeName
pod, err := kubecli.NewVirtHandlerClient(virtClient).Namespace(flags.KubeVirtInstallNamespace).ForNode(node).Pod()
if err != nil {
return nil, fmt.Errorf("failed to get virt-handler pod on node %s: %v", node, err)
}
return pod, nil
}
runMigrationAndExpectCompletion := func(migration *v1.VirtualMachineInstanceMigration, timeout int) {
migration, err = virtClient.VirtualMachineInstanceMigration(migration.Namespace).Create(migration)
Expect(err).NotTo(HaveOccurred())
Eventually(func() error {
migration, err := virtClient.VirtualMachineInstanceMigration(migration.Namespace).Get(migration.Name, &v13.GetOptions{})
if err != nil {
return err
}
Expect(migration.Status.Phase).ToNot(Equal(v1.MigrationFailed))
if migration.Status.Phase == v1.MigrationSucceeded {
return nil
}
return fmt.Errorf("Migration is in phase %s", migration.Status.Phase)
}, timeout, time.Second).Should(Succeed(), fmt.Sprintf("migration should succeed after %d s", timeout))
}
BeforeEach(func() {
tests.SkipIfMigrationIsNotPossible()
})
AfterEach(func() {
if vmi != nil {
By("Delete VMI")
Expect(virtClient.VirtualMachineInstance(vmi.Namespace).Delete(vmi.Name, &v13.DeleteOptions{})).To(Succeed())
Eventually(func() error {
_, err := virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &v13.GetOptions{})
return err
}, time.Minute, time.Second).Should(
SatisfyAll(HaveOccurred(), WithTransform(errors.IsNotFound, BeTrue())),
"The VMI should be gone within the given timeout",
)
}
})
table.DescribeTable("[Conformance] preserves connectivity", func(ipFamily k8sv1.IPFamily, ports []v1.Port) {
if ipFamily == k8sv1.IPv6Protocol {
libnet.SkipWhenNotDualStackCluster(virtClient)
}
var err error
var loginMethod console.LoginToFactory
By("Create VMI")
if ipFamily == k8sv1.IPv4Protocol {
vmi = masqueradeVMI(ports, "")
loginMethod = console.LoginToCirros
} else {
vmi, err = fedoraMasqueradeVMI(ports, "")
Expect(err).ToNot(HaveOccurred(), "Error creating fedora masquerade vmi")
loginMethod = console.LoginToFedora
}
vmi, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(vmi)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(vmi, loginMethod)
vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &v13.GetOptions{})
Expect(err).ToNot(HaveOccurred())
if ipFamily == k8sv1.IPv6Protocol {
err = configureIpv6(vmi, api.DefaultVMIpv6CIDR)
Expect(err).ToNot(HaveOccurred(), "failed to configure ipv6 on vmi")
}
virtHandlerPod, err := getVirtHandlerPod()
Expect(err).ToNot(HaveOccurred())
By("Check connectivity")
podIP := libnet.GetPodIpByFamily(virtHandlerPod, ipFamily)
Expect(ping(podIP)).To(Succeed())
By("Execute migration")
migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
runMigrationAndExpectCompletion(migration, tests.MigrationWaitTime)
vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &v13.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(vmi.Status.Phase).To(Equal(v1.Running))
Expect(ping(podIP)).To(Succeed())
By("Restarting the vmi")
Expect(console.SafeExpectBatch(vmi, []expect.Batcher{
&expect.BSnd{S: "sudo reboot\n"},
&expect.BExp{R: "reboot: Restarting system"},
}, 10)).To(Succeed(), "failed to restart the vmi")
tests.WaitUntilVMIReady(vmi, loginMethod)
if ipFamily == k8sv1.IPv6Protocol {
Expect(configureIpv6(vmi, api.DefaultVMIpv6CIDR)).To(Succeed(), "failed to configure ipv6 on vmi after restart")
}
Expect(ping(podIP)).To(Succeed())
},
table.Entry("IPv4", k8sv1.IPv4Protocol, []v1.Port{}),
table.Entry("IPv4 with explicit ports used by live migration", k8sv1.IPv4Protocol, portsUsedByLiveMigration()),
table.Entry("IPv6", k8sv1.IPv6Protocol, []v1.Port{}),
)
})
Context("MTU verification", func() {
var vmi *v1.VirtualMachineInstance
var anotherVmi *v1.VirtualMachineInstance
getMtu := func(pod *k8sv1.Pod, ifaceName string) int {
output, err := tests.ExecuteCommandOnPod(
virtClient,
pod,
"compute",
[]string{"cat", fmt.Sprintf("/sys/class/net/%s/mtu", ifaceName)},
)
ExpectWithOffset(1, err).ToNot(HaveOccurred())
output = strings.TrimSuffix(output, "\n")
mtu, err := strconv.Atoi(output)
ExpectWithOffset(1, err).ToNot(HaveOccurred())
return mtu
}
BeforeEach(func() {
var err error
By("Create masquerade VMI")
networkData, err := libnet.CreateDefaultCloudInitNetworkData()
Expect(err).NotTo(HaveOccurred())
vmi = libvmi.NewFedora(
libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
libvmi.WithNetwork(v1.DefaultPodNetwork()),
libvmi.WithCloudInitNoCloudNetworkData(networkData, false),
)
vmi, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(vmi)
Expect(err).ToNot(HaveOccurred())
By("Create another VMI")
anotherVmi = masqueradeVMI([]v1.Port{}, "")
anotherVmi, err = virtClient.VirtualMachineInstance(anotherVmi.Namespace).Create(anotherVmi)
Expect(err).ToNot(HaveOccurred())
By("Wait for VMIs to be ready")
tests.WaitUntilVMIReady(anotherVmi, libnet.WithIPv6(console.LoginToCirros))
anotherVmi, err = virtClient.VirtualMachineInstance(anotherVmi.Namespace).Get(anotherVmi.Name, &v13.GetOptions{})
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(vmi, console.LoginToFedora)
vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &v13.GetOptions{})
Expect(err).ToNot(HaveOccurred())
})
AfterEach(func() {
if vmi != nil {
By("Delete VMI")
Expect(virtClient.VirtualMachineInstance(vmi.Namespace).Delete(vmi.Name, &v13.DeleteOptions{})).To(Succeed())
}
})
AfterEach(func() {
if anotherVmi != nil {
By("Delete another VMI")
Expect(virtClient.VirtualMachineInstance(anotherVmi.Namespace).Delete(anotherVmi.Name, &v13.DeleteOptions{})).To(Succeed())
}
})
table.DescribeTable("should have the correct MTU", func(ipFamily k8sv1.IPFamily) {
if ipFamily == k8sv1.IPv6Protocol {
libnet.SkipWhenNotDualStackCluster(virtClient)
}
By("checking k6t-eth0 MTU inside the pod")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, vmi.Namespace)
bridgeMtu := getMtu(vmiPod, "k6t-eth0")
primaryIfaceMtu := getMtu(vmiPod, "eth0")
Expect(bridgeMtu).To(Equal(primaryIfaceMtu), "k6t-eth0 bridge mtu should equal eth0 interface mtu")
By("checking the tap device - tap0 - MTU inside the pod")
tapDeviceMTU := getMtu(vmiPod, "tap0")
Expect(tapDeviceMTU).To(Equal(primaryIfaceMtu), "tap0 mtu should equal eth0 interface mtu")
By("checking eth0 MTU inside the VirtualMachineInstance")
showMtu := "cat /sys/class/net/eth0/mtu\n"
err = console.SafeExpectBatch(vmi, []expect.Batcher{
&expect.BSnd{S: showMtu},
&expect.BExp{R: console.RetValue(strconv.Itoa(bridgeMtu))},
}, 180)
Expect(err).ToNot(HaveOccurred())
By("checking the VirtualMachineInstance can send MTU sized frames to another VirtualMachineInstance")
icmpHeaderSize := 8
var ipHeaderSize int
if ipFamily == k8sv1.IPv4Protocol {
ipHeaderSize = 20
} else {
ipHeaderSize = 40
}
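// Largest ICMP echo payload that still fits in one MTU-sized frame:
// MTU minus the IP header and the 8-byte ICMP header.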
payloadSize := primaryIfaceMtu - ipHeaderSize - icmpHeaderSize
addr := libnet.GetVmiPrimaryIpByFamily(anotherVmi, ipFamily)
Expect(libnet.PingFromVMConsole(vmi, addr, "-c 1", "-w 5", fmt.Sprintf("-s %d", payloadSize), "-M do")).To(Succeed())
By("checking the VirtualMachineInstance cannot send bigger than MTU sized frames to another VirtualMachineInstance")
Expect(libnet.PingFromVMConsole(vmi, addr, "-c 1", "-w 5", fmt.Sprintf("-s %d", payloadSize+1), "-M do")).ToNot(Succeed())
},
table.Entry("IPv4", k8sv1.IPv4Protocol),
table.Entry("IPv6", k8sv1.IPv6Protocol),
)
})
})
Context("VirtualMachineInstance with TX offload disabled", func() {
BeforeEach(func() {
tests.BeforeTestCleanup()
})
It("[test_id:1781]should get turned off for interfaces that serve dhcp", func() {
vmi := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskAlpine), "#!/bin/bash\necho")
vmi.Spec.Domain.Resources.Requests[k8sv1.ResourceName("memory")] = resource.MustParse("1024M")
_, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(vmi)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(vmi, console.LoginToAlpine)
output := tests.RunCommandOnVmiPod(
vmi,
[]string{"/bin/bash", "-c", "/usr/sbin/ethtool -k k6t-eth0|grep tx-checksumming|awk '{ printf $2 }'"},
)
ExpectWithOffset(1, strings.TrimSpace(output)).To(Equal("off"))
})
})
Context("vmi with default bridge interface on pod network", func() {
BeforeEach(func() {
setBridgeEnabled(false)
})
AfterEach(func() {
setBridgeEnabled(true)
})
It("[test_id:2964]should reject VMIs with bridge interface when it's not permitted on pod network", func() {
var t int64 = 0
vmi := v1.NewMinimalVMIWithNS(util.NamespaceTestDefault, libvmi.RandName(libvmi.DefaultVmiName))
vmi.Spec.TerminationGracePeriodSeconds = &t
vmi.Spec.Domain.Resources.Requests[k8sv1.ResourceMemory] = resource.MustParse("128Mi")
tests.AddEphemeralDisk(vmi, "disk0", "virtio", cd.ContainerDiskFor(cd.ContainerDiskCirros))
vmi, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(vmi)
Expect(err.Error()).To(ContainSubstring("Bridge interface is not enabled in kubevirt-config"))
})
})
})
func waitUntilVMIReady(vmi *v1.VirtualMachineInstance, loginTo console.LoginToFactory) *v1.VirtualMachineInstance {
// Wait for VirtualMachineInstance start
tests.WaitForSuccessfulVMIStart(vmi)
virtClient, err := kubecli.GetKubevirtClient()
Expect(err).ToNot(HaveOccurred())
// Fetch the new VirtualMachineInstance with updated status
vmi, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Get(vmi.Name, &v13.GetOptions{})
Expect(err).ToNot(HaveOccurred())
// Let's make sure the OS is up by waiting until we can log in
Expect(loginTo(vmi)).To(Succeed())
return vmi
}
func NewRandomVMIWithInvalidNetworkInterface() *v1.VirtualMachineInstance {
// Use alpine because cirros dhcp client starts prematurely before link is ready
vmi := tests.NewRandomVMIWithEphemeralDisk(cd.ContainerDiskFor(cd.ContainerDiskAlpine))
tests.AddExplicitPodNetworkInterface(vmi)
vmi.Spec.Domain.Devices.Interfaces[0].Model = "gibberish"
return vmi
}
func createExpectConnectToServer(serverIP string, tcpPort int, expectSuccess bool) []expect.Batcher {
	expectResult := console.ShellFail
	if expectSuccess {
		expectResult = console.ShellSuccess
	}
	var clientCommand string
	if netutils.IsIPv6String(serverIP) {
		clientCommand = fmt.Sprintf("curl %s\n", net.JoinHostPort(serverIP, strconv.Itoa(tcpPort)))
	} else {
		clientCommand = fmt.Sprintf("echo test | nc %s %d -i 1 -w 1 1> /dev/null\n", serverIP, tcpPort)
	}
	return []expect.Batcher{
		&expect.BSnd{S: "\n"},
		&expect.BExp{R: console.PromptExpression},
		&expect.BSnd{S: clientCommand},
		&expect.BExp{R: console.PromptExpression},
		&expect.BSnd{S: "echo $?\n"},
		&expect.BExp{R: expectResult},
	}
}
// gatewayIPFromCIDR returns the first address of a network.
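// For example, "10.0.2.0/24" yields "10.0.2.1" (illustrative input).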
func gatewayIPFromCIDR(cidr string) string {
ip, ipnet, _ := net.ParseCIDR(cidr)
ip = ip.Mask(ipnet.Mask)
oct := len(ip) - 1
ip[oct]++
return ip.String()
}
func vmiHasCustomMacAddress(vmi *v1.VirtualMachineInstance) bool {
return vmi.Spec.Domain.Devices.Interfaces != nil &&
vmi.Spec.Domain.Devices.Interfaces[0].MacAddress != ""
}
| {
expectResult := console.ShellFail
if expectSuccess {
expectResult = console.ShellSuccess
}
var clientCommand string
if netutils.IsIPv6String(serverIP) {
clientCommand = fmt.Sprintf("curl %s\n", net.JoinHostPort(serverIP, strconv.Itoa(tcpPort)))
} else {
clientCommand = fmt.Sprintf("echo test | nc %s %d -i 1 -w 1 1> /dev/null\n", serverIP, tcpPort)
}
return []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: clientCommand},
&expect.BExp{R: console.PromptExpression},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: expectResult},
}
} |
crypto.go | package crypto
import (
"bytes"
"crypto/ecdsa"
"crypto/rand"
"encoding/binary"
"errors"
"math/big"
"xfsgo/common"
"xfsgo/common/ahash"
"xfsgo/crypto/secp256k1"
)
const defaultKeyPackType = uint8(1)
const DefaultKeyPackVersion = uint8(1)
const DigestLength = 32
func GenPrvKey() (*ecdsa.PrivateKey, error) {
return ecdsa.GenerateKey(secp256k1.S256(), rand.Reader)
}
func MustGenPrvKey() *ecdsa.PrivateKey {
	key, err := GenPrvKey()
	if err != nil {
		// A Must* helper should not silently return a nil key on failure.
		panic(err)
	}
	return key
}
func PubKeyEncode(p ecdsa.PublicKey) []byte {
if p.Curve == nil || p.X == nil || p.Y == nil {
return nil
}
xbs := p.X.Bytes()
ybs := p.Y.Bytes()
	buf := make([]byte, 0, len(xbs)+len(ybs))
	buf = append(buf, xbs...)
	buf = append(buf, ybs...)
return buf
}
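// Checksum returns the first common.AddrCheckSumLen bytes of a double
// SHA256 over the payload; it is appended to addresses so corruption
// can be detected.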
func Checksum(payload []byte) []byte {
first := ahash.SHA256(payload)
second := ahash.SHA256(first)
return second[:common.AddrCheckSumLen]
}
func VerifyAddress(addr common.Address) bool {
	want := Checksum(addr.Payload())
	got := addr.Checksum()
	return bytes.Equal(want, got)
}
func DefaultPubKey2Addr(p ecdsa.PublicKey) common.Address {
return PubKey2Addr(common.DefaultAddressVersion, p)
}
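// PubKey2Addr derives an address laid out as
// version || RIPEMD160(SHA256(pubkey)) || checksum(version || hash).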
func PubKey2Addr(version uint8, p ecdsa.PublicKey) common.Address {
pubEnc := PubKeyEncode(p)
pubHash256 := ahash.SHA256(pubEnc)
pubHash := ahash.Ripemd160(pubHash256)
payload := append([]byte{version}, pubHash...)
cs := Checksum(payload)
full := append(payload, cs...)
return common.Bytes2Address(full)
}
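// EncodePrivateKey packs a private key as version || keyPackType || D,
// left-padding the scalar D with zeros to the curve-order byte length
// (mirroring DecodePrivateKey); it returns nil if D is too wide to fit.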
func EncodePrivateKey(version uint8, key *ecdsa.PrivateKey) []byte {
dbytes := key.D.Bytes()
curve := secp256k1.S256()
curveOrder := curve.Params().N
privateKey := make([]byte, (curveOrder.BitLen()+7)/8)
for len(dbytes) > len(privateKey) {
if dbytes[0] != 0 {
return nil
}
dbytes = dbytes[1:]
}
copy(privateKey[len(privateKey)-len(dbytes):], dbytes)
buf := append([]byte{version, defaultKeyPackType}, privateKey...)
return buf
}
func DefaultEncodePrivateKey(key *ecdsa.PrivateKey) []byte {
return EncodePrivateKey(DefaultKeyPackVersion, key)
}
func DecodePrivateKey(bs []byte) (uint8, *ecdsa.PrivateKey, error) {
if len(bs) <= 2 {
return 0, nil, errors.New("unknown private key version")
}
version := bs[0]
keytype := bs[1]
payload := bs[2:]
priv := new(ecdsa.PrivateKey)
if keytype == 1 {
k := new(big.Int).SetBytes(payload)
curve := secp256k1.S256()
curveOrder := curve.Params().N
if k.Cmp(curveOrder) >= 0 {
return 0, nil, errors.New("invalid elliptic curve private key value")
}
priv.Curve = curve
priv.D = k
privateKey := make([]byte, (curveOrder.BitLen()+7)/8)
for len(payload) > len(privateKey) {
if payload[0] != 0 {
return 0, nil, errors.New("invalid private key length")
}
payload = payload[1:]
}
// Some private keys remove all leading zeros, this is also invalid
// according to [SEC1] but since OpenSSL used to do this, we ignore
// this too.
copy(privateKey[len(privateKey)-len(payload):], payload)
priv.X, priv.Y = curve.ScalarBaseMult(privateKey)
} else {
return 0, nil, errors.New("unknown private key encrypt type")
}
return version, priv, nil
}
func ByteHash256(raw []byte) common.Hash {
h := ahash.SHA256(raw)
return common.Bytes2Hash(h)
}
func CreateAddress(addrHash common.Hash, nonce uint64) common.Address {
var nonceBytes [8]byte
binary.LittleEndian.PutUint64(nonceBytes[:], nonce)
mix := append(addrHash[:], nonceBytes[:]...)
h := ahash.SHA256(mix)
return common.Bytes2Address(h)
}
single_node.rs | mod fixtures;
use fixtures::{init_log, MemRouter, MemVoteFactor, NodeId};
use std::sync::Arc;
use std::thread::sleep;
use std::time::Duration;
use tender::{InitialMode, Options, State};
/// Single-node initialization test.
#[test]
fn test_single_node() {
init_log();
let mem_router = Arc::new(MemRouter::new(1));
let node = NodeId::new(1, 1);
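// Register the node; it should stay in Startup (term 0, no leader) until
// options are applied and the node is explicitly initialized below.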
mem_router.new_node(node, MemVoteFactor::new(0));
sleep(Duration::from_secs(1));
mem_router.assert_node_state(node, State::Startup, 0, None);
let options = Options::builder()
.election_timeout_min(1000)
.election_timeout_max(1100)
.heartbeat_interval(300)
.build()
.unwrap();
mem_router.update_node_options(node, options);
sleep(Duration::from_secs(1));
mem_router.assert_node_state(node, State::Startup, 0, None);
mem_router.init_node(node, Vec::new(), InitialMode::Normal);
sleep(Duration::from_secs(1));
mem_router.assert_node_state(node, State::Leader, 1, Some(node));
}
/// Single-node initialization test: observer.
#[test]
fn test_single_node_as_observer() {
init_log();
let mem_router = Arc::new(MemRouter::new(1));
let node = NodeId::new(1, 1);
mem_router.new_node(node, MemVoteFactor::new(0));
sleep(Duration::from_secs(1));
mem_router.assert_node_state(node, State::Startup, 0, None);
let options = Options::builder()
.election_timeout_min(1000)
.election_timeout_max(1100)
.heartbeat_interval(300)
.build()
.unwrap();
mem_router.update_node_options(node, options);
sleep(Duration::from_secs(1));
mem_router.assert_node_state(node, State::Startup, 0, None);
mem_router.init_node(node, Vec::new(), InitialMode::AsObserver);
sleep(Duration::from_secs(1));
mem_router.assert_node_state(node, State::Observer, 0, None);
}
update-message.dto.ts | import { PartialType } from '@nestjs/swagger';
import { CreateMessageDto } from './create-message.dto';
export class UpdateMessageDto extends PartialType(CreateMessageDto) {}
viz.py | """Functions to visualize outputs at different stages of GTSFM.
Authors: Ayush Baid
"""
import os
from typing import List, Optional, Tuple
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
from gtsam import Pose3
from matplotlib.axes._axes import Axes
import gtsfm.utils.geometry_comparisons as comp_utils
import gtsfm.utils.images as image_utils
import gtsfm.utils.io as io_utils
from gtsfm.common.gtsfm_data import GtsfmData
from gtsfm.common.image import Image
from gtsfm.common.keypoints import Keypoints
COLOR_RED = (255, 0, 0)
COLOR_GREEN = (0, 255, 0)
def set_axes_equal(ax: Axes):
"""
Make axes of a 3D plot have equal scale so that spheres appear as spheres, cubes as cubes, etc. This is one
possible workaround for Matplotlib's ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Ref: https://github.com/borglab/gtsam/blob/develop/python/gtsam/utils/plot.py#L13
Args:
ax: axis for the plot.
"""
# get the min and max value for each of (x, y, z) axes as 3x2 matrix.
# This gives us the bounds of the minimum volume cuboid encapsulating all
# data.
limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
# find the centroid of the cuboid
centroid = np.mean(limits, axis=1)
# pick the largest edge length for this cuboid
largest_edge_length = np.max(np.abs(limits[:, 1] - limits[:, 0]))
# set new limits to draw a cube using the largest edge length
radius = 0.5 * largest_edge_length
ax.set_xlim3d([centroid[0] - radius, centroid[0] + radius])
ax.set_ylim3d([centroid[1] - radius, centroid[1] + radius])
ax.set_zlim3d([centroid[2] - radius, centroid[2] + radius])
def draw_circle_cv2(image: Image, x: int, y: int, color: Tuple[int, int, int], circle_size: int = 10) -> Image:
"""Draw a solid circle on the image.
Args:
image: image to draw the circle on.
x: x coordinate of the center of the circle.
y: y coordinate of the center of the circle.
color: RGB color of the circle.
circle_size (optional): the size of the circle (in pixels). Defaults to 10.
Returns:
Image: image with the circle drawn on it.
"""
return Image(
cv.circle(image.value_array, center=(x, y), radius=circle_size, color=color, thickness=-1) # solid circle
)
def draw_line_cv2(
image: Image, x1: int, y1: int, x2: int, y2: int, line_color: Tuple[int, int, int], line_thickness: int = 10,
) -> Image:
"""Draw a line on the image from coordinates (x1, y1) to (x2, y2).
Args:
image: image to draw the line on.
x1: x coordinate of start of the line.
y1: y coordinate of start of the line.
x2: x coordinate of end of the line.
y2: y coordinate of end of the line.
line_color: color of the line.
line_thickness (optional): line thickness. Defaults to 10.
Returns:
Image: image with the line drawn on it.
"""
return Image(cv.line(image.value_array, (x1, y1), (x2, y2), line_color, line_thickness, cv.LINE_AA))
def plot_twoview_correspondences(
image_i1: Image,
image_i2: Image,
kps_i1: Keypoints,
kps_i2: Keypoints,
corr_idxs_i1i2: np.ndarray,
inlier_mask: Optional[np.ndarray] = None,
dot_color: Optional[Tuple[int, int, int]] = None,
max_corrs: Optional[int] = 50,
) -> Image:
"""Plot correspondences between two images as lines between two circles.
Args:
image_i1: first image.
image_i2: second image.
kps_i1: keypoints for image_i1.
kps_i2: keypoints for image_i2.
corr_idxs_i1i2: indices of correspondences between i1 and i2.
inlier_mask (optional): inlier mask for correspondences as boolean array. Defaults to None.
dot_color (optional): color for keypoints. Defaults to (0, 0, 0).
max_corrs (optional): max number of correspondences to plot. Defaults to 50.
Returns:
image visualizing correspondences between two images.
"""
image_i1, image_i2, scale_i1, scale_i2 = image_utils.match_image_widths(image_i1, image_i2)
result = image_utils.vstack_image_pair(image_i1, image_i2)
if max_corrs is not None and corr_idxs_i1i2.shape[0] > max_corrs:
# subsample matches
corr_idxs_i1i2 = corr_idxs_i1i2[np.random.choice(corr_idxs_i1i2.shape[0], max_corrs)]
for corr_idx in range(corr_idxs_i1i2.shape[0]):
# mark the points in both images as circles, and draw connecting line
idx_i1, idx_i2 = corr_idxs_i1i2[corr_idx]
x_i1 = (kps_i1.coordinates[idx_i1, 0] * scale_i1[0]).astype(np.int32)
y_i1 = (kps_i1.coordinates[idx_i1, 1] * scale_i1[1]).astype(np.int32)
x_i2 = (kps_i2.coordinates[idx_i2, 0] * scale_i2[0]).astype(np.int32)
y_i2 = (kps_i2.coordinates[idx_i2, 1] * scale_i2[1]).astype(np.int32) + image_i1.height
# drawing correspondences with optional inlier mask
if inlier_mask is None:
line_color = tuple([int(c) for c in np.random.randint(0, 255 + 1, 3)])
elif inlier_mask[corr_idx]:
line_color = COLOR_GREEN
else:
line_color = COLOR_RED
result = draw_line_cv2(result, x_i1, y_i1, x_i2, y_i2, line_color, line_thickness=2)
if dot_color is None:
dot_color = line_color
result = draw_circle_cv2(result, x_i1, y_i1, dot_color, circle_size=2)
result = draw_circle_cv2(result, x_i2, y_i2, dot_color, circle_size=2)
return result
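# Usage sketch (hypothetical inputs): given two images, their detected
# keypoints, and an (N, 2) integer array of correspondence indices, the call
# below stacks the images vertically and draws up to `max_corrs` randomly
# sampled matches:
#
#   viz = plot_twoview_correspondences(img_i1, img_i2, kps_i1, kps_i2, corr_idxs)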
def plot_sfm_data_3d(sfm_data: GtsfmData, ax: Axes, max_plot_radius: float = 50) -> None:
"""Plot the camera poses and landmarks in 3D matplotlib plot.
Args:
sfm_data: SfmData object with camera and tracks.
ax: axis to plot on.
max_plot_radius: maximum distance from any camera beyond which a point will not be plotted.
"""
camera_poses = [sfm_data.get_camera(i).pose() for i in sfm_data.get_valid_camera_indices()]
plot_poses_3d(camera_poses, ax)
num_tracks = sfm_data.number_tracks()
# Restrict 3d points to some radius of camera poses
points_3d = np.array([list(sfm_data.get_track(j).point3()) for j in range(num_tracks)])
nearby_points_3d = comp_utils.get_points_within_radius_of_cameras(camera_poses, points_3d, max_plot_radius)
# plot 3D points
for landmark in nearby_points_3d:
ax.plot(landmark[0], landmark[1], landmark[2], "g.", markersize=1)
def plot_poses_3d(
wTi_list: List[Pose3], ax: Axes, center_marker_color: str = "k", label_name: Optional[str] = None
) -> None:
"""Plot poses in 3D as dots for centers and lines denoting the orthonormal
coordinate system for each camera.
Color convention: R -> x axis, G -> y axis, B -> z axis.
Args:
wTi_list: list of poses to plot.
ax: axis to plot on.
center_marker_color (optional): color for camera center marker. Defaults to "k".
label_name (optional): label for the poses in the plot legend. Defaults to None.
"""
spec = "{}.".format(center_marker_color)
for i, wTi in enumerate(wTi_list):
x, y, z = wTi.translation().squeeze()
if i > 0:
# for the first loop iteration, add the label to the plot
# for the rest of iterations, set label to None (otherwise would be duplicated in legend)
label_name = None
ax.plot(x, y, z, spec, markersize=10, label=label_name)
R = wTi.rotation().matrix()
# getting the direction of the coordinate system (x, y, z axes)
default_axis_length = 0.5
v1 = R[:, 0] * default_axis_length
v2 = R[:, 1] * default_axis_length
v3 = R[:, 2] * default_axis_length
ax.plot3D([x, x + v1[0]], [y, y + v1[1]], [z, z + v1[2]], c="r")
ax.plot3D([x, x + v2[0]], [y, y + v2[1]], [z, z + v2[2]], c="g")
ax.plot3D([x, x + v3[0]], [y, y + v3[1]], [z, z + v3[2]], c="b")
def plot_and_compare_poses_3d(wTi_list: List[Pose3], wTi_list_: List[Pose3]) -> None:
"""Plots two sets poses in 3D with different markers to compare.
The markers are colored black (k) and cyan (c) for the two lists.
Args:
wTi_list: first set of poses.
wTi_list_: second set of poses.
"""
fig = plt.figure()
ax = fig.add_subplot(projection="3d")  # fig.gca(projection=...) is removed in recent matplotlib
plot_poses_3d(wTi_list, ax, center_marker_color="k")
plot_poses_3d(wTi_list_, ax, center_marker_color="c")
set_axes_equal(ax)
plt.show()
def save_twoview_correspondences_viz(
image_i1: Image,
image_i2: Image,
keypoints_i1: Keypoints,
keypoints_i2: Keypoints,
corr_idxs_i1i2: np.ndarray,
file_path: str,
) -> None:
"""Visualize correspondences between pairs of images.
Args:
image_i1: image #i1.
image_i2: image #i2.
keypoints_i1: detected Keypoints for image #i1.
keypoints_i2: detected Keypoints for image #i2.
corr_idxs_i1i2: correspondence indices.
file_path: file path to save the visualization.
"""
plot_img = plot_twoview_correspondences(image_i1, image_i2, keypoints_i1, keypoints_i2, corr_idxs_i1i2)
io_utils.save_image(plot_img, file_path)
def save_sfm_data_viz(sfm_data: GtsfmData, folder_name: str) -> None:
"""Visualize the camera poses and 3d points in SfmData.
Args:
sfm_data: data to visualize.
folder_name: folder to save the visualization at.
"""
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
plot_sfm_data_3d(sfm_data, ax)
set_axes_equal(ax)
# save the 3D plot in the original view
fig.savefig(os.path.join(folder_name, "3d.png"))
# save the BEV representation
default_camera_elevation = 100  # elevation viewing angle, in degrees (close to top-down)
ax.view_init(azim=0, elev=default_camera_elevation)
fig.savefig(os.path.join(folder_name, "bev.png"))
plt.close(fig)
def save_camera_poses_viz(
pre_ba_sfm_data: GtsfmData, post_ba_sfm_data: GtsfmData, gt_pose_graph: Optional[List[Pose3]], folder_name: str
) -> None:
"""Visualize the camera pose and save to disk.
Args:
pre_ba_sfm_data: data input to bundle adjustment.
post_ba_sfm_data: output of bundle adjustment.
gt_pose_graph: ground truth poses.
folder_name: folder to save the visualization at.
"""
# extract camera poses
pre_ba_poses = []
for i in pre_ba_sfm_data.get_valid_camera_indices():
pre_ba_poses.append(pre_ba_sfm_data.get_camera(i).pose())
post_ba_poses = []
for i in post_ba_sfm_data.get_valid_camera_indices():
post_ba_poses.append(post_ba_sfm_data.get_camera(i).pose())
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
if gt_pose_graph is not None:
plot_poses_3d(gt_pose_graph, ax, center_marker_color="m", label_name="GT")
plot_poses_3d(pre_ba_poses, ax, center_marker_color="c", label_name="Pre-BA")
plot_poses_3d(post_ba_poses, ax, center_marker_color="k", label_name="Post-BA")
ax.legend(loc="upper left")
set_axes_equal(ax)
| # save the BEV representation
default_camera_elevation = 100  # elevation viewing angle, in degrees (close to top-down)
ax.view_init(azim=0, elev=default_camera_elevation)
fig.savefig(os.path.join(folder_name, "poses_bev.png"))
plt.close(fig) | # save the 3D plot in the original view
fig.savefig(os.path.join(folder_name, "poses_3d.png"))
|
opt.rs | //! A series of optimisation passes for laser frames.
use crate::lerp::Lerp;
use crate::point::{Point, Position, RawPoint};
use hashbrown::{HashMap, HashSet};
use petgraph::visit::EdgeRef;
use petgraph::Undirected;
/// Represents a line segment over which the laser scanner will travel.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct Segment {
pub start: Point,
pub end: Point,
pub kind: SegmentKind,
}
/// Describes whether a line segment between two points is blank or lit.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub enum SegmentKind {
Blank,
Lit,
}
/// A type used to represent a graph describing the points in a frame and how they are joined.
///
/// Only lit edges are represented in this graph.
pub type PointGraph = petgraph::Graph<Point, (), Undirected, u32>;
/// A type used to represent a graph of points that contains at least one euler circuit.
pub type EulerGraph = petgraph::Graph<Point, SegmentKind, Undirected, u32>;
/// A type used to represent a euler circuit through a euler graph.
pub type EulerCircuit = Vec<EdgeIndex>;
type EdgeIndex = petgraph::graph::EdgeIndex<u32>;
type NodeIndex = petgraph::graph::NodeIndex<u32>;
/// An iterator yielding all lit line segments.
#[derive(Clone)]
pub struct Segments<I> {
points: I,
last_point: Option<Point>,
}
/// Configuration options for eulerian circuit interpolation.
#[repr(C)]
#[derive(Clone, Debug, PartialEq)]
pub struct InterpolationConfig {
/// The minimum distance the interpolator can travel along an edge before a new point is
/// required.
pub distance_per_point: f32,
/// The number of points to insert at the end of a blank to account for light modulator delay.
pub blank_delay_points: u32,
/// The amount of delay to add based on the angle of the corner in radians.
pub radians_per_point: f32,
}
/// Parameters for the frame interpolator.
#[derive(Copy, Clone, Debug, Default, PartialEq)]
pub struct InterpolationConfigBuilder {
pub distance_per_point: Option<f32>,
pub blank_delay_points: Option<u32>,
pub radians_per_point: Option<f32>,
}
/// For the blank ab: `[a, a.blanked(), b.blanked(), (0..delay).map(|_| b.blanked())]`.
pub const BLANK_MIN_POINTS: u32 = 3;
impl InterpolationConfig {
/// The default distance the interpolator can travel before a new point is required.
pub const DEFAULT_DISTANCE_PER_POINT: f32 = 0.1;
/// The default number of points inserted for the end of each blank segment.
pub const DEFAULT_BLANK_DELAY_POINTS: u32 = 10;
/// The default radians per point of delay to reduce corner inertia.
pub const DEFAULT_RADIANS_PER_POINT: f32 = 0.6;
/// Start building a new `InterpolationConfig`.
pub fn start() -> InterpolationConfigBuilder {
InterpolationConfigBuilder::default()
}
}
impl InterpolationConfigBuilder {
/// The minimum distance the interpolator can travel along an edge before a new point is
/// required.
///
/// By default, this value is `InterpolationConfig::DEFAULT_DISTANCE_PER_POINT`.
pub fn distance_per_point(mut self, dpp: f32) -> Self {
self.distance_per_point = Some(dpp);
self
}
/// The number of points to insert at the end of a blank to account for light modulator delay.
///
/// By default, this value is `InterpolationConfig::DEFAULT_BLANK_DELAY_POINTS`.
pub fn blank_delay_points(mut self, points: u32) -> Self {
self.blank_delay_points = Some(points);
self
}
/// The amount of delay to add based on the angle of the corner in radians.
///
/// By default, this value is `InterpolationConfig::DEFAULT_RADIANS_PER_POINT`.
pub fn radians_per_point(mut self, radians: f32) -> Self {
self.radians_per_point = Some(radians);
self
}
/// Build the `InterpolationConfig`, falling back to defaults where necessary.
pub fn build(self) -> InterpolationConfig {
InterpolationConfig {
distance_per_point: self.distance_per_point
.unwrap_or(InterpolationConfig::DEFAULT_DISTANCE_PER_POINT),
blank_delay_points: self.blank_delay_points
.unwrap_or(InterpolationConfig::DEFAULT_BLANK_DELAY_POINTS),
radians_per_point: self.radians_per_point
.unwrap_or(InterpolationConfig::DEFAULT_RADIANS_PER_POINT),
}
}
}
impl Default for InterpolationConfig {
fn default() -> Self {
Self::start().build()
}
}
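// Builder usage (a sketch): override only the knobs you care about and let
// `build` fall back to the documented defaults for the rest.
//
//     let conf = InterpolationConfig::start()
//         .distance_per_point(0.05)
//         .blank_delay_points(8)
//         .build();
//     assert_eq!(conf.radians_per_point, InterpolationConfig::DEFAULT_RADIANS_PER_POINT);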
impl<I> Iterator for Segments<I>
where
I: Iterator<Item = Point>,
{
type Item = Segment;
fn next(&mut self) -> Option<Self::Item> {
while let Some(end) = self.points.next() {
let start = match self.last_point.replace(end) {
None => continue,
Some(last) => last,
};
// Skip duplicates.
let kind = if start.position == end.position {
if !start.is_blank() && !end.is_blank() {
SegmentKind::Lit
} else {
continue;
}
} else if start.is_blank() && end.is_blank() {
SegmentKind::Blank
} else {
SegmentKind::Lit
};
return Some(Segment { start, end, kind });
}
None
}
}
/// Create an iterator yielding segments from an iterator yielding points.
pub fn points_to_segments<I>(points: I) -> Segments<I::IntoIter>
where
I: IntoIterator<Item = Point>,
{
let points = points.into_iter();
let last_point = None;
Segments { points, last_point }
}
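// Pairing sketch: given points [a, b, b_blank, c_blank, c], where each
// `*_blank` point shares a position with its lit twin, the iterator yields
// a->b (Lit), skips b->b_blank (same position, one end blank), yields
// b_blank->c_blank (Blank), and skips c_blank->c (see the
// `two_vertical_lines_pts` fixture in the tests below).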
/// Convert the given laser frame vector segments to a graph of points.
pub fn segments_to_point_graph<I>(segments: I) -> PointGraph
where
I: IntoIterator<Item = Segment>,
{
// A hashable version of a `Point`, used for removing point duplicates during graph generation.
#[derive(Eq, Hash, PartialEq)]
struct HashPoint {
pos: [i32; 2],
rgb: [u32; 3],
}
struct Node {
ix: NodeIndex,
weight: u32,
}
impl From<Point> for HashPoint {
fn from(p: Point) -> Self {
let [px, py] = p.position;
let [pr, pg, pb] = p.color;
let x = (px * std::i16::MAX as f32) as i32;
let y = (py * std::i16::MAX as f32) as i32;
let r = (pr * std::u16::MAX as f32) as u32;
let g = (pg * std::u16::MAX as f32) as u32;
let b = (pb * std::u16::MAX as f32) as u32;
let pos = [x, y];
let rgb = [r, g, b];
HashPoint { pos, rgb }
}
}
let mut g = PointGraph::default();
let mut pt_to_id = HashMap::new();
// Build the graph.
for seg in segments {
match seg.kind {
SegmentKind::Blank => (),
SegmentKind::Lit => {
let ha = HashPoint::from(seg.start);
let hb = HashPoint::from(seg.end);
let na = {
let n = pt_to_id.entry(ha).or_insert_with(|| {
let ix = g.add_node(seg.start);
let weight = seg.start.weight;
Node { ix, weight }
});
n.weight = std::cmp::max(n.weight, seg.start.weight);
n.ix
};
let nb = {
let n = pt_to_id.entry(hb).or_insert_with(|| {
let ix = g.add_node(seg.end);
let weight = seg.end.weight;
Node { ix, weight }
});
n.weight = std::cmp::max(n.weight, seg.end.weight);
n.ix
};
if na == nb {
continue;
}
if g.find_edge(na, nb).is_none() {
g.add_edge(na, nb, ());
}
}
}
}
g
}
/// Convert a point graph to a euler graph.
///
/// This determines the minimum number of blank segments necessary to create a euler circuit
/// from the given point graph. A euler circuit is useful as it represents a graph that can be
/// drawn unicursally (one continuous path that traverses every edge exactly once).
pub fn point_graph_to_euler_graph(pg: &PointGraph) -> EulerGraph {
// Find the connected components.
let ccs = petgraph::algo::kosaraju_scc(pg);
// The indices of the connected components whose nodes all have an even degree.
let euler_components: hashbrown::HashSet<_> = ccs
.iter()
.enumerate()
.filter(|(_, cc)| cc.iter().all(|&n| pg.edges(n).count() % 2 == 0))
.map(|(i, _)| i)
.collect();
// Represents the nodes to be connected for a single component.
struct ToConnect {
// Connection to the previous component.
prev: NodeIndex,
// Consecutive connections within the component.
inner: Vec<NodeIndex>,
// Connection to the next component.
next: NodeIndex,
}
// Collect the free nodes from each connected component that are to be connected by blanks.
let mut to_connect = vec![];
for (i, cc) in ccs.iter().enumerate() {
if euler_components.contains(&i) {
// Take the first point.
let n = cc[0];
to_connect.push(ToConnect {
prev: n,
inner: vec![],
next: n,
});
} else {
let v: Vec<_> = cc
.iter()
.filter(|&&n| pg.edges(n).count() % 2 != 0)
.collect();
// If there's a single point, connect to itself.
if v.len() == 1 {
let p = *v[0];
let prev = p;
let inner = vec![];
let next = p;
to_connect.push(ToConnect { prev, inner, next });
continue;
} else {
// Otherwise, pair up the odd-degree nodes to convert it to a euler component.
assert_eq!(
v.len() % 2,
0,
"expected even number of odd-degree nodes for non-Euler component",
);
let prev = *v[0];
let inner = v[1..v.len() - 1].iter().map(|&&n| n).collect();
let next = *v[v.len() - 1];
to_connect.push(ToConnect { prev, inner, next });
}
}
}
// Convert the `to_connect` Vec containing the nodes to be connected for each connected
// component to a `Vec` containing the pairs of nodes which will be directly connected.
let mut pairs = vec![];
let mut iter = to_connect.iter().enumerate().peekable();
while let Some((i, this)) = iter.next() {
for ch in this.inner.chunks(2) {
pairs.push((ch[0], ch[1]));
}
match iter.peek() {
Some((_, next)) => pairs.push((this.next, next.prev)),
None if i > 0 => pairs.push((this.next, to_connect[0].prev)),
None => match euler_components.contains(&0) {
// If there is only one component and it is euler, we are done.
true => (),
// If there is only one non-euler, connect it to itself.
false => pairs.push((this.next, this.prev)),
},
}
}
// Turn the graph into a euler graph by adding the blanks.
let mut eg = pg.map(|_n_ix, n| n.clone(), |_e_ix, _| SegmentKind::Lit);
for (na, nb) in pairs {
eg.add_edge(na, nb, SegmentKind::Blank);
}
eg
}
/// Given a Euler Graph describing the vector image to be drawn, return the optimal Euler Circuit
/// describing the path over which the laser should travel.
///
/// This is Hierholzer's Algorithm with the amendment that during traversal of each vertex the edge
/// with the closest angle to a straight line is always chosen.
pub fn euler_graph_to_euler_circuit(eg: &EulerGraph) -> EulerCircuit {
// If there is at most one node, there is no room for edges.
if eg.node_count() <= 1 {
return vec![];
}
// Begin the traversals to build the circuit, starting at `v0`.
let start_n = eg
.node_indices()
.next()
.expect("expected at least two nodes, found none");
let mut visited: HashSet<EdgeIndex> = HashSet::new();
let mut visit_order: Vec<EdgeIndex> = vec![];
loop {
// Find a node in the visit order with untraversed edges, or pick one to begin if we're
// just starting. We will do a traversal from this node. Keep track of where in the
// existing `visit_order` we should merge this new traversal. If there are no nodes with
// untraversed edges, we are done.
let (merge_ix, n) = match visit_order.is_empty() {
true => (0, start_n),
false => {
match visit_order
.iter()
.map(|&e| eg.raw_edges()[e.index()].source())
.enumerate()
.find(|&(_i, n)| eg.edges(n).any(|e| !visited.contains(&e.id())))
{
Some(n) => n,
None => break,
}
}
};
let traversal = traverse_unvisited(n, eg, &mut visited);
let new_visit_order = visit_order
.iter()
.take(merge_ix)
.cloned()
.chain(traversal)
.chain(visit_order.iter().skip(merge_ix).cloned())
.collect();
visit_order = new_visit_order;
}
visit_order
}
// A traversal through unvisited edges of the graph starting from `n`.
//
// Traversal ends when `n` is reached again.
//
// The returned `Vec` contains the index of each edge traversed.
fn traverse_unvisited(
start: NodeIndex,
eg: &EulerGraph,
visited: &mut HashSet<EdgeIndex>,
) -> Vec<EdgeIndex> {
let mut n = start;
let mut traversal: Vec<EdgeIndex> = vec![];
loop {
// Find the straightest edge that hasn't yet been traversed.
let e_ref = {
let mut untraversed_edges = eg.edges(n).filter(|e_ref| !visited.contains(&e_ref.id()));
let init_e_ref = untraversed_edges
.next()
.expect("expected a strongly connected euler graph");
match traversal
.last()
.map(|e| eg.raw_edges()[e.index()].source())
.map(|n| eg[n].position)
{
// If this is the first edge in the traversal, use the first ref.
None => init_e_ref,
// Retrieve the three positions used to determine the angle.
Some(prev_source_p) => {
let source_p = eg[init_e_ref.source()].position;
let target_p = eg[init_e_ref.target()].position;
let init_dist = straight_angle_variance(prev_source_p, source_p, target_p);
let init = (init_e_ref, init_dist);
let (e_ref, _) = untraversed_edges.fold(init, |best, e_ref| {
let (_, best_dist) = best;
let target_p = eg[e_ref.target()].position;
let dist = straight_angle_variance(prev_source_p, source_p, target_p);
if dist < best_dist {
(e_ref, dist)
} else {
best
}
});
e_ref
}
}
};
// Add the edge into our visitation record.
let e = e_ref.id();
n = e_ref.target();
visited.insert(e);
traversal.push(e);
// If this edge brings us back to the start, we have finished this traversal.
if e_ref.target() == start {
break;
}
}
traversal
}
// Given an angle described by points a -> b -> c, return the variance from a straight angle in
// radians.
fn straight_angle_variance([ax, ay]: Position, [bx, by]: Position, [cx, cy]: Position) -> f32 {
let [ux, uy] = [bx - ax, by - ay];
let [vx, vy] = [cx - bx, cy - by];
let ur = uy.atan2(ux);
let vr = vy.atan2(vx);
let diff_rad = vr - ur;
// Convert the radians to the angular distance.
fn angular_dist(rad: f32) -> f32 {
let rad = rad.abs();
if rad > std::f32::consts::PI {
-rad + std::f32::consts::PI * 2.0
} else {
rad
}
}
angular_dist(diff_rad)
}
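// Worked examples (a sketch): collinear points a=(0,0), b=(1,0), c=(2,0)
// give u = v = (1, 0), so the variance is 0. A right-angle turn with
// c=(1,-1) gives directions 0 and -PI/2, so the variance is PI/2.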
fn distance_squared(a: Position, b: Position) -> f32 {
let [ax, ay] = a;
let [bx, by] = b;
let [abx, aby] = [bx - ax, by - ay];
abx * abx + aby * aby
}
/// The number of points used per blank segment given the `blank_delay_points` from a config.
pub fn blank_segment_point_count(a_weight: u32, blank_delay_points: u32) -> u32 {
a_weight + BLANK_MIN_POINTS + blank_delay_points
}
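// Budget sketch: with `a_weight = 0` and the default `blank_delay_points` of
// 10, a blank segment costs 0 + BLANK_MIN_POINTS (3) + 10 = 13 points.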
/// Returns the points used to blank between two given lit points *a* and *b*.
pub fn blank_segment_points(
a: Point,
br: RawPoint,
blank_delay_points: u32,
) -> impl Iterator<Item = RawPoint> |
/// The number of points added at a lit corner given its angle and angular delay rate.
pub fn corner_point_count(rad: f32, corner_delay_radians_per_point: f32) -> u32 {
(rad / corner_delay_radians_per_point) as _
}
/// The minimum points for traversing a lit segment (not including end corner delays).
pub fn distance_min_point_count(dist: f32, min_distance_per_point: f32) -> u32 {
// There must be at least one point at the beginning of the line.
const MIN_COUNT: u32 = 1;
MIN_COUNT + (dist * min_distance_per_point) as u32
}
/// The minimum number of points used for a lit segment of the given distance and end angle.
///
/// `a_weight` refers to the weight of the point at the beginning of the segment.
pub fn lit_segment_min_point_count(
distance: f32,
end_corner_radians: f32,
distance_per_point: f32,
radians_per_point: f32,
a_weight: u32,
) -> u32 {
a_weight
+ corner_point_count(end_corner_radians, radians_per_point)
+ distance_min_point_count(distance, distance_per_point)
}
/// Returns the points that make up a lit segment between *a* and *b* including delay for the end
/// corner.
///
/// `excess_points` are distributed across the distance point count. This is used to allow the
/// interpolation process to evenly distribute left-over points across a frame.
pub fn lit_segment_points(
a: Point,
br: RawPoint,
corner_point_count: u32,
distance_min_point_count: u32,
excess_points: u32,
) -> impl Iterator<Item = RawPoint> {
let dist_point_count = distance_min_point_count + excess_points;
let weight_points = a.to_raw_weighted();
let ar = a.to_raw();
let dist_points = (0..dist_point_count).map(move |i| {
let lerp_amt = i as f32 / dist_point_count as f32;
ar.lerp(&br, lerp_amt)
});
let corner_points = (0..corner_point_count).map(move |_| br);
weight_points.chain(dist_points).chain(corner_points)
}
/// Interpolate the given `EulerCircuit` with the given configuration in order to produce a path
/// ready to be submitted to the DAC.
///
/// The interpolation process will attempt to generate `target_points` number of points along the
/// circuit, but may generate *more* points if the user's `InterpolationConfig` indicates that more
/// are required for interpolating the specified circuit.
///
/// Performs the following steps:
///
/// 1. Determine the minimum number of required points:
/// - 1 for each edge, plus 1 for the final point at the end of the circuit.
/// - The number of points required for each edge.
/// - For lit edges:
/// - The distance of each edge accounting for minimum points per distance.
/// - The angular distance to the following lit edge (none if blank).
/// - For blank edges:
/// - The specified blank delay.
/// 2. If the total is greater than `target_points`, we're done. If not, goto 3.
/// 3. Determine a weight per lit edge based on the distance of each edge.
/// 4. Distribute the remaining points between each lit edge distance based on their weights.
///
/// **Panics** if the given circuit is not actually a valid `EulerCircuit`.
pub fn interpolate_euler_circuit(
ec: &EulerCircuit,
eg: &EulerGraph,
target_points: u32,
conf: &InterpolationConfig,
) -> Vec<RawPoint> {
// Capture a profile of each edge to assist with interpolation.
#[derive(Debug)]
struct EdgeProfile {
a_weight: u32,
kind: EdgeProfileKind,
}
#[derive(Debug)]
enum EdgeProfileKind {
Blank,
Lit {
distance: f32,
end_corner: f32,
}
}
impl EdgeProfile {
// Create an `EdgeProfile` for the edge at the given index.
fn from_index(ix: usize, ec: &EulerCircuit, eg: &EulerGraph) -> Self {
let e = ec[ix];
let e_ref = &eg.raw_edges()[e.index()];
let a = eg[e_ref.source()];
let a_weight = a.weight;
let kind = match e_ref.weight {
SegmentKind::Blank => EdgeProfileKind::Blank,
SegmentKind::Lit => {
let a_pos = a.position;
let b_pos = eg[e_ref.target()].position;
let distance = distance_squared(a_pos, b_pos).sqrt();
let next_ix = (ix + 1) % ec.len();
let e_ref = &eg.raw_edges()[ec[next_ix].index()];
let c_pos = eg[e_ref.target()].position;
let end_corner = straight_angle_variance(a_pos, b_pos, c_pos);
EdgeProfileKind::Lit { distance, end_corner }
}
};
EdgeProfile { a_weight, kind }
}
fn is_lit(&self) -> bool {
match self.kind {
EdgeProfileKind::Lit { .. } => true,
EdgeProfileKind::Blank => false,
}
}
// The lit distance covered by this edge.
fn lit_distance(&self) -> f32 {
match self.kind {
EdgeProfileKind::Lit { distance, .. } => distance,
_ => 0.0,
}
}
// The minimum number of points required to draw the edge.
fn min_points(&self, conf: &InterpolationConfig) -> u32 {
match self.kind {
EdgeProfileKind::Blank => {
blank_segment_point_count(self.a_weight, conf.blank_delay_points)
}
EdgeProfileKind::Lit { distance, end_corner } => {
lit_segment_min_point_count(
distance,
end_corner,
conf.distance_per_point,
conf.radians_per_point,
self.a_weight,
)
}
}
}
// The points for this edge.
fn points(
&self,
e: EdgeIndex,
eg: &EulerGraph,
conf: &InterpolationConfig,
excess_points: u32,
) -> Vec<RawPoint> {
let e_ref = &eg.raw_edges()[e.index()];
let a = eg[e_ref.source()];
let b = eg[e_ref.target()];
match self.kind {
EdgeProfileKind::Blank => {
blank_segment_points(a, b.to_raw(), conf.blank_delay_points)
.collect()
}
EdgeProfileKind::Lit { end_corner, distance } => {
let dist_point_count =
distance_min_point_count(distance, conf.distance_per_point);
let corner_point_count =
corner_point_count(end_corner, conf.radians_per_point);
let br = b.to_raw();
lit_segment_points(a, br, corner_point_count, dist_point_count, excess_points)
.collect()
}
}
}
}
// If the circuit is empty, so is our path.
if ec.is_empty() || target_points == 0 {
return vec![];
}
// Create a profile of each edge containing useful information for interpolation.
let edge_profiles = (0..ec.len())
.map(|ix| EdgeProfile::from_index(ix, ec, eg))
.collect::<Vec<_>>();
// TODO: If the circuit doesn't contain any lit edges, what should we do?
if !edge_profiles.iter().any(|ep| ep.is_lit()) {
return vec![];
}
// The minimum number of points required to display the image.
let min_points = edge_profiles.iter()
.map(|ep| ep.min_points(conf))
.fold(0, |acc, n| acc + n);
// The target number of points not counting the last to be added at the end.
let target_points_minus_last = target_points - 1;
// The excess points distributed across all edges.
let edge_excess_point_counts = if min_points < target_points_minus_last {
// The number of excess points, to be distributed across the lit edges by distance.
let excess_points = target_points_minus_last - min_points;
// The lit distance covered by each edge.
let edge_lit_dists = edge_profiles.iter()
.map(|ep| (ep.is_lit(), ep.lit_distance()))
.collect::<Vec<_>>();
// The total lit distance covered by the traversal.
let total_lit_dist = edge_lit_dists.iter().fold(0.0, |acc, &(_, d)| acc + d);
// Determine the weights for each edge based on distance.
let edge_weights: Vec<(bool, f32)> = match total_lit_dist <= std::f32::EPSILON {
true => {
// If there was no total distance, distribute evenly.
let n_lit_edges = edge_lit_dists.iter().filter(|&&(b, _)| b).count();
edge_lit_dists.iter()
.map(|&(is_lit, _)| (is_lit, 1.0 / n_lit_edges as f32))
.collect()
},
false => {
// Otherwise weight by distance.
edge_lit_dists.iter()
.map(|&(is_lit, dist)| (is_lit, dist / total_lit_dist))
.collect()
}
};
// Multiply the weight by the excess points. Track fractional error and distribute.
let mut v = Vec::with_capacity(ec.len());
let mut err = 0.0;
let mut count = 0;
for (is_lit, w) in edge_weights {
if !is_lit {
v.push(0);
continue;
}
let nf = w * excess_points as f32 + err;
err = nf.fract();
let n = nf as u32;
count += n;
v.push(n);
}
// Check for rounding error.
if count == (excess_points - 1) {
// Find first lit edge index.
let (i, _) = edge_profiles.iter()
.enumerate()
.find(|&(_, ep)| ep.is_lit())
.expect("expected at least one lit edge");
v[i] += 1;
count += 1;
}
// Sanity check that rounding errors have been handled.
debug_assert_eq!(count, excess_points);
v
} else {
vec![0; ec.len()]
};
// Collect all points.
let total_points = std::cmp::max(min_points, target_points);
let mut points = Vec::with_capacity(total_points as usize);
for elem in ec.iter().zip(&edge_profiles).zip(&edge_excess_point_counts) {
let ((&ix, ep), &excess) = elem;
points.extend(ep.points(ix, eg, conf, excess));
}
// Push the last point.
let last_point = eg[eg.raw_edges()[ec.last().unwrap().index()].target()];
points.push(last_point.to_raw());
// Sanity check that we generated at least `target_points`.
debug_assert!(points.len() >= target_points as usize);
points
}
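// Point-budget sketch for the interpolator above (illustrative numbers): if
// the per-edge minimum counts sum to 100 and `target_points` is 150, then
// 150 - 1 - 100 = 49 excess points are shared among the lit edges in
// proportion to their distance, with fractional error carried forward and
// any final off-by-one point granted to the first lit edge.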
#[cfg(test)]
mod test {
use crate::point::{Point, Position};
use hashbrown::HashSet;
use super::{euler_graph_to_euler_circuit, point_graph_to_euler_graph, points_to_segments,
segments_to_point_graph};
use super::{EulerGraph, PointGraph, SegmentKind};
fn graph_eq<N, E, Ty, Ix>(
a: &petgraph::Graph<N, E, Ty, Ix>,
b: &petgraph::Graph<N, E, Ty, Ix>,
) -> bool
where
N: PartialEq,
E: PartialEq,
Ty: petgraph::EdgeType,
Ix: petgraph::graph::IndexType + PartialEq,
{
let a_ns = a.raw_nodes().iter().map(|n| &n.weight);
let b_ns = b.raw_nodes().iter().map(|n| &n.weight);
let a_es = a.raw_edges().iter().map(|e| (e.source(), e.target(), &e.weight));
let b_es = b.raw_edges().iter().map(|e| (e.source(), e.target(), &e.weight));
a_ns.eq(b_ns) && a_es.eq(b_es)
}
fn is_euler_graph<N, E, Ty, Ix>(g: &petgraph::Graph<N, E, Ty, Ix>) -> bool
where
Ty: petgraph::EdgeType,
Ix: petgraph::graph::IndexType,
{
let even_degree = g.node_indices().all(|n| g.edges(n).count() % 2 == 0);
let strongly_connected = petgraph::algo::kosaraju_scc(g).len() == 1;
even_degree && strongly_connected
}
fn white_pt(position: Position) -> Point {
Point {
position,
color: [1.0; 3],
weight: 0,
}
}
fn blank_pt(position: Position) -> Point {
Point {
position,
color: [0.0; 3],
weight: 0,
}
}
fn square_pts() -> [Point; 5] {
let a = white_pt([-1.0, -1.0]);
let b = white_pt([-1.0, 1.0]);
let c = white_pt([1.0, 1.0]);
let d = white_pt([1.0, -1.0]);
[a, b, c, d, a]
}
fn two_vertical_lines_pts() -> [Point; 8] {
let a = [-1.0, -1.0];
let b = [-1.0, 1.0];
let c = [1.0, -1.0];
let d = [1.0, 1.0];
[
white_pt(a),
white_pt(b),
blank_pt(b),
blank_pt(c),
white_pt(c),
white_pt(d),
blank_pt(d),
blank_pt(a),
]
}
#[test]
fn test_points_to_point_graph_no_blanks() {
let pts = square_pts();
let segs = points_to_segments(pts.iter().cloned());
let pg = segments_to_point_graph(segs);
let mut expected = PointGraph::default();
let na = expected.add_node(pts[0]);
let nb = expected.add_node(pts[1]);
let nc = expected.add_node(pts[2]);
let nd = expected.add_node(pts[3]);
expected.add_edge(na, nb, ());
expected.add_edge(nb, nc, ());
expected.add_edge(nc, nd, ());
expected.add_edge(nd, na, ());
assert!(graph_eq(&pg, &expected));
}
#[test]
fn test_points_to_point_graph_with_blanks() {
let pts = two_vertical_lines_pts();
let segs = points_to_segments(pts.iter().cloned());
let pg = segments_to_point_graph(segs);
let mut expected = PointGraph::default();
let na = expected.add_node(pts[0]);
let nb = expected.add_node(pts[1]);
let nc = expected.add_node(pts[4]);
let nd = expected.add_node(pts[5]);
expected.add_edge(na, nb, ());
expected.add_edge(nc, nd, ());
assert!(graph_eq(&pg, &expected));
}
#[test]
fn test_point_graph_to_euler_graph_no_blanks() {
let pts = square_pts();
let segs = points_to_segments(pts.iter().cloned());
let pg = segments_to_point_graph(segs);
let eg = point_graph_to_euler_graph(&pg);
let mut expected = EulerGraph::default();
let na = expected.add_node(pts[0]);
let nb = expected.add_node(pts[1]);
let nc = expected.add_node(pts[2]);
let nd = expected.add_node(pts[3]);
expected.add_edge(na, nb, SegmentKind::Lit);
expected.add_edge(nb, nc, SegmentKind::Lit);
expected.add_edge(nc, nd, SegmentKind::Lit);
expected.add_edge(nd, na, SegmentKind::Lit);
assert!(graph_eq(&eg, &expected));
}
#[test]
fn test_point_graph_to_euler_graph_with_blanks() {
let pts = two_vertical_lines_pts();
let segs = points_to_segments(pts.iter().cloned());
let pg = segments_to_point_graph(segs);
let eg = point_graph_to_euler_graph(&pg);
assert!(is_euler_graph(&eg));
let pg_ns: Vec<_> = pg.raw_nodes().iter().map(|n| n.weight).collect();
let eg_ns: Vec<_> = eg.raw_nodes().iter().map(|n| n.weight).collect();
assert_eq!(pg_ns, eg_ns);
assert_eq!(eg.raw_edges().iter().filter(|e| e.weight == SegmentKind::Blank).count(), 2);
assert_eq!(eg.raw_edges().iter().filter(|e| e.weight == SegmentKind::Lit).count(), 2);
}
#[test]
fn test_euler_graph_to_euler_circuit_no_blanks() {
let pts = square_pts();
let segs = points_to_segments(pts.iter().cloned());
let pg = segments_to_point_graph(segs);
let eg = point_graph_to_euler_graph(&pg);
let ec = euler_graph_to_euler_circuit(&eg);
let mut ns = eg.node_indices();
let na = ns.next().unwrap();
let nb = ns.next().unwrap();
let nc = ns.next().unwrap();
let nd = ns.next().unwrap();
let expected = vec![
eg.find_edge(na, nb).unwrap(),
eg.find_edge(nb, nc).unwrap(),
eg.find_edge(nc, nd).unwrap(),
eg.find_edge(nd, na).unwrap(),
];
assert_eq!(ec, expected);
}
#[test]
fn test_euler_graph_to_euler_circuit_with_blanks() {
let pts = two_vertical_lines_pts();
let segs = points_to_segments(pts.iter().cloned());
let pg = segments_to_point_graph(segs);
let eg = point_graph_to_euler_graph(&pg);
let ec = euler_graph_to_euler_circuit(&eg);
assert_eq!(ec.len(), eg.edge_count());
let mut visited = HashSet::new();
let mut walk = ec.iter().cycle().map(|&e| (e, &eg.raw_edges()[e.index()]));
while visited.len() < 4 {
let (e_id, _) = walk.next().unwrap();
assert!(visited.insert(e_id));
}
}
#[test]
fn test_euler_circuit_duplicate_points() {
let pts = [white_pt([0., 0.]), white_pt([0., 1.]), white_pt([0., 1.])];
let segs = points_to_segments(pts.iter().cloned());
let pg = segments_to_point_graph(segs);
let eg = point_graph_to_euler_graph(&dbg!(pg));
let _ = euler_graph_to_euler_circuit(&dbg!(eg));
}
#[test]
fn test_single_point() {
let pts = [white_pt([0., 0.]), white_pt([0., 0.])];
let segs = points_to_segments(pts.iter().cloned());
let pg = segments_to_point_graph(segs);
let eg = point_graph_to_euler_graph(&dbg!(pg));
let ec = euler_graph_to_euler_circuit(&dbg!(eg));
assert_eq!(ec.len(), 0);
}
}
| {
let ar = a.to_raw();
Some(ar)
.into_iter()
.chain(a.to_raw_weighted())
.chain(Some(ar.blanked()))
.chain(Some(br.blanked()))
.chain((0..blank_delay_points).map(move |_| br.blanked()))
} |
testDataSorted.js | const data = [
{
user: 'tom',
score: 31,
},
{
user: 'bra',
score: 30,
},
{
user: 'joe',
score: 27,
},
{
user: 'gra',
score: 24,
},
{
user: 'but',
score: 21,
},
{
user: 'bom',
score: 19,
},
{
user: 'age',
score: 16,
},
{
user: 'ruf',
score: 10,
},
{
user: 'ni',
score: 5,
},
]; | export default data; | |
lang_steam.go |
import (
"os"
"github.com/hajimehoshi/go-steamworks"
"golang.org/x/text/language"
)
const appID = 1710390
func init() {
if steamworks.RestartAppIfNecessary(appID) {
os.Exit(1)
}
if !steamworks.Init() {
panic("steamworks.Init failed")
}
}
func SystemLang() language.Tag {
switch steamworks.SteamApps().GetCurrentGameLanguage() {
case "english":
return language.English
case "japanese":
return language.Japanese
}
return language.English
} | //go:build !js && steam
// +build !js,steam
package lang |
|
loader.py | from dictknife import loading
from dictknife.jsonknife import get_resolver
from dictknife import DictWalker
def run(*, filename):
def onload(d, resolver, w=DictWalker(["$include"])):
for _, sd in w.walk(d):
subresolver, jsref = resolver.resolve(sd.pop("$include"))
sd.update(subresolver.access_by_json_pointer(jsref))
resolver = get_resolver(filename, onload=onload)
loading.dumpfile(resolver.doc)
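# Behaviour sketch (hypothetical files): given a main.yaml containing
#
#   person:
#     $include: person.yaml#/definitions/person
#
# the walker pops "$include", resolves the reference, and merges the
# referenced mapping into place before the document is dumped.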
def main():
|
if __name__ == "__main__":
main()
| import argparse
# import logging
loading.setup()
# logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument("filename")
args = parser.parse_args()
return run(**vars(args)) |
cli.py | """This module defines functions and globals required for the
command line interface of browser-history."""
import sys
import argparse
from browser_history import get_history, generic, browsers, utils
# get list of all implemented browsers by finding subclasses of generic.Browser
AVAILABLE_BROWSERS = ', '.join(b.__name__ for b in generic.Browser.__subclasses__())
AVAILABLE_FORMATS = ', '.join(generic.Outputs.formats)
def make_parser():
"""Creates an ArgumentParser, configures and returns it.
This was made into a separate function to be used with sphinx-argparse
:rtype: :py:class:`argparse.ArgumentParser`
"""
parser_ = argparse.ArgumentParser(description='''
A tool to retrieve history from
(almost) any browser on (almost) any platform''',
epilog='''
Check out the GitHub repo https://github.com/pesos/browser-history
if you have any issues or want to help contribute''')
parser_.add_argument('-b', '--browser',
default='all',
help=f'''
browser to retrieve history from. Should be one of all, {AVAILABLE_BROWSERS}.
Default is all (gets history from all browsers).''')
parser_.add_argument('-f', '--format',
default="csv",
help=f'''
Format to be used in output. Should be one of {AVAILABLE_FORMATS}.
Default is csv''')
parser_.add_argument('-o', '--output',
default=None,
help='''
File where output is to be written.
If not provided standard output is used.''')
return parser_
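# Example invocations (a sketch; the entry-point name is assumed):
#
#   browser-history                     # all browsers, csv to stdout
#   browser-history -b Chrome -f json -o history.json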
parser = make_parser()
def main():
"""Entrypoint to the command-line interface (CLI) of browser-history.
It parses arguments from sys.argv and performs the appropriate actions.
"""
args = parser.parse_args()
if args.browser == 'all':
outputs = get_history()
else:
try:
# gets browser class by name (string).
selected_browser = args.browser
for browser in generic.Browser.__subclasses__(): | if browser.__name__.lower() == args.browser.lower():
selected_browser = browser.__name__
break
browser_class = getattr(browsers, selected_browser)
except AttributeError:
utils.logger.error('Browser %s is unavailable. Check --help for available browsers',
args.browser)
sys.exit(1)
try:
browser = browser_class().fetch()
outputs = browser
except AssertionError as e:
utils.logger.error(e)
sys.exit(1)
# Format the output
try:
formatted = outputs.formatted(args.format)
except ValueError as e:
utils.logger.error(e)
sys.exit(1)
if args.output is None:
print(formatted)
else:
filename = args.output
with open(filename, 'w') as output_file:
output_file.write(formatted) | |
main.go | package main
import (
"context"
"fmt"
"github.com/esqimo/go-onfido"
)
func | () {
ctx := context.Background()
client, err := onfido.NewClientFromEnv()
if err != nil {
panic(err)
}
if client.Token().Prod() {
panic("onfido token is only for production use")
}
applicant, err := client.CreateApplicant(ctx, onfido.Applicant{
Email: "[email protected]",
FirstName: "Rob",
LastName: "Crowe",
Address: onfido.Address{
BuildingNumber: "18",
Street: "Wind Corner",
Town: "Crawley",
State: "West Sussex",
Postcode: "NW9 5AB",
Country: "GBR",
StartDate: "2018-02-10",
},
})
if err != nil {
panic(err)
}
t, err := client.NewSdkTokenWeb(ctx, applicant.ID, "https://*.onfido.com/documentation/*")
if err != nil {
panic(err)
}
fmt.Printf("Token: %v\n", t.Token)
if err := client.DeleteApplicant(ctx, applicant.ID); err != nil {
panic(err)
}
}
| main |
overloaded-deref-count.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
use std::cell::Cell;
use std::ops::{Deref, DerefMut};
use std::vec::Vec;
struct DerefCounter<T> {
count_imm: Cell<usize>,
count_mut: usize,
value: T
}
impl<T> DerefCounter<T> {
fn new(value: T) -> DerefCounter<T> {
DerefCounter {
count_imm: Cell::new(0),
count_mut: 0,
value,
}
}
fn counts(&self) -> (usize, usize) {
(self.count_imm.get(), self.count_mut)
}
}
impl<T> Deref for DerefCounter<T> {
type Target = T;
fn deref(&self) -> &T {
self.count_imm.set(self.count_imm.get() + 1);
&self.value
}
}
impl<T> DerefMut for DerefCounter<T> {
fn | (&mut self) -> &mut T {
self.count_mut += 1;
&mut self.value
}
}
pub fn main() {
let mut n = DerefCounter::new(0);
let mut v = DerefCounter::new(Vec::new());
let _ = *n; // Immutable deref + copy a POD.
assert_eq!(n.counts(), (1, 0));
let _ = (&*n, &*v); // Immutable deref + borrow.
assert_eq!(n.counts(), (2, 0)); assert_eq!(v.counts(), (1, 0));
let _ = (&mut *n, &mut *v); // Mutable deref + mutable borrow.
assert_eq!(n.counts(), (2, 1)); assert_eq!(v.counts(), (1, 1));
let mut v2 = Vec::new();
v2.push(1);
*n = 5; *v = v2; // Mutable deref + assignment.
assert_eq!(n.counts(), (2, 2)); assert_eq!(v.counts(), (1, 2));
*n -= 3; // Mutable deref + assignment with binary operation.
assert_eq!(n.counts(), (2, 3));
// Immutable deref used for calling a method taking &self. (The
// typechecker is smarter now about doing this.)
(*n).to_string();
assert_eq!(n.counts(), (3, 3));
// Mutable deref used for calling a method taking &mut self.
(*v).push(2);
assert_eq!(v.counts(), (1, 3));
// Check the final states.
assert_eq!(*n, 2);
let expected: &[_] = &[1, 2];
assert_eq!((*v), expected);
}
| deref_mut |
main.rs | #![cfg_attr(target_arch = "wasm32", feature(async_closure))]
#![allow(unused_imports)]
#![allow(unreachable_code)]
use anyhow::{anyhow, Result};
use raw_window_handle::HasRawWindowHandle;
use wgpu::SurfaceError;
use winit::event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent};
use winit::event_loop::{ControlFlow, EventLoop};
use winit::window::WindowBuilder;
use wolf::render::graphics_device::WindowInfo;
use wolf::system::os::sigslot::SigSlot;
use wolf::{
render::{graphics_device::GraphicsDevice, scene::IScene},
system::{chrono::gametime::GameTime, os::runtime::RunTime, script::rhai::Rhai},
w_log,
};
#[cfg(target_arch = "wasm32")] | x + y
}
async fn test() {
let mut script = Rhai::new();
// register add function for our embedded script
script.register_function("add", add);
let res = script.run_return_any::<i64>(r#"add(10, 7)"#);
match res {
Ok(v) => {
w_log!("add returns: {}", v);
}
Err(e) => {
w_log!("add returns error: {:?}", e);
}
};
#[cfg(not(target_arch = "wasm32"))]
{
let f = async move {
println!("t1 started");
RunTime::sleep(std::time::Duration::from_secs(1));
w_log!("t1 just stopped after 2 seconds");
};
// execute thread
RunTime::green_thread(f).await;
RunTime::async_sleep(std::time::Duration::from_secs(2)).await;
}
#[cfg(target_arch = "wasm32")]
{
let f1 = async move {
let js = JavaScript::new(None);
let _js_res = js
.execute(
"
console.log(\"hello from javascript promise\");
const sub = (a, b) => new Promise(resolve => {
setTimeout(() => resolve(a - b), 1000);
});
return await sub(1, 2);
"
.to_owned(),
true,
)
.await;
};
RunTime::spawn_local(f1);
#[cfg(target_arch = "wasm32")]
let f2 = FnOnce!(async move || {
w_log!("t1 worker started");
RunTime::async_sleep(std::time::Duration::from_secs(2)).await;
w_log!("t1 worker just stopped after 5 seconds");
Ok(JsValue::null())
});
// execute thread
RunTime::thread(f2);
}
// create SigSlot
let mut sig_slot = SigSlot::new();
// create slots
let i = 1;
let con_1 = sig_slot.connect(move || {
w_log!("hello from slot{}", i);
});
let con_2 = sig_slot.connect(|| {
w_log!("hello from slot2");
});
// check for connections
if con_1.is_connected() && con_2.is_connected() {
w_log!("slot 1 & 2 was connected");
// disconnect slot 2
con_2.disconnect();
w_log!("slot 2 just disconnected");
}
// wait for threads
RunTime::async_sleep(std::time::Duration::from_secs(1)).await;
// emit all
sig_slot.emit();
}
#[derive(Default)]
struct WScene {}
impl IScene for WScene {
fn load(&self, _p_g_device: &mut GraphicsDevice) -> Result<()> {
w_log!("scene is going to loaded");
RunTime::spawn_local(async move {
test().await;
});
w_log!("scene just loaded");
Ok(())
}
fn render(
&self,
p_gdevice: &mut GraphicsDevice,
p_gametime: &mut GameTime,
) -> std::result::Result<(), wgpu::SurfaceError> {
w_log!(
"scene is rendering {}",
p_gametime.get_total_elapsed_seconds()
);
// get output from surface
let output_res = if let Some(surf) = &p_gdevice.surface {
surf.get_current_texture()
} else {
w_log!("surface is None, make sure use render_to_texture function for offscreen rendering mode");
Err(SurfaceError::Outdated)
};
let output = output_res?;
// create texture view
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
// create command encoder
let mut cmd_encoder =
p_gdevice
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
// execute command
{
let _render_pass = cmd_encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
}
// submit to the queue
p_gdevice
.queue
.submit(std::iter::once(cmd_encoder.finish()));
// send to output
output.present();
Ok(())
}
}
async fn run<I>(p_scene: I) -> Result<()>
where
I: IScene + Sync + 'static,
{
const TRACE: &str = "WSceneManager::run";
// create a window
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.build(&event_loop)
.map_err(|e| anyhow!("could not create window {:?}", e))?;
#[cfg(target_arch = "wasm32")]
{
// Winit prevents sizing with CSS, so we have to set the size manually when on web.
use winit::dpi::PhysicalSize;
window.set_inner_size(PhysicalSize::new(800, 600));
use winit::platform::web::WindowExtWebSys;
web_sys::window()
.and_then(|win| win.document())
.and_then(|doc| {
let dst = doc.get_element_by_id("wolf")?;
let canvas = web_sys::Element::from(window.canvas());
dst.append_child(&canvas).ok()?;
Some(())
})
.expect("couldn't append canvas to document body.");
}
let size = window.inner_size();
let win_info = WindowInfo {
handle: window.raw_window_handle(),
width: size.width,
height: size.height,
};
// create a graphics device
let mut g_device = GraphicsDevice::new(Some(win_info)).await?;
let mut game_time = GameTime::new();
// load scene
let load_res = p_scene.load(&mut g_device);
match load_res {
Ok(_) => {}
Err(e) => {
w_log!("could not load scene because {:?} trace info: {}", e, TRACE);
}
}
// create an event loop
event_loop.run(move |event, _, control_flow| match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::CloseRequested
| WindowEvent::KeyboardInput {
input:
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
},
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
Event::RedrawRequested(window_id) if window_id == window.id() => {
// tick game time
game_time.tick();
// render scene
let render_res = p_scene.render(&mut g_device, &mut game_time);
match render_res {
Ok(()) => {
//Success
}
// Reconfigure the surface if lost
Err(SurfaceError::Lost) => {
g_device.resize(g_device.get_size());
}
// The system is out of memory, we should probably quit
Err(SurfaceError::OutOfMemory) => {
*control_flow = ControlFlow::Exit;
}
// All other errors (Outdated, Timeout) should be resolved by the next frame
Err(e) => {
w_log!("render failed because of {:?}", e);
}
};
}
Event::RedrawEventsCleared => {
// RedrawRequested will only trigger once, unless we manually request it.
window.request_redraw();
}
_ => {}
});
}
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
pub fn main() {
std::panic::set_hook(Box::new(console_error_panic_hook::hook));
// run scene
w_log!("starting wolf-demo in wasm mode");
RunTime::spawn_local(async move {
let _res = run(WScene::default()).await;
});
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::main]
pub async fn main() {
// run scene
w_log!("starting wolf-demo in native mode");
let _res = run(WScene::default()).await;
} | use {wasm_bindgen::prelude::*, wasm_mt::prelude::*, wolf::system::script::javascript::JavaScript};
// Normal function
fn add(x: i64, y: i64) -> i64 { |
cursor_list.rs | //! A generic cursor implementation merging multiple cursors.
use super::Cursor;
/// Provides a cursor interface over a list of cursors.
///
/// The `CursorList` tracks the indices of cursors with the minimum key, and the indices of cursors with
/// the minimum key and minimum value. It performs no clever management of these sets otherwise.
#[derive(Debug)]
pub struct CursorList<K, V, T, R, C: Cursor<K, V, T, R>> {
_phantom: ::std::marker::PhantomData<(K, V, T, R)>,
cursors: Vec<C>,
min_key: Vec<usize>,
min_val: Vec<usize>,
}
impl<K, V, T, R, C: Cursor<K, V, T, R>> CursorList<K, V, T, R, C> where K: Ord, V: Ord {
/// Creates a new cursor list from pre-existing cursors.
pub fn new(cursors: Vec<C>, storage: &[C::Storage]) -> Self {
let mut result = CursorList {
_phantom: ::std::marker::PhantomData,
cursors,
min_key: Vec::new(),
min_val: Vec::new(),
};
result.minimize_keys(storage);
result
}
// Initialize min_key with the indices of cursors with the minimum key.
//
// This method scans the current keys of each cursor, and tracks the indices
// of cursors whose key equals the minimum valid key seen so far. As it goes,
// if it observes an improved key it clears the current list, updates the
// minimum key, and continues.
//
// Once finished, it invokes `minimize_vals()` to ensure the value cursor is
// in a consistent state as well.
fn minimize_keys(&mut self, storage: &[C::Storage]) {
self.min_key.clear();
// Determine the index of the cursor with minimum key.
let mut min_key_opt: Option<&K> = None;
for (index, cursor) in self.cursors.iter().enumerate() {
let key = cursor.get_key(&storage[index]);
if key.is_some() {
if min_key_opt.is_none() || key.lt(&min_key_opt) {
min_key_opt = key;
self.min_key.clear();
}
if key.eq(&min_key_opt) {
self.min_key.push(index);
}
}
}
self.minimize_vals(storage);
}
// Initialize min_val with the indices of minimum key cursors with the minimum value.
//
// This method scans the current values of cursor with minimum keys, and tracks the
// indices of cursors whose value equals the minimum valid value seen so far. As it
// goes, if it observes an improved value it clears the current list, updates the minimum
// value, and continues.
fn minimize_vals(&mut self, storage: &[C::Storage]) {
self.min_val.clear();
// Determine the index of the cursor with minimum value.
let mut min_val: Option<&V> = None;
for &index in self.min_key.iter() {
let val = self.cursors[index].get_val(&storage[index]);
if val.is_some() {
if min_val.is_none() || val.lt(&min_val) {
min_val = val;
self.min_val.clear();
}
if val.eq(&min_val) {
self.min_val.push(index);
}
}
}
}
}
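// Merge sketch: given two cursors positioned at
//
//     c0: (key 1, val "a"), (key 3, val "b")
//     c1: (key 1, val "a"), (key 2, val "c")
//
// construction leaves `min_key = [0, 1]` and `min_val = [0, 1]`; a single
// `step_key` advances both cursors, after which only c1 (now at key 2)
// remains in `min_key`.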
impl<K, V, T, R, C: Cursor<K, V, T, R>> Cursor<K, V, T, R> for CursorList<K, V, T, R, C>
where
K: Ord,
V: Ord {
type Storage = Vec<C::Storage>;
// validation methods
#[inline(always)]
fn key_valid(&self, _storage: &Self::Storage) -> bool { !self.min_key.is_empty() }
#[inline(always)]
fn val_valid(&self, _storage: &Self::Storage) -> bool { !self.min_val.is_empty() }
// accessors
#[inline(always)]
fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K {
debug_assert!(self.key_valid(storage));
debug_assert!(self.cursors[self.min_key[0]].key_valid(&storage[self.min_key[0]]));
self.cursors[self.min_key[0]].key(&storage[self.min_key[0]])
}
#[inline(always)]
fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V {
debug_assert!(self.key_valid(storage));
debug_assert!(self.val_valid(storage));
debug_assert!(self.cursors[self.min_val[0]].val_valid(&storage[self.min_val[0]]));
self.cursors[self.min_val[0]].val(&storage[self.min_val[0]])
}
#[inline(always)]
fn map_times<L: FnMut(&T, R)>(&mut self, storage: &Self::Storage, mut logic: L) {
for &index in self.min_val.iter() {
self.cursors[index].map_times(&storage[index], |t,d| logic(t,d));
}
}
// key methods
#[inline(always)]
fn step_key(&mut self, storage: &Self::Storage) {
for &index in self.min_key.iter() {
self.cursors[index].step_key(&storage[index]);
}
self.minimize_keys(storage);
}
#[inline(always)]
fn seek_key(&mut self, storage: &Self::Storage, key: &K) {
for index in 0 .. self.cursors.len() {
self.cursors[index].seek_key(&storage[index], key);
}
self.minimize_keys(storage);
}
// value methods
#[inline(always)]
fn step_val(&mut self, storage: &Self::Storage) {
for &index in self.min_val.iter() {
self.cursors[index].step_val(&storage[index]);
}
self.minimize_vals(storage);
}
#[inline(always)]
fn seek_val(&mut self, storage: &Self::Storage, val: &V) {
for &index in self.min_key.iter() {
self.cursors[index].seek_val(&storage[index], val);
}
self.minimize_vals(storage);
}
// rewinding methods
#[inline(always)]
fn rewind_keys(&mut self, storage: &Self::Storage) {
for index in 0 .. self.cursors.len() {
self.cursors[index].rewind_keys(&storage[index]);
}
self.minimize_keys(storage);
}
#[inline(always)]
fn rewind_vals(&mut self, storage: &Self::Storage) |
} | {
for &index in self.min_key.iter() {
self.cursors[index].rewind_vals(&storage[index]);
}
self.minimize_vals(storage);
} |
Emoji_1f60b.js | 'use babel';
import React from 'react'
import IconBase from './IconBase' |
export default function Emoji_1f60b(props) {
return (
<IconBase viewBox="0 0 64 64" {...props}>
<g><path fill="#FFDD67" d="M62 31.996C62 48.566 48.566 62 32 62 15.432 62 2 48.566 2 31.996 2 15.434 15.432 2 32 2c16.566 0 30 13.428 30 29.996z"/><path fill="#FF717F" d="M42.004 47.881c-5.027-5.027-2.777-2.779 2.525-8.082 5.307-5.301 3.055-7.553 8.082-2.527 5.023 5.027 5.174 9.926 2.244 12.855-2.929 2.928-7.826 2.779-12.851-2.246z"/><path fill="#E2596C" d="M45.59 38.738l6.849 8.973-8.97-6.852z"/><g fill="#664E27"><path d="M28.526 24.854c-1.859-5.111-4.66-7.669-7.461-7.669s-5.602 2.558-7.461 7.669c-.184.515.775 1.443 1.254.938 1.803-1.901 3.957-2.658 6.207-2.658s4.404.757 6.207 2.658c.479.506 1.438-.423 1.254-.938zM50.396 24.854c-1.859-5.111-4.66-7.669-7.461-7.669s-5.602 2.558-7.461 7.669c-.184.515.775 1.443 1.254.938 1.803-1.901 3.957-2.658 6.207-2.658s4.404.757 6.207 2.658c.478.506 1.437-.423 1.254-.938z"/></g><path fill="#664E27" d="M48.11 33.02c-4.328 6.107-9.451 7.644-16.11 7.644s-11.782-1.536-16.11-7.644c-.604-.85-2.19-.315-1.84.919 2.273 8.005 10 12.668 17.95 12.668s15.677-4.663 17.95-12.668c.351-1.235-1.236-1.769-1.84-.919z"/></g>
</IconBase>
);
} | |
confirm-modal.component.ts | /*
* Copyright 2015 - 2021 TU Dortmund
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Component, Input } from '@angular/core';
import { NgbActiveModal } from '@ng-bootstrap/ng-bootstrap';
@Component({
selector: 'confirm-modal',
templateUrl: './confirm-modal.component.html'
})
export class | {
/** The text message to display. */
@Input()
text: string;
constructor(public modal: NgbActiveModal) {
}
}
| ConfirmModalComponent |
build_client.go | package bosh
import (
"github.com/cloudfoundry-incubator/bosh-backup-and-restore/instance"
"github.com/cloudfoundry-incubator/bosh-backup-and-restore/ssh"
"github.com/cloudfoundry/bosh-cli/director"
"github.com/pkg/errors"
boshuaa "github.com/cloudfoundry/bosh-cli/uaa"
boshlog "github.com/cloudfoundry/bosh-utils/logger"
)
func BuildClient(targetUrl, username, password, caCert string, logger boshlog.Logger) (BoshClient, error) {
config, err := director.NewConfigFromURL(targetUrl)
if err != nil {
return nil, errors.Errorf("invalid bosh URL - %s", err.Error())
}
config.CACert = caCert
directorFactory := director.NewFactory(logger)
info, err := getDirectorInfo(directorFactory, config)
if err != nil {
return nil, err
}
if info.Auth.Type == "uaa" {
uaa, err := buildUaa(info, username, password, caCert, logger)
if err != nil {
return nil, err
}
config.TokenFunc = boshuaa.NewClientTokenSession(uaa).TokenFunc
} else {
config.Client = username
config.ClientSecret = password
}
boshDirector, err := directorFactory.New(config, director.NewNoopTaskReporter(), director.NewNoopFileReporter())
if err != nil {
return nil, errors.Wrap(err, "error building bosh director client")
}
return NewClient(boshDirector, director.NewSSHOpts, ssh.NewSshRemoteRunner, logger, instance.NewJobFinder(logger), NewBoshManifestReleaseMapping), nil
}
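// Example wiring (a sketch; the director URL and credentials are placeholders):
//
//	logger := boshlog.NewLogger(boshlog.LevelError)
//	client, err := BuildClient("https://10.0.0.6:25555", "admin", "password", caCert, logger)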
func getDirectorInfo(directorFactory director.Factory, config director.FactoryConfig) (director.Info, error) {
infoDirector, err := directorFactory.New(config, director.NewNoopTaskReporter(), director.NewNoopFileReporter())
if err != nil {
return director.Info{}, errors.Wrap(err, "error building bosh director client")
}
info, err := infoDirector.Info()
if err != nil {
return director.Info{}, errors.Wrap(err, "bosh director unreachable or unhealthy")
}
return info, nil
}
func | (info director.Info, username, password, cert string, logger boshlog.Logger) (boshuaa.UAA, error) {
urlAsInterface := info.Auth.Options["url"]
url, ok := urlAsInterface.(string)
if !ok {
return nil, errors.Errorf("Expected URL '%s' to be a string", urlAsInterface)
}
uaaConfig, err := boshuaa.NewConfigFromURL(url)
if err != nil {
return nil, errors.Wrap(err, "invalid UAA URL")
}
uaaConfig.CACert = cert
uaaConfig.Client = username
uaaConfig.ClientSecret = password
return boshuaa.NewFactory(logger).New(uaaConfig)
}
| buildUaa |
get_nms_summary_responses.go | // Code generated by go-swagger; DO NOT EDIT.
package server
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/byxorna/nycmesh-tool/generated/go/uisp/models"
)
// GetNmsSummaryReader is a Reader for the GetNmsSummary structure.
type GetNmsSummaryReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *GetNmsSummaryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetNmsSummaryOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 401:
result := NewGetNmsSummaryUnauthorized()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 403:
result := NewGetNmsSummaryForbidden()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 500:
result := NewGetNmsSummaryInternalServerError()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
}
}
// NewGetNmsSummaryOK creates a GetNmsSummaryOK with default headers values
func NewGetNmsSummaryOK() *GetNmsSummaryOK {
return &GetNmsSummaryOK{}
}
/* GetNmsSummaryOK describes a response with status code 200, with default header values.
Successful
*/
type GetNmsSummaryOK struct {
Payload *models.ServerSummary
}
func (o *GetNmsSummaryOK) Error() string {
return fmt.Sprintf("[GET /nms/summary][%d] getNmsSummaryOK %+v", 200, o.Payload)
}
func (o *GetNmsSummaryOK) GetPayload() *models.ServerSummary {
return o.Payload
}
func (o *GetNmsSummaryOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ServerSummary)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetNmsSummaryUnauthorized creates a GetNmsSummaryUnauthorized with default headers values
func NewGetNmsSummaryUnauthorized() *GetNmsSummaryUnauthorized {
return &GetNmsSummaryUnauthorized{}
}
/* GetNmsSummaryUnauthorized describes a response with status code 401, with default header values.
Unauthorized
*/
type GetNmsSummaryUnauthorized struct {
Payload *models.Error
}
func (o *GetNmsSummaryUnauthorized) Error() string {
return fmt.Sprintf("[GET /nms/summary][%d] getNmsSummaryUnauthorized %+v", 401, o.Payload)
}
func (o *GetNmsSummaryUnauthorized) GetPayload() *models.Error {
return o.Payload
}
func (o *GetNmsSummaryUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.Error)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetNmsSummaryForbidden creates a GetNmsSummaryForbidden with default headers values
func NewGetNmsSummaryForbidden() *GetNmsSummaryForbidden {
return &GetNmsSummaryForbidden{}
}
/* GetNmsSummaryForbidden describes a response with status code 403, with default header values.
Forbidden
*/
type GetNmsSummaryForbidden struct {
Payload *models.Error
}
func (o *GetNmsSummaryForbidden) Error() string {
return fmt.Sprintf("[GET /nms/summary][%d] getNmsSummaryForbidden %+v", 403, o.Payload)
}
func (o *GetNmsSummaryForbidden) GetPayload() *models.Error {
return o.Payload
}
func (o *GetNmsSummaryForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.Error)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF |
return nil
}
// NewGetNmsSummaryInternalServerError creates a GetNmsSummaryInternalServerError with default headers values
func NewGetNmsSummaryInternalServerError() *GetNmsSummaryInternalServerError {
return &GetNmsSummaryInternalServerError{}
}
/* GetNmsSummaryInternalServerError describes a response with status code 500, with default header values.
Internal Server Error
*/
type GetNmsSummaryInternalServerError struct {
Payload *models.Error
}
func (o *GetNmsSummaryInternalServerError) Error() string {
return fmt.Sprintf("[GET /nms/summary][%d] getNmsSummaryInternalServerError %+v", 500, o.Payload)
}
func (o *GetNmsSummaryInternalServerError) GetPayload() *models.Error {
return o.Payload
}
func (o *GetNmsSummaryInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.Error)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
| {
return err
} |
radar.service_20220304191408.ts | import { Injectable } from '@nestjs/common';
import { RadarI } from './interfaces/radar.interface';
import { CoordenadaI } from './interfaces/coordenada.interface';
import { ScanI } from './interfaces/scan.interface';
@Injectable()
export class RadarService {
crearLibro(radar: RadarI) {
let protocols = radar["protocols"];
let scans = radar["scan"];
let coordinates: CoordenadaI[];
let scansResult: ScanI[];
// TODO: add a loop over all protocols
switch (protocols[0] as any) {
// case "closest-enemies": // Se deberá priorizar el punto más cercano en el que haya enemigos.
// coordinates = closestEnemies(scans);
// break;
// case "furthest-enemies": // Se deberá priorizar el punto más lejano en el que haya enemigos. ***>100m descarrar
// coordinates = furthestEnemies(scans);
// break;
// case "assist-allies": // Deberan de priorizarse los puntos en los que exista algún aliado.
// coordinates = assistAllies(scans);
// break;
// case "avoid-crossfire": // No debe de atacarse ningún punto en el que haya algún aliado.
// coordinates = avoidCrossfire(scans);
// break;
// case "prioritize-mech": // Debe de atacarse unm ech si se encuentra. En caso negativo, cualquier otro tipo deobjetivo será válido.
// coordinates = prioritizeMech(scans);
// break;
case "avoid-mech": // No debe de atacarse ningún enemigo del tipom ech
scansResult = avoidMech(scans);
break;
default:
break;
}
// select the target
return scansResult[0].coordinates;
}
}
function closestEnem | y { // The closest point containing enemies must be prioritized.
let coordinates: CoordenadaI;
console.log("closest-enemies");
//The first one is repeated, and it has to be < 100
coordinates = scans[0]["coordinates"]; // take the first one
for (let i in scans) {
if (distanciaPunto(coordinates) > distanciaPunto(scans[i]["coordinates"])) {
coordinates = scans[i]["coordinates"];
console.log(scans[i]);
}
}
return coordinates;
}
function furthestEnemies(scans): any { // The furthest point containing enemies must be prioritized. ***discard anything >100m
let coordinates: CoordenadaI;
console.log("furthest-enemies");
//The first one is repeated, and it has to be < 100
coordinates = scans[0]["coordinates"]; // take the first one
for (let i in scans) {
if ((distanciaPunto(coordinates) < distanciaPunto(scans[i]["coordinates"])) && distanciaPunto(scans[i]["coordinates"]) < 100) {
coordinates = scans[i]["coordinates"];
console.log(scans[i]);
}
}
return coordinates;
}
function assistAllies(scans): any { // Points where an ally is present must be prioritized.
let coordinates: CoordenadaI;
console.log("assist-allies");
for (let i in scans) {
if (typeof scans[i]["allies"] !== "undefined") { // si existe
coordinates = scans[i]["coordinates"];
console.log(scans[i]);
}
}
return coordinates;
}
function avoidCrossfire(scans): any { // No point containing an ally may be attacked.
let coordinates: CoordenadaI;
console.log("avoid-crossfire");
for (let i in scans) {
if (typeof scans[i]["allies"] === "undefined") { // si no existe
coordinates = scans[i]["coordinates"];
console.log(scans[i]);
}
}
return coordinates;
}
function prioritizeMech(scans): any { // A mech must be attacked if one is found; otherwise, any other target type is valid.
let coordinates: CoordenadaI;
console.log("prioritize-mech");
coordinates = scans[0]["coordinates"]; // take the first one in case there is no mech
for (let i in scans) {
if (scans[i]["enemies"]["type"] === "mech") {
coordinates = scans[i]["coordinates"];
console.log(scans[i]);
}
}
return coordinates;
}
function avoidMech(scans): ScanI[] { // No enemy of type mech may be attacked
let scansResult: ScanI[] = [];
console.log("avoid-mech");
for (let i in scans) {
if (scans[i]["enemies"]["type"] !== "mech") {
scansResult.push(scans[i]);
console.log(scans[i]);
}
}
return scansResult;
}
function distanciaPunto(coordinates: CoordenadaI): number { // d(a,b) = sqrt((x2-x1)^2 + (y2-y1)^2)
let origen: CoordenadaI;
origen = { x: 0, y: 0 };
console.log("La distancia es: " + Math.sqrt(Math.pow((coordinates.x - 0), 2) + Math.pow((coordinates.y - 0), 2)))
return Math.sqrt(Math.pow((coordinates.x - 0), 2) + Math.pow((coordinates.y - 0), 2));
} | ies(scans): an |
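// Editor's worked example (not in the original file): distanciaPunto measures
// Euclidean distance from the origin, so distanciaPunto({ x: 3, y: 4 })
// returns sqrt(9 + 16) = 5.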
token.go | // Copyright 2009 The Go Authors. All rights reserved.
// Modified work copyright 2018 Alex Browne. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package token defines constants representing the lexical tokens of the Go
// programming language and basic operations on tokens (printing, predicates).
//
package token
import "strconv"
// Token is the set of lexical tokens of the Go programming language.
type Token int
// The list of tokens.
const (
// Special tokens
ILLEGAL Token = iota
EOF
COMMENT
literal_beg
// Identifiers and basic type literals
// (these tokens stand for classes of literals)
IDENT // main
INT // 12345
FLOAT // 123.45
IMAG // 123.45i
CHAR // 'a'
STRING // "abc"
literal_end
operator_beg
// Operators and delimiters
ADD // +
SUB // -
MUL // *
QUO // /
REM // %
AND // &
OR // |
XOR // ^
SHL // <<
SHR // >>
AND_NOT // &^
ADD_ASSIGN // +=
SUB_ASSIGN // -=
MUL_ASSIGN // *=
QUO_ASSIGN // /=
REM_ASSIGN // %=
AND_ASSIGN // &=
OR_ASSIGN // |=
XOR_ASSIGN // ^=
SHL_ASSIGN // <<=
SHR_ASSIGN // >>=
AND_NOT_ASSIGN // &^=
LAND // &&
LOR // ||
ARROW // <-
INC // ++
DEC // --
EQL // ==
LSS // <
GTR // >
ASSIGN // =
NOT // !
NEQ // !=
LEQ // <=
GEQ // >=
DEFINE // :=
ELLIPSIS // ...
LPAREN // (
LBRACK // [
LBRACE // {
COMMA // ,
PERIOD // .
RPAREN // )
RBRACK // ]
RBRACE // }
SEMICOLON // ;
COLON // :
operator_end
keyword_beg
// Keywords
BREAK
CASE
CHAN
CONST
CONTINUE
DEFAULT
DEFER
ELSE
FALLTHROUGH
FOR
FUNC
GO
GOTO
IF
IMPORT
INTERFACE
MAP
PACKAGE
RANGE
RETURN
SELECT
STRUCT
SWITCH
TYPE
VAR
keyword_end
)
var tokens = [...]string{
ILLEGAL: "ILLEGAL",
EOF: "EOF",
COMMENT: "COMMENT",
IDENT: "IDENT",
INT: "INT",
FLOAT: "FLOAT",
IMAG: "IMAG",
CHAR: "CHAR",
STRING: "STRING",
ADD: "+",
SUB: "-",
MUL: "*",
QUO: "/",
REM: "%",
AND: "&",
OR: "|",
XOR: "^",
SHL: "<<",
SHR: ">>",
AND_NOT: "&^",
ADD_ASSIGN: "+=",
SUB_ASSIGN: "-=",
MUL_ASSIGN: "*=",
QUO_ASSIGN: "/=",
REM_ASSIGN: "%=",
AND_ASSIGN: "&=", | XOR_ASSIGN: "^=",
SHL_ASSIGN: "<<=",
SHR_ASSIGN: ">>=",
AND_NOT_ASSIGN: "&^=",
LAND: "&&",
LOR: "||",
ARROW: "<-",
INC: "++",
DEC: "--",
EQL: "==",
LSS: "<",
GTR: ">",
ASSIGN: "=",
NOT: "!",
NEQ: "!=",
LEQ: "<=",
GEQ: ">=",
DEFINE: ":=",
ELLIPSIS: "...",
LPAREN: "(",
LBRACK: "[",
LBRACE: "{",
COMMA: ",",
PERIOD: ".",
RPAREN: ")",
RBRACK: "]",
RBRACE: "}",
SEMICOLON: ";",
COLON: ":",
BREAK: "break",
CASE: "case",
CHAN: "chan",
CONST: "const",
CONTINUE: "continue",
DEFAULT: "default",
DEFER: "defer",
ELSE: "else",
FALLTHROUGH: "fallthrough",
FOR: "for",
FUNC: "func",
GO: "go",
GOTO: "goto",
IF: "if",
IMPORT: "import",
INTERFACE: "interface",
MAP: "map",
PACKAGE: "package",
RANGE: "range",
RETURN: "return",
SELECT: "select",
STRUCT: "struct",
SWITCH: "switch",
TYPE: "type",
VAR: "var",
}
// String returns the string corresponding to the token tok.
// For operators, delimiters, and keywords the string is the actual
// token character sequence (e.g., for the token ADD, the string is
// "+"). For all other tokens the string corresponds to the token
// constant name (e.g. for the token IDENT, the string is "IDENT").
//
func (tok Token) String() string {
s := ""
if 0 <= tok && tok < Token(len(tokens)) {
s = tokens[tok]
}
if s == "" {
s = "token(" + strconv.Itoa(int(tok)) + ")"
}
return s
}
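// Editor's sketch (not in the original file): String returns the literal
// spelling for operators and keywords, the constant name for other tokens,
// and falls back to a numeric form for values outside the token table.
func exampleTokenString() {
_ = ADD.String() // "+"
_ = IDENT.String() // "IDENT"
_ = Token(10000).String() // "token(10000)"
}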
// A set of constants for precedence-based expression parsing.
// Non-operators have lowest precedence, followed by operators
// starting with precedence 1 up to unary operators. The highest
// precedence serves as "catch-all" precedence for selector,
// indexing, and other operator and delimiter tokens.
//
const (
LowestPrec = 0 // non-operators
UnaryPrec = 6
HighestPrec = 7
)
// Precedence returns the operator precedence of the binary
// operator op. If op is not a binary operator, the result
// is LowestPrecedence.
//
func (op Token) Precedence() int {
switch op {
case LOR:
return 1
case LAND:
return 2
case EQL, NEQ, LSS, LEQ, GTR, GEQ:
return 3
case ADD, SUB, OR, XOR:
return 4
case MUL, QUO, REM, SHL, SHR, AND, AND_NOT:
return 5
}
return LowestPrec
}
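// Editor's sketch (not in the original file): precedence ordering is what
// makes "1 + 2 * 3" parse as 1 + (2 * 3) in a precedence-climbing parser.
func examplePrecedence() bool {
return MUL.Precedence() > ADD.Precedence() // true: 5 > 4
}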
var keywords map[string]Token
func init() {
keywords = make(map[string]Token)
for i := keyword_beg + 1; i < keyword_end; i++ {
keywords[tokens[i]] = i
}
}
// Lookup maps an identifier to its keyword token or IDENT (if not a keyword).
//
func Lookup(ident string) Token {
if tok, is_keyword := keywords[ident]; is_keyword {
return tok
}
return IDENT
}
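// Editor's sketch (not in the original file): Lookup separates keywords from
// ordinary identifiers.
func exampleLookup() (Token, Token) {
return Lookup("func"), Lookup("main") // FUNC, IDENT
}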
// Predicates
// IsLiteral returns true for tokens corresponding to identifiers
// and basic type literals; it returns false otherwise.
//
func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
//
func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
// IsKeyword returns true for tokens corresponding to keywords;
// it returns false otherwise.
//
func (tok Token) IsKeyword() bool { return keyword_beg < tok && tok < keyword_end } | OR_ASSIGN: "|=", |
p0_3.py | kwh_used = 1000
out = 0
if(kwh_used < 500):
out += kwh_used * 0.45
elif(kwh_used >= 500 and kwh_used < 1500):
out += 500 * 0.45 + ((kwh_used - 500) * 0.74)
elif(kwh_used >= 1500 and kwh_used < 2500):
|
elif(kwh_used >= 2500):
out += 500 * 0.45 + (1000 * 0.74) + (1000 * 1.25) + ((kwh_used - 2500) * 2)
out += out * 0.2
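# Editor's worked example (not in the original file), using the slabs above:
# kwh_used = 2000 -> 500*0.45 + 1000*0.74 + 500*1.25 = 1590.0,
# and the 20% surcharge brings it to 1908.0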
print(out) | out += 500 * 0.45 + (1000 * 0.74) + ((kwh_used - 1500) * 1.25)
finalizer.go | package operator
import (
"context"
"fmt"
"time"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
"k8s.io/klog"
imageregistryv1 "github.com/openshift/api/imageregistry/v1"
regopset "github.com/openshift/client-go/imageregistry/clientset/versioned/typed/imageregistry/v1"
"github.com/openshift/cluster-image-registry-operator/pkg/defaults"
)
func (c *Controller) RemoveResources(o *imageregistryv1.Config) error {
c.setStatusRemoving(o)
return c.generator.Remove(o)
}
func (c *Controller) finalizeResources(o *imageregistryv1.Config) error {
if o.ObjectMeta.DeletionTimestamp == nil {
return nil
}
finalizers := []string{} | finalizers = append(finalizers, v)
}
}
if len(finalizers) == len(o.ObjectMeta.Finalizers) {
return nil
}
klog.Infof("finalizing %s", utilObjectInfo(o))
client, err := regopset.NewForConfig(c.kubeconfig)
if err != nil {
return err
}
err = c.RemoveResources(o)
if err != nil {
c.setStatusRemoveFailed(o, err)
return fmt.Errorf("unable to finalize resource: %s", err)
}
cr := o
err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
if cr == nil {
// Skip using the cache here so we don't have as many
// retries due to slow cache updates
// Note: assign to the outer cr here; using := would shadow it and
// leave the outer cr nil, causing a nil dereference on the update below.
var err error
cr, err = client.Configs().Get(
context.TODO(), o.Name, metav1.GetOptions{},
)
if err != nil {
return fmt.Errorf("failed to get %s: %s", utilObjectInfo(o), err)
}
finalizers = []string{}
for _, v := range cr.ObjectMeta.Finalizers {
if v != defaults.ImageRegistryOperatorResourceFinalizer {
finalizers = append(finalizers, v)
}
}
}
cr.ObjectMeta.Finalizers = finalizers
_, err := client.Configs().Update(
context.TODO(), cr, metav1.UpdateOptions{},
)
if err != nil {
cr = nil
return err
}
return nil
})
if err != nil {
return fmt.Errorf("unable to update finalizers in %s: %s", utilObjectInfo(o), err)
}
// These errors may indicate a transient error that we can retry in tests.
errorFuncs := []func(error) bool{
kerrors.IsInternalError,
kerrors.IsTimeout,
kerrors.IsServerTimeout,
kerrors.IsTooManyRequests,
utilnet.IsProbableEOF,
utilnet.IsConnectionReset,
}
retryTime := 3 * time.Second
err = wait.PollInfinite(retryTime, func() (stop bool, err error) {
_, err = c.listers.RegistryConfigs.Get(o.Name)
if err == nil {
return
}
if !kerrors.IsNotFound(err) {
for _, isRetryError := range errorFuncs {
if isRetryError(err) {
return false, nil
}
}
// If the error sends the Retry-After header, we respect it as an explicit confirmation we should retry.
if delaySeconds, shouldRetry := kerrors.SuggestsClientDelay(err); shouldRetry {
delayTime := time.Duration(delaySeconds) * time.Second
if retryTime < delayTime {
time.Sleep(delayTime - retryTime)
}
return false, nil
}
err = fmt.Errorf("failed to get %s: %s", utilObjectInfo(o), err)
return
}
return true, nil
})
if err != nil {
return fmt.Errorf("unable to wait for %s deletion: %s", utilObjectInfo(o), err)
}
return nil
} | for _, v := range o.ObjectMeta.Finalizers {
if v != defaults.ImageRegistryOperatorResourceFinalizer { |
test_default.py | import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')
@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_hostname(host):
assert 'instance' == host.check_output('hostname -s')
@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_etc_molecule_directory(host):
f = host.file('/etc/molecule')
assert f.is_directory
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o755
@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_etc_molecule_ansible_hostname_file(host):
f = host.file('/etc/molecule/instance')
assert f.is_file
assert f.user == 'root' | assert f.group == 'root'
assert f.mode == 0o644 |
|
authentication.go | package message
import (
"io"
)
type AuthenticationOk struct {
ID uint32
}
type AuthenticationKerberosV5 struct {
ID uint32
}
type AuthenticationCleartextPassword struct {
ID uint32
}
type AuthenticationMD5Password struct {
ID uint32
Salt []byte
}
type AuthenticationSCMCredential struct {
ID uint32
}
type AuthenticationGSS struct {
ID uint32
}
type AuthenticationSSPI struct {
ID uint32
}
type AuthenticationGSSContinue struct {
ID uint32
Data []byte
}
type AuthenticationSASL struct {
ID uint32
Mechanisms []string
}
type AuthenticationSASLContinue struct {
ID uint32
Data []byte
}
type AuthenticationSASLFinal struct {
ID uint32
Data []byte
}
func (m *AuthenticationOk) Reader() io.Reader {
b := NewBase(4)
b.WriteUint32(m.ID)
return b.SetType('R').Reader()
}
func (m *AuthenticationKerberosV5) Reader() io.Reader {
b := NewBase(4)
b.WriteUint32(m.ID)
return b.SetType('R').Reader()
}
func (m *AuthenticationCleartextPassword) Reader() io.Reader {
b := NewBase(4)
b.WriteUint32(m.ID)
return b.SetType('R').Reader()
}
func (m *AuthenticationMD5Password) Reader() io.Reader {
b := NewBase(4 + len(m.Salt))
b.WriteUint32(m.ID)
b.WriteByteN(m.Salt)
return b.SetType('R').Reader()
}
func (m *AuthenticationSCMCredential) Reader() io.Reader {
b := NewBase(4)
b.WriteUint32(m.ID)
return b.SetType('R').Reader()
}
func (m *AuthenticationGSS) Reader() io.Reader {
b := NewBase(4)
b.WriteUint32(m.ID)
return b.SetType('R').Reader()
}
func (m *AuthenticationSSPI) Reader() io.Reader {
b := NewBase(4)
b.WriteUint32(m.ID)
return b.SetType('R').Reader()
}
func (m *AuthenticationGSSContinue) Reader() io.Reader {
b := NewBase(4 + len(m.Data))
b.WriteUint32(m.ID)
b.WriteByteN(m.Data)
return b.SetType('R').Reader()
}
func (m *AuthenticationSASL) Reader() io.Reader {
listLength := len(m.Mechanisms) + 1
for _, str := range m.Mechanisms {
listLength += len(str)
}
b := NewBase(4 + listLength)
b.WriteUint32(m.ID)
for _, str := range m.Mechanisms {
b.WriteString(str)
}
b.WriteByte(0)
return b.SetType('R').Reader()
}
func (m *AuthenticationSASLContinue) Reader() io.Reader {
b := NewBase(4 + len(m.Data))
b.WriteUint32(m.ID)
b.WriteByteN(m.Data)
return b.SetType('R').Reader()
}
func (m *AuthenticationSASLFinal) Reader() io.Reader {
b := NewBase(4 + len(m.Data))
b.WriteUint32(m.ID)
b.WriteByteN(m.Data)
return b.SetType('R').Reader()
}
func ReadAuthentication(raw []byte) interface{} | {
b := NewBaseFromBytes(raw)
id := b.ReadUint32()
switch id {
case 0:
return &AuthenticationOk{ID: id}
case 2:
return &AuthenticationKerberosV5{ID: id}
case 3:
return &AuthenticationCleartextPassword{ID: id}
case 5:
return &AuthenticationMD5Password{ID: id, Salt: b.ReadByteN(4)}
case 6:
return &AuthenticationSCMCredential{ID: id}
case 7:
return &AuthenticationGSS{ID: id}
case 8:
return &AuthenticationGSSContinue{ID: id, Data: b.ReadByteN(uint32(len(raw)) - 4)}
case 9:
return &AuthenticationSSPI{ID: id}
case 10:
var list []string
for !b.isEnd() {
if s := b.ReadString(); s != "" {
list = append(list, s)
}
}
return &AuthenticationSASL{ID: id, Mechanisms: list}
case 11:
return &AuthenticationSASLContinue{ID: id, Data: b.ReadByteN(uint32(len(raw)) - 4)}
case 12:
return &AuthenticationSASLFinal{ID: id, Data: b.ReadByteN(uint32(len(raw)) - 4)}
}
return nil
} |
|
contract.rs | use cosmwasm_std::{
entry_point, to_binary, to_vec, Binary, ContractResult, CosmosMsg, Deps, DepsMut, Env,
MessageInfo, QueryRequest, QueryResponse, Reply, Response, StdError, StdResult, SubMsg,
SystemResult,
};
use crate::errors::ReflectError;
use crate::msg::{
CapitalizedResponse, ChainResponse, CustomMsg, ExecuteMsg, InstantiateMsg, OwnerResponse,
QueryMsg, RawResponse, SpecialQuery, SpecialResponse,
};
use crate::state::{config, config_read, replies, replies_read, State};
#[entry_point]
pub fn instantiate(
deps: DepsMut<SpecialQuery>,
_env: Env,
info: MessageInfo,
_msg: InstantiateMsg,
) -> StdResult<Response<CustomMsg>> {
let state = State { owner: info.sender };
config(deps.storage).save(&state)?;
Ok(Response::default())
}
#[entry_point]
pub fn execute(
deps: DepsMut<SpecialQuery>,
env: Env,
info: MessageInfo,
msg: ExecuteMsg,
) -> Result<Response<CustomMsg>, ReflectError> {
match msg {
ExecuteMsg::ReflectMsg { msgs } => try_reflect(deps, env, info, msgs),
ExecuteMsg::ReflectSubMsg { msgs } => try_reflect_subcall(deps, env, info, msgs),
ExecuteMsg::ChangeOwner { owner } => try_change_owner(deps, env, info, owner),
}
}
pub fn try_reflect(
deps: DepsMut<SpecialQuery>,
_env: Env,
info: MessageInfo,
msgs: Vec<CosmosMsg<CustomMsg>>,
) -> Result<Response<CustomMsg>, ReflectError> {
let state = config(deps.storage).load()?;
if info.sender != state.owner {
return Err(ReflectError::NotCurrentOwner {
expected: state.owner.into(),
actual: info.sender.into(),
});
}
if msgs.is_empty() {
return Err(ReflectError::MessagesEmpty);
}
Ok(Response::new()
.add_attribute("action", "reflect")
.add_messages(msgs))
}
pub fn try_reflect_subcall(
deps: DepsMut<SpecialQuery>,
_env: Env,
info: MessageInfo,
msgs: Vec<SubMsg<CustomMsg>>,
) -> Result<Response<CustomMsg>, ReflectError> {
let state = config(deps.storage).load()?;
if info.sender != state.owner {
return Err(ReflectError::NotCurrentOwner {
expected: state.owner.into(),
actual: info.sender.into(),
});
}
if msgs.is_empty() {
return Err(ReflectError::MessagesEmpty);
}
Ok(Response::new()
.add_attribute("action", "reflect_subcall")
.add_submessages(msgs))
}
pub fn try_change_owner(
deps: DepsMut<SpecialQuery>,
_env: Env,
info: MessageInfo,
new_owner: String,
) -> Result<Response<CustomMsg>, ReflectError> {
let api = deps.api;
config(deps.storage).update(|mut state| {
if info.sender != state.owner {
return Err(ReflectError::NotCurrentOwner {
expected: state.owner.into(),
actual: info.sender.into(),
});
}
state.owner = api.addr_validate(&new_owner)?;
Ok(state)
})?;
Ok(Response::new()
.add_attribute("action", "change_owner")
.add_attribute("owner", new_owner))
}
/// This just stores the result for future query
#[entry_point]
pub fn reply(deps: DepsMut<SpecialQuery>, _env: Env, msg: Reply) -> Result<Response, ReflectError> {
let key = msg.id.to_be_bytes();
replies(deps.storage).save(&key, &msg)?;
Ok(Response::default())
}
#[entry_point]
pub fn query(deps: Deps<SpecialQuery>, _env: Env, msg: QueryMsg) -> StdResult<QueryResponse> {
match msg {
QueryMsg::Owner {} => to_binary(&query_owner(deps)?),
QueryMsg::Capitalized { text } => to_binary(&query_capitalized(deps, text)?),
QueryMsg::Chain { request } => to_binary(&query_chain(deps, &request)?),
QueryMsg::Raw { contract, key } => to_binary(&query_raw(deps, contract, key)?),
QueryMsg::SubMsgResult { id } => to_binary(&query_subcall(deps, id)?),
}
}
fn query_owner(deps: Deps<SpecialQuery>) -> StdResult<OwnerResponse> {
let state = config_read(deps.storage).load()?;
let resp = OwnerResponse {
owner: state.owner.into(),
};
Ok(resp)
}
fn query_subcall(deps: Deps<SpecialQuery>, id: u64) -> StdResult<Reply> {
let key = id.to_be_bytes();
replies_read(deps.storage).load(&key)
}
fn query_capitalized(deps: Deps<SpecialQuery>, text: String) -> StdResult<CapitalizedResponse> {
let req = SpecialQuery::Capitalized { text }.into();
let response: SpecialResponse = deps.querier.query(&req)?;
Ok(CapitalizedResponse { text: response.msg })
}
fn query_chain(
deps: Deps<SpecialQuery>,
request: &QueryRequest<SpecialQuery>,
) -> StdResult<ChainResponse> {
let raw = to_vec(request).map_err(|serialize_err| {
StdError::generic_err(format!("Serializing QueryRequest: {}", serialize_err))
})?;
match deps.querier.raw_query(&raw) {
SystemResult::Err(system_err) => Err(StdError::generic_err(format!(
"Querier system error: {}",
system_err
))),
SystemResult::Ok(ContractResult::Err(contract_err)) => Err(StdError::generic_err(format!(
"Querier contract error: {}",
contract_err
))),
SystemResult::Ok(ContractResult::Ok(value)) => Ok(ChainResponse { data: value }),
}
}
fn query_raw(deps: Deps<SpecialQuery>, contract: String, key: Binary) -> StdResult<RawResponse> {
let response: Option<Vec<u8>> = deps.querier.query_wasm_raw(contract, key)?;
Ok(RawResponse {
data: response.unwrap_or_default().into(),
})
}
#[cfg(test)]
mod tests {
use super::*;
use crate::testing::mock_dependencies_with_custom_querier;
use cosmwasm_std::testing::{mock_env, mock_info, MOCK_CONTRACT_ADDR};
use cosmwasm_std::{
coin, coins, from_binary, AllBalanceResponse, BankMsg, BankQuery, Binary, Event,
StakingMsg, StdError, SubMsgResponse, SubMsgResult,
};
#[test]
fn proper_instantiation() {
let mut deps = mock_dependencies_with_custom_querier(&[]);
let msg = InstantiateMsg {};
let info = mock_info("creator", &coins(1000, "earth"));
// we can just call .unwrap() to assert this was a success
let res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
assert_eq!(0, res.messages.len());
// it worked, let's query the state
let value = query_owner(deps.as_ref()).unwrap();
assert_eq!("creator", value.owner.as_str());
}
#[test]
fn reflect() {
let mut deps = mock_dependencies_with_custom_querier(&[]);
let msg = InstantiateMsg {};
let info = mock_info("creator", &coins(2, "token"));
let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
let payload = vec![BankMsg::Send {
to_address: String::from("friend"),
amount: coins(1, "token"),
}
.into()];
let msg = ExecuteMsg::ReflectMsg {
msgs: payload.clone(),
};
let info = mock_info("creator", &[]);
let res = execute(deps.as_mut(), mock_env(), info, msg).unwrap();
let payload: Vec<_> = payload.into_iter().map(SubMsg::new).collect();
assert_eq!(payload, res.messages);
}
#[test]
fn reflect_requires_owner() {
let mut deps = mock_dependencies_with_custom_querier(&[]);
let msg = InstantiateMsg {};
let info = mock_info("creator", &coins(2, "token"));
let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
// signer is not owner
let payload = vec![BankMsg::Send {
to_address: String::from("friend"),
amount: coins(1, "token"),
}
.into()];
let msg = ExecuteMsg::ReflectMsg { msgs: payload };
let info = mock_info("random", &[]);
let res = execute(deps.as_mut(), mock_env(), info, msg);
match res.unwrap_err() {
ReflectError::NotCurrentOwner { .. } => {}
err => panic!("Unexpected error: {:?}", err),
}
}
#[test]
fn reflect_reject_empty_msgs() {
let mut deps = mock_dependencies_with_custom_querier(&[]);
let msg = InstantiateMsg {};
let info = mock_info("creator", &coins(2, "token"));
let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
let info = mock_info("creator", &[]);
let payload = vec![];
let msg = ExecuteMsg::ReflectMsg { msgs: payload };
let err = execute(deps.as_mut(), mock_env(), info, msg).unwrap_err();
assert_eq!(err, ReflectError::MessagesEmpty);
}
#[test]
fn | () {
let mut deps = mock_dependencies_with_custom_querier(&[]);
let msg = InstantiateMsg {};
let info = mock_info("creator", &coins(2, "token"));
let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
let payload = vec![
BankMsg::Send {
to_address: String::from("friend"),
amount: coins(1, "token"),
}
.into(),
// make sure we can pass through custom native messages
CustomMsg::Raw(Binary(b"{\"foo\":123}".to_vec())).into(),
CustomMsg::Debug("Hi, Dad!".to_string()).into(),
StakingMsg::Delegate {
validator: String::from("validator"),
amount: coin(100, "ustake"),
}
.into(),
];
let msg = ExecuteMsg::ReflectMsg {
msgs: payload.clone(),
};
let info = mock_info("creator", &[]);
let res = execute(deps.as_mut(), mock_env(), info, msg).unwrap();
let payload: Vec<_> = payload.into_iter().map(SubMsg::new).collect();
assert_eq!(payload, res.messages);
}
#[test]
fn change_owner_works() {
let mut deps = mock_dependencies_with_custom_querier(&[]);
let msg = InstantiateMsg {};
let info = mock_info("creator", &coins(2, "token"));
let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
let info = mock_info("creator", &[]);
let new_owner = String::from("friend");
let msg = ExecuteMsg::ChangeOwner { owner: new_owner };
let res = execute(deps.as_mut(), mock_env(), info, msg).unwrap();
// should change state
assert_eq!(0, res.messages.len());
let value = query_owner(deps.as_ref()).unwrap();
assert_eq!("friend", value.owner.as_str());
}
#[test]
fn change_owner_requires_current_owner_as_sender() {
let mut deps = mock_dependencies_with_custom_querier(&[]);
let msg = InstantiateMsg {};
let creator = String::from("creator");
let info = mock_info(&creator, &coins(2, "token"));
let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
let random = String::from("random");
let info = mock_info(&random, &[]);
let new_owner = String::from("friend");
let msg = ExecuteMsg::ChangeOwner { owner: new_owner };
let err = execute(deps.as_mut(), mock_env(), info, msg).unwrap_err();
assert_eq!(
err,
ReflectError::NotCurrentOwner {
expected: creator,
actual: random
}
);
}
#[test]
fn change_owner_errors_for_invalid_new_address() {
let mut deps = mock_dependencies_with_custom_querier(&[]);
let creator = String::from("creator");
let msg = InstantiateMsg {};
let info = mock_info(&creator, &coins(2, "token"));
let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
let info = mock_info(&creator, &[]);
let msg = ExecuteMsg::ChangeOwner {
owner: String::from("x"),
};
let err = execute(deps.as_mut(), mock_env(), info, msg).unwrap_err();
match err {
ReflectError::Std(StdError::GenericErr { msg, .. }) => {
assert!(msg.contains("human address too short"))
}
e => panic!("Unexpected error: {:?}", e),
}
}
#[test]
fn capitalized_query_works() {
let deps = mock_dependencies_with_custom_querier(&[]);
let msg = QueryMsg::Capitalized {
text: "demo one".to_string(),
};
let response = query(deps.as_ref(), mock_env(), msg).unwrap();
let value: CapitalizedResponse = from_binary(&response).unwrap();
assert_eq!(value.text, "DEMO ONE");
}
#[test]
fn chain_query_works() {
let deps = mock_dependencies_with_custom_querier(&coins(123, "ucosm"));
// with bank query
let msg = QueryMsg::Chain {
request: BankQuery::AllBalances {
address: MOCK_CONTRACT_ADDR.to_string(),
}
.into(),
};
let response = query(deps.as_ref(), mock_env(), msg).unwrap();
let outer: ChainResponse = from_binary(&response).unwrap();
let inner: AllBalanceResponse = from_binary(&outer.data).unwrap();
assert_eq!(inner.amount, coins(123, "ucosm"));
// with custom query
let msg = QueryMsg::Chain {
request: SpecialQuery::Ping {}.into(),
};
let response = query(deps.as_ref(), mock_env(), msg).unwrap();
let outer: ChainResponse = from_binary(&response).unwrap();
let inner: SpecialResponse = from_binary(&outer.data).unwrap();
assert_eq!(inner.msg, "pong");
}
#[test]
fn reflect_subcall() {
let mut deps = mock_dependencies_with_custom_querier(&[]);
let msg = InstantiateMsg {};
let info = mock_info("creator", &coins(2, "token"));
let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
let id = 123u64;
let payload = SubMsg::reply_always(
BankMsg::Send {
to_address: String::from("friend"),
amount: coins(1, "token"),
},
id,
);
let msg = ExecuteMsg::ReflectSubMsg {
msgs: vec![payload.clone()],
};
let info = mock_info("creator", &[]);
let mut res = execute(deps.as_mut(), mock_env(), info, msg).unwrap();
assert_eq!(1, res.messages.len());
let msg = res.messages.pop().expect("must have a message");
assert_eq!(payload, msg);
}
// this mocks out what happens after reflect_subcall
#[test]
fn reply_and_query() {
let mut deps = mock_dependencies_with_custom_querier(&[]);
let msg = InstantiateMsg {};
let info = mock_info("creator", &coins(2, "token"));
let _res = instantiate(deps.as_mut(), mock_env(), info, msg).unwrap();
let id = 123u64;
let data = Binary::from(b"foobar");
let events = vec![Event::new("message").add_attribute("signer", "caller-addr")];
let result = SubMsgResult::Ok(SubMsgResponse {
events: events.clone(),
data: Some(data.clone()),
});
let subcall = Reply { id, result };
let res = reply(deps.as_mut(), mock_env(), subcall).unwrap();
assert_eq!(0, res.messages.len());
// query for a non-existent id
let qres = query(
deps.as_ref(),
mock_env(),
QueryMsg::SubMsgResult { id: 65432 },
);
assert!(qres.is_err());
// query for the real id
let raw = query(deps.as_ref(), mock_env(), QueryMsg::SubMsgResult { id }).unwrap();
let qres: Reply = from_binary(&raw).unwrap();
assert_eq!(qres.id, id);
let result = qres.result.unwrap();
assert_eq!(result.data, Some(data));
assert_eq!(result.events, events);
}
}
| reflect_multiple_messages |
server.rs | //! UDP and TCP server implementations for DNS
use std::collections::VecDeque;
use std::io::Write;
use std::net::SocketAddr;
use std::net::{Shutdown, TcpListener, TcpStream, UdpSocket};
use std::sync::atomic::Ordering;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Condvar, Mutex};
use std::thread::Builder;
use derive_more::{Display, Error, From};
use rand::random;
use crate::dns::buffer::{BytePacketBuffer, PacketBuffer, StreamPacketBuffer, VectorPacketBuffer};
use crate::dns::context::ServerContext;
use crate::dns::netutil::{read_packet_length, write_packet_length};
use crate::dns::protocol::{DnsPacket, DnsRecord, QueryType, ResultCode};
use crate::dns::resolve::DnsResolver;
#[derive(Debug, Display, From, Error)]
pub enum ServerError {
Io(std::io::Error),
}
type Result<T> = std::result::Result<T, ServerError>;
macro_rules! return_or_report {
( $x:expr, $message:expr ) => {
match $x {
Ok(res) => res,
Err(_) => {
println!($message);
return;
}
}
};
}
macro_rules! ignore_or_report {
( $x:expr, $message:expr ) => {
match $x {
Ok(_) => {}
Err(_) => {
println!($message);
return;
}
};
};
}
/// Common trait for DNS servers
pub trait DnsServer {
/// Initialize the server and start listening
///
/// This method should _NOT_ block. Rather, servers are expected to spawn a new
/// thread to handle requests and return immediately.
fn run_server(self) -> Result<()>;
}
/// Utility function for resolving domains referenced in, for example, CNAME or SRV
/// records. This usually spares the client from having to perform additional
/// lookups.
fn resolve_cnames(
lookup_list: &[DnsRecord],
results: &mut Vec<DnsPacket>,
resolver: &mut Box<dyn DnsResolver>,
depth: u16,
) {
if depth > 10 {
return;
}
for ref rec in lookup_list {
match **rec {
DnsRecord::CNAME { ref host, .. } | DnsRecord::SRV { ref host, .. } => {
if let Ok(result2) = resolver.resolve(host, QueryType::A, true) {
let new_unmatched = result2.get_unresolved_cnames();
results.push(result2);
resolve_cnames(&new_unmatched, results, resolver, depth + 1);
}
}
_ => {}
}
}
}
/// Perform the actual work for a query
///
/// Incoming requests are validated to make sure they are well formed and adhere
/// to the server configuration. If so, the request will be passed on to the
/// active resolver and a query will be performed. It will also resolve some
/// possible references within the query, such as CNAME hosts.
///
/// This function will always return a valid packet, even if the request could not
/// be performed, since we still want to send something back to the client.
pub fn execute_query(context: Arc<ServerContext>, request: &DnsPacket) -> DnsPacket {
let mut packet = DnsPacket::new();
packet.header.id = request.header.id;
packet.header.recursion_available = context.allow_recursive;
packet.header.response = true;
if request.header.recursion_desired && !context.allow_recursive {
packet.header.rescode = ResultCode::REFUSED;
} else if request.questions.is_empty() {
packet.header.rescode = ResultCode::FORMERR;
} else {
let mut results = Vec::new();
let question = &request.questions[0];
packet.questions.push(question.clone());
let mut resolver = context.create_resolver(context.clone());
let rescode = match resolver.resolve(
&question.name,
question.qtype,
request.header.recursion_desired,
) {
Ok(result) => {
let rescode = result.header.rescode;
let unmatched = result.get_unresolved_cnames();
results.push(result);
resolve_cnames(&unmatched, &mut results, &mut resolver, 0);
rescode
}
Err(err) => {
println!(
"Failed to resolve {:?} {}: {:?}",
question.qtype, question.name, err
);
ResultCode::SERVFAIL
}
};
packet.header.rescode = rescode;
for result in results {
for rec in result.answers {
packet.answers.push(rec);
}
for rec in result.authorities {
packet.authorities.push(rec);
}
for rec in result.resources {
packet.resources.push(rec);
}
}
}
packet
}
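// Editor's sketch (not in the original file): execute_query is infallible by
// design, so a caller can serialize the returned packet unconditionally.
#[allow(dead_code)]
fn example_execute(context: Arc<ServerContext>, request: &DnsPacket) -> DnsPacket {
let response = execute_query(context, request);
debug_assert!(response.header.response); // always marked as a response
response
}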
/// The UDP server
///
/// Accepts DNS queries through UDP, and uses the `ServerContext` to determine
/// how to service the request. Packets are read on a single thread and queued,
/// and a pool of pre-spawned worker threads services the requests asynchronously.
pub struct DnsUdpServer {
context: Arc<ServerContext>,
request_queue: Arc<Mutex<VecDeque<(SocketAddr, DnsPacket)>>>,
request_cond: Arc<Condvar>,
thread_count: usize,
}
impl DnsUdpServer {
pub fn new(context: Arc<ServerContext>, thread_count: usize) -> DnsUdpServer {
DnsUdpServer {
context: context,
request_queue: Arc::new(Mutex::new(VecDeque::new())),
request_cond: Arc::new(Condvar::new()),
thread_count: thread_count,
}
}
}
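// Editor's usage sketch (not in the original file; assumes a fully configured
// ServerContext): run_server spawns its worker pool and returns immediately.
#[allow(dead_code)]
fn example_start_udp(context: Arc<ServerContext>) -> Result<()> {
// Four worker threads service the shared request queue.
DnsUdpServer::new(context, 4).run_server()
}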
impl DnsServer for DnsUdpServer {
/// Launch the server
///
/// This method takes ownership of the server, preventing the method from
/// being called multiple times.
fn run_server(self) -> Result<()> {
// Bind the socket
let socket = UdpSocket::bind(("0.0.0.0", self.context.dns_port))?;
// Spawn threads for handling requests
for thread_id in 0..self.thread_count {
let socket_clone = match socket.try_clone() {
Ok(x) => x,
Err(e) => {
println!("Failed to clone socket when starting UDP server: {:?}", e);
continue;
}
};
let context = self.context.clone();
let request_cond = self.request_cond.clone();
let request_queue = self.request_queue.clone();
let name = "DnsUdpServer-request-".to_string() + &thread_id.to_string();
let _ = Builder::new().name(name).spawn(move || {
loop {
// Acquire lock, and wait on the condition until data is
// available. Then proceed with popping an entry of the queue.
let (src, request) = match request_queue
.lock()
.ok()
.and_then(|x| request_cond.wait(x).ok())
.and_then(|mut x| x.pop_front())
{
Some(x) => x,
None => {
println!("Not expected to happen!");
continue;
}
};
let mut size_limit = 512;
// Check for EDNS
if request.resources.len() == 1 {
if let DnsRecord::OPT { packet_len, .. } = request.resources[0] {
size_limit = packet_len as usize;
}
}
// Create a response buffer, and ask the context for an appropriate
// resolver
let mut res_buffer = VectorPacketBuffer::new();
let mut packet = execute_query(context.clone(), &request);
let _ = packet.write(&mut res_buffer, size_limit);
// Fire off the response
let len = res_buffer.pos();
let data = return_or_report!(
res_buffer.get_range(0, len),
"Failed to get buffer data"
);
ignore_or_report!(
socket_clone.send_to(data, src),
"Failed to send response packet"
);
}
})?;
}
// Start servicing requests
let _ = Builder::new()
.name("DnsUdpServer-incoming".into())
.spawn(move || {
loop {
let _ = self
.context
.statistics
.udp_query_count
.fetch_add(1, Ordering::Release);
// Read a query packet
let mut req_buffer = BytePacketBuffer::new();
let (_, src) = match socket.recv_from(&mut req_buffer.buf) {
Ok(x) => x,
Err(e) => {
println!("Failed to read from UDP socket: {:?}", e);
continue;
}
};
// Parse it
let request = match DnsPacket::from_buffer(&mut req_buffer) {
Ok(x) => x,
Err(e) => {
println!("Failed to parse UDP query packet: {:?}", e);
continue;
}
};
// Acquire lock, add request to queue, and notify waiting threads
// using the condition.
match self.request_queue.lock() {
Ok(mut queue) => {
queue.push_back((src, request));
self.request_cond.notify_one();
}
Err(e) => {
println!("Failed to send UDP request for processing: {}", e);
}
}
}
})?;
Ok(())
}
}
/// TCP DNS server
pub struct DnsTcpServer {
context: Arc<ServerContext>,
senders: Vec<Sender<TcpStream>>,
thread_count: usize,
}
impl DnsTcpServer {
pub fn new(context: Arc<ServerContext>, thread_count: usize) -> DnsTcpServer {
DnsTcpServer {
context: context,
senders: Vec::new(),
thread_count: thread_count,
}
}
}
impl DnsServer for DnsTcpServer {
fn run_server(mut self) -> Result<()> {
let socket = TcpListener::bind(("0.0.0.0", self.context.dns_port))?;
// Spawn threads for handling requests, and create the channels
for thread_id in 0..self.thread_count {
let (tx, rx) = channel();
self.senders.push(tx);
let context = self.context.clone();
let name = "DnsTcpServer-request-".to_string() + &thread_id.to_string();
let _ = Builder::new().name(name).spawn(move || {
loop {
let mut stream = match rx.recv() {
Ok(x) => x,
Err(_) => continue,
};
let _ = context
.statistics
.tcp_query_count
.fetch_add(1, Ordering::Release);
// When DNS packets are sent over TCP, they're prefixed with a two byte
// length. We don't really need to know the length in advance, so we
// just move past it and continue reading as usual
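// Editor's note (not in the original file): this is standard RFC 1035
// DNS-over-TCP framing; each message is preceded by a big-endian u16 length,
// e.g. parsed as:
//
//     let len = u16::from_be_bytes([buf[0], buf[1]]) as usize; // buf is hypothetical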
ignore_or_report!(
read_packet_length(&mut stream),
"Failed to read query packet length"
);
let request = {
let mut stream_buffer = StreamPacketBuffer::new(&mut stream);
return_or_report!(
DnsPacket::from_buffer(&mut stream_buffer),
"Failed to read query packet"
)
};
let mut res_buffer = VectorPacketBuffer::new();
let mut packet = execute_query(context.clone(), &request);
ignore_or_report!(
packet.write(&mut res_buffer, 0xFFFF),
"Failed to write packet to buffer"
);
// As is the case for incoming queries, we need to send a 2 byte length
// value before handing off the actual packet.
let len = res_buffer.pos();
ignore_or_report!(
write_packet_length(&mut stream, len),
"Failed to write packet size"
);
// Now we can go ahead and write the actual packet
let data = return_or_report!(
res_buffer.get_range(0, len),
"Failed to get packet data"
);
ignore_or_report!(stream.write(data), "Failed to write response packet");
ignore_or_report!(stream.shutdown(Shutdown::Both), "Failed to shutdown socket");
}
})?;
}
let _ = Builder::new()
.name("DnsTcpServer-incoming".into())
.spawn(move || {
for wrap_stream in socket.incoming() {
let stream = match wrap_stream {
Ok(stream) => stream,
Err(err) => {
println!("Failed to accept TCP connection: {:?}", err);
continue;
}
};
// Hand it off to a worker thread
let thread_no = random::<usize>() % self.thread_count;
match self.senders[thread_no].send(stream) {
Ok(_) => {}
Err(e) => {
println!(
"Failed to send TCP request for processing on thread {}: {}",
thread_no, e
);
}
}
}
})?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::net::Ipv4Addr;
use std::sync::Arc;
use crate::dns::protocol::{
DnsPacket, DnsQuestion, DnsRecord, QueryType, ResultCode, TransientTtl,
};
use super::*;
use crate::dns::context::tests::create_test_context;
use crate::dns::context::ResolveStrategy;
fn build_query(qname: &str, qtype: QueryType) -> DnsPacket {
let mut query_packet = DnsPacket::new();
query_packet.header.recursion_desired = true;
query_packet
.questions
.push(DnsQuestion::new(qname.into(), qtype));
query_packet
}
#[test]
fn test_execute_query() {
// Construct a context to execute some queries successfully
let mut context = create_test_context(Box::new(|qname, qtype, _, _| {
let mut packet = DnsPacket::new();
if qname == "google.com" {
packet.answers.push(DnsRecord::A {
domain: "google.com".to_string(),
addr: "127.0.0.1".parse::<Ipv4Addr>().unwrap(),
ttl: TransientTtl(3600), | host: "cdn.facebook.com".to_string(),
ttl: TransientTtl(3600),
});
packet.answers.push(DnsRecord::A {
domain: "cdn.facebook.com".to_string(),
addr: "127.0.0.1".parse::<Ipv4Addr>().unwrap(),
ttl: TransientTtl(3600),
});
} else if qname == "www.microsoft.com" && qtype == QueryType::CNAME {
packet.answers.push(DnsRecord::CNAME {
domain: "www.microsoft.com".to_string(),
host: "cdn.microsoft.com".to_string(),
ttl: TransientTtl(3600),
});
} else if qname == "cdn.microsoft.com" && qtype == QueryType::A {
packet.answers.push(DnsRecord::A {
domain: "cdn.microsoft.com".to_string(),
addr: "127.0.0.1".parse::<Ipv4Addr>().unwrap(),
ttl: TransientTtl(3600),
});
} else {
packet.header.rescode = ResultCode::NXDOMAIN;
}
Ok(packet)
}));
match Arc::get_mut(&mut context) {
Some(mut ctx) => {
ctx.resolve_strategy = ResolveStrategy::Forward {
host: "127.0.0.1".to_string(),
port: 53,
};
}
None => panic!(),
}
// A successful resolve
{
let res = execute_query(context.clone(), &build_query("google.com", QueryType::A));
assert_eq!(1, res.answers.len());
match res.answers[0] {
DnsRecord::A { ref domain, .. } => {
assert_eq!("google.com", domain);
}
_ => panic!(),
}
};
// A successful resolve, that also resolves a CNAME without recursive lookup
{
let res = execute_query(
context.clone(),
&build_query("www.facebook.com", QueryType::CNAME),
);
assert_eq!(2, res.answers.len());
match res.answers[0] {
DnsRecord::CNAME { ref domain, .. } => {
assert_eq!("www.facebook.com", domain);
}
_ => panic!(),
}
match res.answers[1] {
DnsRecord::A { ref domain, .. } => {
assert_eq!("cdn.facebook.com", domain);
}
_ => panic!(),
}
};
// A successful resolve, that also resolves a CNAME through recursive lookup
{
let res = execute_query(
context.clone(),
&build_query("www.microsoft.com", QueryType::CNAME),
);
assert_eq!(2, res.answers.len());
match res.answers[0] {
DnsRecord::CNAME { ref domain, .. } => {
assert_eq!("www.microsoft.com", domain);
}
_ => panic!(),
}
match res.answers[1] {
DnsRecord::A { ref domain, .. } => {
assert_eq!("cdn.microsoft.com", domain);
}
_ => panic!(),
}
};
// An unsuccessful resolve, but without any error
{
let res = execute_query(context.clone(), &build_query("yahoo.com", QueryType::A));
assert_eq!(ResultCode::NXDOMAIN, res.header.rescode);
assert_eq!(0, res.answers.len());
};
// Disable recursive resolves to generate a failure
match Arc::get_mut(&mut context) {
Some(mut ctx) => {
ctx.allow_recursive = false;
}
None => panic!(),
}
// This should generate an error code, since recursive resolves are
// no longer allowed
{
let res = execute_query(context.clone(), &build_query("yahoo.com", QueryType::A));
assert_eq!(ResultCode::REFUSED, res.header.rescode);
assert_eq!(0, res.answers.len());
};
// Send a query without a question, which should fail with an error code
{
let query_packet = DnsPacket::new();
let res = execute_query(context.clone(), &query_packet);
assert_eq!(ResultCode::FORMERR, res.header.rescode);
assert_eq!(0, res.answers.len());
};
// Now construct a context where the dns client will return a failure
let mut context2 = create_test_context(Box::new(|_, _, _, _| {
Err(crate::dns::client::ClientError::Io(std::io::Error::new(
std::io::ErrorKind::NotFound,
"Fail",
)))
}));
match Arc::get_mut(&mut context2) {
Some(mut ctx) => {
ctx.resolve_strategy = ResolveStrategy::Forward {
host: "127.0.0.1".to_string(),
port: 53,
};
}
None => panic!(),
}
// We expect this to set the server failure rescode
{
let res = execute_query(context2.clone(), &build_query("yahoo.com", QueryType::A));
assert_eq!(ResultCode::SERVFAIL, res.header.rescode);
assert_eq!(0, res.answers.len());
};
}
} | });
} else if qname == "www.facebook.com" && qtype == QueryType::CNAME {
packet.answers.push(DnsRecord::CNAME {
domain: "www.facebook.com".to_string(), |
index.js | const fs = require('fs/promises');
const stylesUrl = `${__dirname}/styles`;
const distUrl = `${__dirname}/project-dist`;
(async function(){ | await fs.writeFile(`${distUrl}/bundle.css`, bundleContent);
})(); | const fileNames = (await fs.readdir(stylesUrl, {withFileTypes: true})).filter((file) => file.isFile() && file.name.endsWith('.css'));
const bundleContent = (await Promise.all(fileNames.map(async (file) => await fs.readFile(`${stylesUrl}/${file.name}`)))).join(''); |
vsMetaInfoGenerator.py | import re
from vsmetaEncoder import vsmetaInfo
from datetime import datetime, date
class VsMetaInfoGenerator(vsmetaInfo.VsMetaInfo):
def __init__(self, feedItem):
super(VsMetaInfoGenerator, self).__init__()
self.feedItem = feedItem
self.download_url = ''
# parse feedItem
if hasattr(feedItem, 'title'): self.episodeTitle = feedItem.title
if hasattr(feedItem, 'category'): self.showTitle = feedItem.category
if hasattr(feedItem, 'summary'): self.chapterSummary = feedItem.summary
if hasattr(feedItem, 'description'): self.chapterSummary = feedItem.description
if hasattr(feedItem, 'link'): self.download_url = feedItem.link
#if hasattr(feedItem, 'published'): self.episodeReleaseDate = datetime.strptime(feedItem.published, "%a, %d %b %Y %H:%M:%S GMT" ) |
#cleaning some parts
self.chapterSummary = self.chapterSummary.replace('![CDATA[', '')
self.chapterSummary = self.chapterSummary.replace(']]', '')
self.tvshowLocked = True
self.episodeLocked = True
episodeFound = re.search('[(](\d*)\/\d[)]',self.episodeTitle)
if episodeFound != None:
self.episode = int(episodeFound.group(1))
seasonFound = re.search(' Staffel (\d*) ',self.episodeTitle)
if seasonFound != None:
self.season = int(seasonFound.group(1))
# set other defaults
self.episodeLocked = False
self.tvshowLocked = False
self.identifyingTerm = '%s - %s -s%se%s' % (self.showTitle, self.episodeTitle, self.season, self.episode)
def isUsable(self) ->bool:
if (len(self.episodeTitle) > 0 or len(self.showTitle) > 0 or len(self.showTitle2) > 0) and len(self.download_url) > 0:
return True
else:
return False | if hasattr(feedItem, 'published'): self.setEpisodeDate(datetime.strptime(feedItem.published, "%a, %d %b %Y %H:%M:%S GMT").date())
if hasattr(feedItem, 'description'): self.chapterSummary = feedItem.description |
Solution.py | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'birthdayCakeCandles' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY candles as parameter.
#
def birthdayCakeCandles(candles):
# Write your code here
|
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
candles_count = int(input().strip())
candles = list(map(int, input().rstrip().split()))
result = birthdayCakeCandles(candles)
fptr.write(str(result) + '\n')
fptr.close()
| count = 0
pt = max(candles)
for i in candles:
if i == pt:
count+=1
return count |