| file_name | prefix | suffix | middle |
| --- | --- | --- | --- |
| stringlengths 3-137 | stringlengths 0-918k | stringlengths 0-962k | stringlengths 0-812k |
invoke_context.rs | use crate::{
ic_logger_msg, ic_msg,
instruction_processor::{ExecuteDetailsTimings, Executor, Executors, PreAccount},
instruction_recorder::InstructionRecorder,
log_collector::LogCollector,
};
use log::*;
use solana_sdk::{
account::{AccountSharedData, ReadableAccount},
compute_budget::ComputeBudget,
feature_set::{
demote_program_write_locks, do_support_realloc, neon_evm_compute_budget,
remove_native_loader, requestable_heap_size, tx_wide_compute_cap, FeatureSet,
},
hash::Hash,
instruction::{AccountMeta, CompiledInstruction, Instruction, InstructionError},
keyed_account::{create_keyed_accounts_unified, KeyedAccount},
message::Message,
pubkey::Pubkey,
rent::Rent,
sysvar::Sysvar,
};
use std::{cell::RefCell, rc::Rc, sync::Arc};
/// Compute meter
pub trait ComputeMeter {
/// Consume compute units
fn consume(&mut self, amount: u64) -> Result<(), InstructionError>;
/// Get the number of remaining compute units
fn get_remaining(&self) -> u64;
}
pub struct ThisComputeMeter {
remaining: u64,
}
impl ComputeMeter for ThisComputeMeter {
fn consume(&mut self, amount: u64) -> Result<(), InstructionError> {
let exceeded = self.remaining < amount;
self.remaining = self.remaining.saturating_sub(amount);
if exceeded {
return Err(InstructionError::ComputationalBudgetExceeded);
}
Ok(())
}
fn get_remaining(&self) -> u64 {
self.remaining
}
}
impl ThisComputeMeter {
pub fn new_ref(remaining: u64) -> Rc<RefCell<Self>> {
Rc::new(RefCell::new(Self { remaining }))
}
}
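// Illustrative usage sketch (not part of the original source): charging compute
// units against a `ThisComputeMeter`; the unit amounts are arbitrary example values.
//
//     let compute_meter = ThisComputeMeter::new_ref(1_000);
//     compute_meter.borrow_mut().consume(200).unwrap();          // ok, 800 units left
//     assert_eq!(compute_meter.borrow().get_remaining(), 800);
//     assert!(compute_meter.borrow_mut().consume(900).is_err()); // budget exceeded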
/// Log messages
pub trait Logger {
fn log_enabled(&self) -> bool;
/// Log a message.
///
/// Unless explicitly stated, log messages are not considered stable and may change in the
/// future as necessary
fn log(&self, message: &str);
}
pub struct ThisLogger {
log_collector: Option<Rc<LogCollector>>,
}
impl Logger for ThisLogger {
fn log_enabled(&self) -> bool {
log_enabled!(log::Level::Info) || self.log_collector.is_some()
}
fn log(&self, message: &str) {
debug!("{}", message);
if let Some(log_collector) = &self.log_collector {
log_collector.log(message);
}
}
}
impl ThisLogger {
pub fn new_ref(log_collector: Option<Rc<LogCollector>>) -> Rc<RefCell<Self>> {
Rc::new(RefCell::new(Self { log_collector }))
}
}
/// Convenience macro to log a message with an `Rc<RefCell<dyn Logger>>`
#[macro_export]
macro_rules! ic_logger_msg {
($logger:expr, $message:expr) => {
if let Ok(logger) = $logger.try_borrow_mut() {
if logger.log_enabled() {
logger.log($message);
}
}
};
($logger:expr, $fmt:expr, $($arg:tt)*) => {
if let Ok(logger) = $logger.try_borrow_mut() {
if logger.log_enabled() {
logger.log(&format!($fmt, $($arg)*));
}
}
};
}
/// Convenience macro to log a message with an `InvokeContext`
#[macro_export]
macro_rules! ic_msg {
($invoke_context:expr, $message:expr) => {
$crate::ic_logger_msg!($invoke_context.get_logger(), $message)
};
($invoke_context:expr, $fmt:expr, $($arg:tt)*) => {
$crate::ic_logger_msg!($invoke_context.get_logger(), $fmt, $($arg)*)
};
}
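// Illustrative usage sketch (assumption, not original code): both macros only
// format and emit when the underlying `Logger` reports `log_enabled()`. Here
// `logger` is an `Rc<RefCell<dyn Logger>>` and `invoke_context` any `InvokeContext`.
//
//     ic_logger_msg!(logger, "failed to verify account {}: {}", pubkey, err);
//     ic_msg!(invoke_context, "Unable to get sysvar {}", sysvar_id);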
pub struct InvokeContextStackFrame<'a> {
pub number_of_program_accounts: usize,
pub keyed_accounts: Vec<KeyedAccount<'a>>,
pub keyed_accounts_range: std::ops::Range<usize>,
}
impl<'a> InvokeContextStackFrame<'a> {
pub fn new(number_of_program_accounts: usize, keyed_accounts: Vec<KeyedAccount<'a>>) -> Self {
let keyed_accounts_range = std::ops::Range {
start: 0,
end: keyed_accounts.len(),
};
Self {
number_of_program_accounts,
keyed_accounts,
keyed_accounts_range,
}
}
pub fn program_id(&self) -> Option<&Pubkey> {
self.keyed_accounts
.get(self.number_of_program_accounts.saturating_sub(1))
.map(|keyed_account| keyed_account.unsigned_key())
}
}
pub struct ThisInvokeContext<'a> {
instruction_index: usize,
invoke_stack: Vec<InvokeContextStackFrame<'a>>,
rent: Rent,
pre_accounts: Vec<PreAccount>,
accounts: &'a [(Pubkey, Rc<RefCell<AccountSharedData>>)],
programs: &'a [(Pubkey, ProcessInstructionWithContext)],
sysvars: &'a [(Pubkey, Vec<u8>)],
logger: Rc<RefCell<dyn Logger>>,
compute_budget: ComputeBudget,
current_compute_budget: ComputeBudget,
compute_meter: Rc<RefCell<dyn ComputeMeter>>,
executors: Rc<RefCell<Executors>>,
instruction_recorders: Option<&'a [InstructionRecorder]>,
feature_set: Arc<FeatureSet>,
pub timings: ExecuteDetailsTimings,
blockhash: Hash,
lamports_per_signature: u64,
return_data: (Pubkey, Vec<u8>),
}
impl<'a> ThisInvokeContext<'a> {
#[allow(clippy::too_many_arguments)]
pub fn new(
rent: Rent,
accounts: &'a [(Pubkey, Rc<RefCell<AccountSharedData>>)],
programs: &'a [(Pubkey, ProcessInstructionWithContext)],
sysvars: &'a [(Pubkey, Vec<u8>)],
log_collector: Option<Rc<LogCollector>>,
compute_budget: ComputeBudget,
compute_meter: Rc<RefCell<dyn ComputeMeter>>,
executors: Rc<RefCell<Executors>>,
instruction_recorders: Option<&'a [InstructionRecorder]>,
feature_set: Arc<FeatureSet>,
blockhash: Hash,
lamports_per_signature: u64,
) -> Self {
Self {
instruction_index: 0,
invoke_stack: Vec::with_capacity(compute_budget.max_invoke_depth),
rent,
pre_accounts: Vec::new(),
accounts,
programs,
sysvars,
logger: ThisLogger::new_ref(log_collector),
current_compute_budget: compute_budget,
compute_budget,
compute_meter,
executors,
instruction_recorders,
feature_set,
timings: ExecuteDetailsTimings::default(),
blockhash,
lamports_per_signature,
return_data: (Pubkey::default(), Vec::new()),
}
}
pub fn new_mock_with_sysvars_and_features(
accounts: &'a [(Pubkey, Rc<RefCell<AccountSharedData>>)],
programs: &'a [(Pubkey, ProcessInstructionWithContext)],
sysvars: &'a [(Pubkey, Vec<u8>)],
feature_set: Arc<FeatureSet>,
) -> Self {
Self::new(
Rent::default(),
accounts,
programs,
sysvars,
None,
ComputeBudget::default(),
ThisComputeMeter::new_ref(std::i64::MAX as u64),
Rc::new(RefCell::new(Executors::default())),
None,
feature_set,
Hash::default(),
0,
)
}
pub fn new_mock(
accounts: &'a [(Pubkey, Rc<RefCell<AccountSharedData>>)],
programs: &'a [(Pubkey, ProcessInstructionWithContext)],
) -> Self {
Self::new_mock_with_sysvars_and_features(
accounts,
programs,
&[],
Arc::new(FeatureSet::all_enabled()),
)
}
}
/// Invocation context passed to loaders
pub trait InvokeContext {
/// Push a stack frame onto the invocation stack
fn push(
&mut self,
message: &Message,
instruction: &CompiledInstruction,
program_indices: &[usize],
account_indices: Option<&[usize]>,
) -> Result<(), InstructionError>;
/// Pop a stack frame from the invocation stack
fn pop(&mut self);
/// Current depth of the invocation stack
fn invoke_depth(&self) -> usize;
/// Verify the results of an instruction
fn verify(
&mut self,
message: &Message,
instruction: &CompiledInstruction,
program_indices: &[usize],
) -> Result<(), InstructionError>;
/// Verify and update PreAccount state based on program execution
fn verify_and_update(
&mut self,
instruction: &CompiledInstruction,
account_indices: &[usize],
write_privileges: &[bool],
) -> Result<(), InstructionError>;
/// Get the program ID of the currently executing program
fn get_caller(&self) -> Result<&Pubkey, InstructionError>;
/// Removes the first keyed account
#[deprecated(
since = "1.9.0",
note = "To be removed together with remove_native_loader"
)]
fn remove_first_keyed_account(&mut self) -> Result<(), InstructionError>;
/// Get the list of keyed accounts
fn get_keyed_accounts(&self) -> Result<&[KeyedAccount], InstructionError>;
/// Get a list of built-in programs
fn get_programs(&self) -> &[(Pubkey, ProcessInstructionWithContext)];
/// Get this invocation's logger
fn get_logger(&self) -> Rc<RefCell<dyn Logger>>;
/// Get this invocation's compute meter
fn get_compute_meter(&self) -> Rc<RefCell<dyn ComputeMeter>>;
/// Loaders may need to do work in order to execute a program. Cache
/// the work that can be re-used across executions
fn add_executor(&self, pubkey: &Pubkey, executor: Arc<dyn Executor>);
/// Get the completed loader work that can be re-used across executions
fn get_executor(&self, pubkey: &Pubkey) -> Option<Arc<dyn Executor>>;
/// Set which instruction in the message is currently being recorded
fn set_instruction_index(&mut self, instruction_index: usize);
/// Record invoked instruction
fn record_instruction(&self, instruction: &Instruction);
/// Check whether a feature is active in the bank's feature set
fn is_feature_active(&self, feature_id: &Pubkey) -> bool;
/// Find an account_index and account by its key
fn get_account(&self, pubkey: &Pubkey) -> Option<(usize, Rc<RefCell<AccountSharedData>>)>;
/// Update timing
fn update_timing(
&mut self,
serialize_us: u64,
create_vm_us: u64,
execute_us: u64,
deserialize_us: u64,
);
/// Get sysvars
fn get_sysvars(&self) -> &[(Pubkey, Vec<u8>)];
/// Get this invocation's compute budget
fn get_compute_budget(&self) -> &ComputeBudget;
/// Set this invocation's blockhash
fn set_blockhash(&mut self, hash: Hash);
/// Get this invocation's blockhash
fn get_blockhash(&self) -> &Hash;
/// Set this invocation's lamports_per_signature value
fn set_lamports_per_signature(&mut self, lamports_per_signature: u64);
/// Get this invocation's lamports_per_signature value
fn get_lamports_per_signature(&self) -> u64;
/// Set the return data
fn set_return_data(&mut self, data: Vec<u8>) -> Result<(), InstructionError>;
/// Get the return data
fn get_return_data(&self) -> (Pubkey, &[u8]);
}
impl<'a> InvokeContext for ThisInvokeContext<'a> {
fn push(
&mut self,
message: &Message,
instruction: &CompiledInstruction,
program_indices: &[usize],
account_indices: Option<&[usize]>,
) -> Result<(), InstructionError> {
if self.invoke_stack.len() > self.compute_budget.max_invoke_depth {
return Err(InstructionError::CallDepth);
}
let program_id = program_indices
.last()
.map(|index_of_program_id| &self.accounts[*index_of_program_id].0);
if self.invoke_stack.is_empty() {
let mut compute_budget = self.compute_budget;
if !self.is_feature_active(&tx_wide_compute_cap::id())
&& self.is_feature_active(&neon_evm_compute_budget::id())
&& program_id == Some(&crate::neon_evm_program::id())
{
// Bump the compute budget for neon_evm
compute_budget.max_units = compute_budget.max_units.max(500_000);
}
if !self.is_feature_active(&requestable_heap_size::id())
&& self.is_feature_active(&neon_evm_compute_budget::id())
&& program_id == Some(&crate::neon_evm_program::id())
{
// Bump the compute budget for neon_evm
compute_budget.heap_size = Some(256_usize.saturating_mul(1024));
}
self.current_compute_budget = compute_budget;
if !self.feature_set.is_active(&tx_wide_compute_cap::id()) {
self.compute_meter =
ThisComputeMeter::new_ref(self.current_compute_budget.max_units);
}
self.pre_accounts = Vec::with_capacity(instruction.accounts.len());
let mut work = |_unique_index: usize, account_index: usize| {
if account_index < self.accounts.len() {
let account = self.accounts[account_index].1.borrow();
self.pre_accounts
.push(PreAccount::new(&self.accounts[account_index].0, &account));
return Ok(());
}
Err(InstructionError::MissingAccount)
};
instruction.visit_each_account(&mut work)?;
} else {
let contains = self
.invoke_stack
.iter()
.any(|frame| frame.program_id() == program_id);
let is_last = if let Some(last_frame) = self.invoke_stack.last() {
last_frame.program_id() == program_id
} else {
false
};
if contains && !is_last {
// Reentrancy not allowed unless caller is calling itself
return Err(InstructionError::ReentrancyNotAllowed);
}
}
// Create the KeyedAccounts that will be passed to the program
let demote_program_write_locks = self
.feature_set
.is_active(&demote_program_write_locks::id());
let keyed_accounts = program_indices
.iter()
.map(|account_index| {
(
false,
false,
&self.accounts[*account_index].0,
&self.accounts[*account_index].1 as &RefCell<AccountSharedData>,
)
})
.chain(instruction.accounts.iter().map(|index_in_instruction| {
let index_in_instruction = *index_in_instruction as usize;
let account_index = if let Some(account_indices) = account_indices {
account_indices[index_in_instruction]
} else {
index_in_instruction
};
(
message.is_signer(index_in_instruction),
message.is_writable(index_in_instruction, demote_program_write_locks),
&self.accounts[account_index].0,
&self.accounts[account_index].1 as &RefCell<AccountSharedData>,
)
}))
.collect::<Vec<_>>();
self.invoke_stack.push(InvokeContextStackFrame::new(
program_indices.len(),
create_keyed_accounts_unified(keyed_accounts.as_slice()),
));
Ok(())
}
fn pop(&mut self) {
self.invoke_stack.pop();
}
fn invoke_depth(&self) -> usize {
self.invoke_stack.len()
}
fn verify(
&mut self,
message: &Message,
instruction: &CompiledInstruction,
program_indices: &[usize],
) -> Result<(), InstructionError> {
let program_id = instruction.program_id(&message.account_keys);
let demote_program_write_locks = self.is_feature_active(&demote_program_write_locks::id());
let do_support_realloc = self.is_feature_active(&do_support_realloc::id());
// Verify all executable accounts have zero outstanding refs
for account_index in program_indices.iter() {
self.accounts[*account_index]
.1
.try_borrow_mut()
.map_err(|_| InstructionError::AccountBorrowOutstanding)?;
}
// Verify the per-account instruction results
let (mut pre_sum, mut post_sum) = (0_u128, 0_u128);
let mut work = |unique_index: usize, account_index: usize| {
{
// Verify account has no outstanding references
let _ = self.accounts[account_index]
.1
.try_borrow_mut()
.map_err(|_| InstructionError::AccountBorrowOutstanding)?;
}
let pre_account = &self.pre_accounts[unique_index];
let account = self.accounts[account_index].1.borrow();
pre_account
.verify(
program_id,
message.is_writable(account_index, demote_program_write_locks),
&self.rent,
&account,
&mut self.timings,
true,
do_support_realloc,
)
.map_err(|err| {
ic_logger_msg!(
self.logger,
"failed to verify account {}: {}",
pre_account.key(),
err
);
err
})?;
pre_sum = pre_sum
.checked_add(u128::from(pre_account.lamports()))
.ok_or(InstructionError::UnbalancedInstruction)?;
post_sum = post_sum
.checked_add(u128::from(account.lamports()))
.ok_or(InstructionError::UnbalancedInstruction)?;
Ok(())
};
instruction.visit_each_account(&mut work)?;
// Verify that the total sum of all the lamports did not change
if pre_sum != post_sum {
return Err(InstructionError::UnbalancedInstruction);
}
Ok(())
}
fn verify_and_update(
&mut self,
instruction: &CompiledInstruction,
account_indices: &[usize],
write_privileges: &[bool],
) -> Result<(), InstructionError> {
let do_support_realloc = self.feature_set.is_active(&do_support_realloc::id());
let program_id = self
.invoke_stack
.last()
.and_then(|frame| frame.program_id())
.ok_or(InstructionError::CallDepth)?;
let rent = &self.rent;
let logger = &self.logger;
let accounts = &self.accounts;
let pre_accounts = &mut self.pre_accounts;
let timings = &mut self.timings;
// Verify the per-account instruction results
let (mut pre_sum, mut post_sum) = (0_u128, 0_u128);
let mut work = |_unique_index: usize, index_in_instruction: usize| {
if index_in_instruction < write_privileges.len()
&& index_in_instruction < account_indices.len()
{
let account_index = account_indices[index_in_instruction];
let (key, account) = &accounts[account_index];
let is_writable = write_privileges[index_in_instruction];
// Find the matching PreAccount
for pre_account in pre_accounts.iter_mut() {
if key == pre_account.key() {
{
// Verify account has no outstanding references
let _ = account
.try_borrow_mut()
.map_err(|_| InstructionError::AccountBorrowOutstanding)?;
}
let account = account.borrow();
pre_account
.verify(
program_id,
is_writable,
rent,
&account,
timings,
false,
do_support_realloc,
)
.map_err(|err| {
ic_logger_msg!(logger, "failed to verify account {}: {}", key, err);
err
})?;
pre_sum = pre_sum
.checked_add(u128::from(pre_account.lamports()))
.ok_or(InstructionError::UnbalancedInstruction)?;
post_sum = post_sum
.checked_add(u128::from(account.lamports()))
.ok_or(InstructionError::UnbalancedInstruction)?;
if is_writable && !pre_account.executable() {
pre_account.update(&account);
}
return Ok(());
}
}
}
Err(InstructionError::MissingAccount)
};
instruction.visit_each_account(&mut work)?;
// Verify that the total sum of all the lamports did not change
if pre_sum != post_sum {
return Err(InstructionError::UnbalancedInstruction);
}
Ok(())
}
fn get_caller(&self) -> Result<&Pubkey, InstructionError> {
self.invoke_stack
.last()
.and_then(|frame| frame.program_id())
.ok_or(InstructionError::CallDepth)
}
fn remove_first_keyed_account(&mut self) -> Result<(), InstructionError> {
if !self.is_feature_active(&remove_native_loader::id()) {
let stack_frame = &mut self
.invoke_stack
.last_mut()
.ok_or(InstructionError::CallDepth)?;
stack_frame.keyed_accounts_range.start =
stack_frame.keyed_accounts_range.start.saturating_add(1);
}
Ok(())
}
fn get_keyed_accounts(&self) -> Result<&[KeyedAccount], InstructionError> {
self.invoke_stack
.last()
.map(|frame| &frame.keyed_accounts[frame.keyed_accounts_range.clone()])
.ok_or(InstructionError::CallDepth)
}
fn get_programs(&self) -> &[(Pubkey, ProcessInstructionWithContext)] {
self.programs
}
fn get_logger(&self) -> Rc<RefCell<dyn Logger>> {
self.logger.clone()
}
fn get_compute_meter(&self) -> Rc<RefCell<dyn ComputeMeter>> {
self.compute_meter.clone()
}
fn add_executor(&self, pubkey: &Pubkey, executor: Arc<dyn Executor>) {
self.executors.borrow_mut().insert(*pubkey, executor);
}
fn get_executor(&self, pubkey: &Pubkey) -> Option<Arc<dyn Executor>> {
self.executors.borrow().get(pubkey)
}
fn set_instruction_index(&mut self, instruction_index: usize) {
self.instruction_index = instruction_index;
}
fn record_instruction(&self, instruction: &Instruction) {
if let Some(instruction_recorders) = &self.instruction_recorders {
instruction_recorders[self.instruction_index].record_instruction(instruction.clone());
}
}
fn is_feature_active(&self, feature_id: &Pubkey) -> bool {
self.feature_set.is_active(feature_id)
}
fn get_account(&self, pubkey: &Pubkey) -> Option<(usize, Rc<RefCell<AccountSharedData>>)> {
for (index, (key, account)) in self.accounts.iter().enumerate().rev() {
if key == pubkey {
return Some((index, account.clone()));
}
}
None
}
fn update_timing(
&mut self,
serialize_us: u64,
create_vm_us: u64,
execute_us: u64,
deserialize_us: u64,
) {
self.timings.serialize_us = self.timings.serialize_us.saturating_add(serialize_us);
self.timings.create_vm_us = self.timings.create_vm_us.saturating_add(create_vm_us);
self.timings.execute_us = self.timings.execute_us.saturating_add(execute_us);
self.timings.deserialize_us = self.timings.deserialize_us.saturating_add(deserialize_us);
}
fn get_sysvars(&self) -> &[(Pubkey, Vec<u8>)] {
self.sysvars
}
fn get_compute_budget(&self) -> &ComputeBudget {
&self.current_compute_budget
}
fn set_blockhash(&mut self, hash: Hash) {
self.blockhash = hash;
}
fn get_blockhash(&self) -> &Hash {
&self.blockhash
}
fn set_lamports_per_signature(&mut self, lamports_per_signature: u64) {
self.lamports_per_signature = lamports_per_signature;
}
fn get_lamports_per_signature(&self) -> u64 {
self.lamports_per_signature
}
fn set_return_data(&mut self, data: Vec<u8>) -> Result<(), InstructionError> {
self.return_data = (*self.get_caller()?, data);
Ok(())
}
fn get_return_data(&self) -> (Pubkey, &[u8]) {
(self.return_data.0, &self.return_data.1)
}
}
// This method, which has a generic parameter, lives outside of `InvokeContext`
// because `InvokeContext` is used as a trait object (`dyn Trait`) and therefore
// cannot have generic methods.
pub fn get_sysvar<T: Sysvar>(
invoke_context: &dyn InvokeContext,
id: &Pubkey,
) -> Result<T, InstructionError> {
invoke_context
.get_sysvars()
.iter()
.find_map(|(key, data)| {
if id == key {
bincode::deserialize(data).ok()
} else {
None
}
})
.ok_or_else(|| {
ic_msg!(invoke_context, "Unable to get sysvar {}", id);
InstructionError::UnsupportedSysvar
})
}
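// Illustrative sketch (assumption, not original code): fetching a typed sysvar,
// e.g. `Rent`, through the generic helper above. This only succeeds if the sysvar
// was loaded into the slice returned by `get_sysvars()`.
//
//     let rent: Rent = get_sysvar(invoke_context, &solana_sdk::sysvar::rent::id())?;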
pub type ProcessInstructionWithContext =
fn(usize, &[u8], &mut dyn InvokeContext) -> Result<(), InstructionError>;
pub struct MockInvokeContextPreparation {
pub accounts: Vec<(Pubkey, Rc<RefCell<AccountSharedData>>)>,
pub message: Message,
pub account_indices: Vec<usize>,
}
pub fn prepare_mock_invoke_context(
program_indices: &[usize],
instruction_data: &[u8],
keyed_accounts: &[(bool, bool, Pubkey, Rc<RefCell<AccountSharedData>>)],
) -> MockInvokeContextPreparation {
#[allow(clippy::type_complexity)]
let (accounts, mut metas): (
Vec<(Pubkey, Rc<RefCell<AccountSharedData>>)>,
Vec<AccountMeta>,
) = keyed_accounts
.iter()
.map(|(is_signer, is_writable, pubkey, account)| {
(
(*pubkey, account.clone()),
AccountMeta {
pubkey: *pubkey,
is_signer: *is_signer,
is_writable: *is_writable,
},
)
})
.unzip();
let program_id = if let Some(program_index) = program_indices.last() {
accounts[*program_index].0
} else {
Pubkey::default()
};
for program_index in program_indices.iter().rev() {
metas.remove(*program_index);
}
let message = Message::new(
&[Instruction::new_with_bytes(
program_id,
instruction_data,
metas,
)],
None,
);
let account_indices: Vec<usize> = message
.account_keys
.iter()
.map(|search_key| {
accounts
.iter()
.position(|(key, _account)| key == search_key)
.unwrap_or(accounts.len())
})
.collect();
MockInvokeContextPreparation {
accounts,
message,
account_indices,
}
}
pub fn with_mock_invoke_context<R, F: FnMut(&mut ThisInvokeContext) -> R>(
loader_id: Pubkey,
account_size: usize,
mut callback: F,
) -> R {
let program_indices = vec![0, 1];
let keyed_accounts = [
(
false,
false,
loader_id,
AccountSharedData::new_ref(0, 0, &solana_sdk::native_loader::id()),
),
(
false,
false,
Pubkey::new_unique(),
AccountSharedData::new_ref(1, 0, &loader_id),
),
(
false,
false,
Pubkey::new_unique(),
AccountSharedData::new_ref(2, account_size, &Pubkey::new_unique()),
),
];
let preparation = prepare_mock_invoke_context(&program_indices, &[], &keyed_accounts);
let mut invoke_context = ThisInvokeContext::new_mock(&preparation.accounts, &[]);
invoke_context
.push(
&preparation.message,
&preparation.message.instructions[0],
&program_indices,
Some(&preparation.account_indices),
)
.unwrap();
callback(&mut invoke_context)
}
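// Illustrative usage sketch (assumption, not original code): running a check
// inside a mocked invocation context; `bpf_loader::id()` stands in for any
// loader id and `10` is an arbitrary account data size.
//
//     with_mock_invoke_context(solana_sdk::bpf_loader::id(), 10, |invoke_context| {
//         let keyed_accounts = invoke_context.get_keyed_accounts().unwrap();
//         assert!(!keyed_accounts.is_empty());
//     });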
pub fn mock_process_instruction(
loader_id: &Pubkey,
mut program_indices: Vec<usize>,
instruction_data: &[u8],
keyed_accounts: &[(bool, bool, Pubkey, Rc<RefCell<AccountSharedData>>)],
process_instruction: ProcessInstructionWithContext,
) -> Result<(), InstructionError> {
let mut preparation =
prepare_mock_invoke_context(&program_indices, instruction_data, keyed_accounts);
let processor_account = AccountSharedData::new_ref(0, 0, &solana_sdk::native_loader::id());
program_indices.insert(0, preparation.accounts.len());
preparation.accounts.push((*loader_id, processor_account));
let mut invoke_context = ThisInvokeContext::new_mock(&preparation.accounts, &[]);
invoke_context.push(
&preparation.message,
&preparation.message.instructions[0],
&program_indices,
Some(&preparation.account_indices),
)?;
process_instruction(1, instruction_data, &mut invoke_context)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::instruction_processor::InstructionProcessor;
use serde::{Deserialize, Serialize};
use solana_sdk::{
account::{ReadableAccount, WritableAccount},
instruction::{AccountMeta, Instruction, InstructionError},
keyed_account::keyed_account_at_index,
message::Message,
native_loader,
};
#[derive(Debug, Serialize, Deserialize)]
enum MockInstruction {
NoopSuccess,
NoopFail,
ModifyOwned,
ModifyNotOwned,
ModifyReadonly,
}
#[allow(clippy::integer_arithmetic)]
fn mock_process_instruction(
first_instruction_account: usize,
data: &[u8],
invoke_context: &mut dyn InvokeContext,
) -> Result<(), InstructionError> {
let program_id = invoke_context.get_caller()?;
let keyed_accounts = invoke_context.get_keyed_accounts()?;
assert_eq!(
*program_id,
keyed_account_at_index(keyed_accounts, first_instruction_account)?.owner()?
);
assert_ne!(
keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?.owner()?,
*keyed_account_at_index(keyed_accounts, first_instruction_account)?.unsigned_key()
);
if let Ok(instruction) = bincode::deserialize(data) {
match instruction {
MockInstruction::NoopSuccess => (),
MockInstruction::NoopFail => return Err(InstructionError::GenericError),
MockInstruction::ModifyOwned => {
keyed_account_at_index(keyed_accounts, first_instruction_account)?
.try_account_ref_mut()?
.data_as_mut_slice()[0] = 1
}
MockInstruction::ModifyNotOwned => {
keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?
.try_account_ref_mut()?
.data_as_mut_slice()[0] = 1
}
MockInstruction::ModifyReadonly => {
keyed_account_at_index(keyed_accounts, first_instruction_account + 2)?
.try_account_ref_mut()?
.data_as_mut_slice()[0] = 1
}
}
} else {
return Err(InstructionError::InvalidInstructionData);
}
Ok(())
}
#[test]
fn test_invoke_context() {
const MAX_DEPTH: usize = 10;
let mut invoke_stack = vec![];
let mut accounts = vec![];
let mut metas = vec![];
for i in 0..MAX_DEPTH {
invoke_stack.push(solana_sdk::pubkey::new_rand());
accounts.push((
solana_sdk::pubkey::new_rand(),
Rc::new(RefCell::new(AccountSharedData::new(
i as u64,
1,
&invoke_stack[i],
))),
));
metas.push(AccountMeta::new(accounts[i].0, false));
}
for program_id in invoke_stack.iter() {
accounts.push((
*program_id,
Rc::new(RefCell::new(AccountSharedData::new(
1,
1,
&solana_sdk::pubkey::Pubkey::default(),
))),
));
metas.push(AccountMeta::new(*program_id, false));
}
let account_indices = (0..accounts.len()).collect::<Vec<usize>>();
let message = Message::new(
&[Instruction::new_with_bytes(invoke_stack[0], &[0], metas)],
None,
);
let mut invoke_context = ThisInvokeContext::new_mock(&accounts, &[]);
// Check call depth increases and has a limit
let mut depth_reached = 0;
for _ in 0..invoke_stack.len() {
if Err(InstructionError::CallDepth)
== invoke_context.push(
&message,
&message.instructions[0],
&[MAX_DEPTH + depth_reached],
None,
)
{
break;
}
depth_reached += 1;
}
assert_ne!(depth_reached, 0);
assert!(depth_reached < MAX_DEPTH);
// Mock each invocation
for owned_index in (1..depth_reached).rev() {
let not_owned_index = owned_index - 1;
let metas = vec![
AccountMeta::new(accounts[not_owned_index].0, false),
AccountMeta::new(accounts[owned_index].0, false),
];
let message = Message::new(
&[Instruction::new_with_bytes(
invoke_stack[owned_index],
&[0],
metas,
)],
None,
);
let write_privileges: Vec<bool> = (0..message.account_keys.len())
.map(|i| message.is_writable(i, /*demote_program_write_locks=*/ true))
.collect();
// modify account owned by the program
accounts[owned_index].1.borrow_mut().data_as_mut_slice()[0] =
(MAX_DEPTH + owned_index) as u8;
invoke_context
.verify_and_update(
&message.instructions[0],
&account_indices[not_owned_index..owned_index + 1],
&write_privileges,
)
.unwrap();
assert_eq!(
invoke_context.pre_accounts[owned_index].data()[0],
(MAX_DEPTH + owned_index) as u8
);
// modify account not owned by the program
let data = accounts[not_owned_index].1.borrow_mut().data()[0];
accounts[not_owned_index].1.borrow_mut().data_as_mut_slice()[0] =
(MAX_DEPTH + not_owned_index) as u8;
assert_eq!(
invoke_context.verify_and_update(
&message.instructions[0],
&account_indices[not_owned_index..owned_index + 1],
&write_privileges,
),
Err(InstructionError::ExternalAccountDataModified)
);
assert_eq!(invoke_context.pre_accounts[not_owned_index].data()[0], data);
accounts[not_owned_index].1.borrow_mut().data_as_mut_slice()[0] = data;
invoke_context.pop();
}
}
#[test]
fn test_invoke_context_verify() {
let accounts = vec![(
solana_sdk::pubkey::new_rand(),
Rc::new(RefCell::new(AccountSharedData::default())),
)];
let message = Message::new(
&[Instruction::new_with_bincode(
accounts[0].0,
&MockInstruction::NoopSuccess,
vec![AccountMeta::new_readonly(accounts[0].0, false)],
)],
None,
);
let mut invoke_context = ThisInvokeContext::new_mock(&accounts, &[]);
invoke_context
.push(&message, &message.instructions[0], &[0], None)
.unwrap();
assert!(invoke_context
.verify(&message, &message.instructions[0], &[0])
.is_ok());
let mut _borrowed = accounts[0].1.borrow();
assert_eq!(
invoke_context.verify(&message, &message.instructions[0], &[0]),
Err(InstructionError::AccountBorrowOutstanding)
);
}
#[test]
fn test_process_cross_program() {
let caller_program_id = solana_sdk::pubkey::new_rand();
let callee_program_id = solana_sdk::pubkey::new_rand();
let owned_account = AccountSharedData::new(42, 1, &callee_program_id);
let not_owned_account = AccountSharedData::new(84, 1, &solana_sdk::pubkey::new_rand());
let readonly_account = AccountSharedData::new(168, 1, &solana_sdk::pubkey::new_rand());
let loader_account = AccountSharedData::new(0, 0, &native_loader::id());
let mut program_account = AccountSharedData::new(1, 0, &native_loader::id());
program_account.set_executable(true);
let accounts = vec![
(
solana_sdk::pubkey::new_rand(),
Rc::new(RefCell::new(owned_account)),
),
(
solana_sdk::pubkey::new_rand(),
Rc::new(RefCell::new(not_owned_account)),
),
(
solana_sdk::pubkey::new_rand(),
Rc::new(RefCell::new(readonly_account)),
),
(caller_program_id, Rc::new(RefCell::new(loader_account))),
(callee_program_id, Rc::new(RefCell::new(program_account))),
];
let account_indices = [0, 1, 2];
let program_indices = [3, 4];
let programs: Vec<(_, ProcessInstructionWithContext)> =
vec![(callee_program_id, mock_process_instruction)];
let metas = vec![
AccountMeta::new(accounts[0].0, false),
AccountMeta::new(accounts[1].0, false),
AccountMeta::new_readonly(accounts[2].0, false),
];
let caller_instruction =
CompiledInstruction::new(program_indices[0] as u8, &(), vec![0, 1, 2, 3, 4]);
let callee_instruction = Instruction::new_with_bincode(
callee_program_id,
&MockInstruction::NoopSuccess,
metas.clone(),
);
let message = Message::new(&[callee_instruction], None);
let mut invoke_context = ThisInvokeContext::new_mock(&accounts, programs.as_slice());
invoke_context
.push(&message, &caller_instruction, &program_indices[..1], None)
.unwrap();
// not owned account modified by the caller (before the invoke)
let demote_program_write_locks =
invoke_context.is_feature_active(&demote_program_write_locks::id());
let caller_write_privileges = message
.account_keys
.iter()
.enumerate()
.map(|(i, _)| message.is_writable(i, demote_program_write_locks))
.collect::<Vec<bool>>();
accounts[0].1.borrow_mut().data_as_mut_slice()[0] = 1;
assert_eq!(
InstructionProcessor::process_cross_program_instruction(
&message,
&program_indices[1..],
&account_indices,
&caller_write_privileges,
&mut invoke_context,
),
Err(InstructionError::ExternalAccountDataModified)
);
accounts[0].1.borrow_mut().data_as_mut_slice()[0] = 0;
// readonly account modified by the invoker
accounts[2].1.borrow_mut().data_as_mut_slice()[0] = 1;
assert_eq!(
InstructionProcessor::process_cross_program_instruction(
&message,
&program_indices[1..],
&account_indices,
&caller_write_privileges,
&mut invoke_context,
),
Err(InstructionError::ReadonlyDataModified)
);
accounts[2].1.borrow_mut().data_as_mut_slice()[0] = 0;
invoke_context.pop();
let cases = vec![
(MockInstruction::NoopSuccess, Ok(())),
(
MockInstruction::NoopFail,
Err(InstructionError::GenericError),
),
(MockInstruction::ModifyOwned, Ok(())),
(
MockInstruction::ModifyNotOwned,
Err(InstructionError::ExternalAccountDataModified),
),
];
for case in cases {
let callee_instruction =
Instruction::new_with_bincode(callee_program_id, &case.0, metas.clone());
let message = Message::new(&[callee_instruction], None);
invoke_context
.push(&message, &caller_instruction, &program_indices[..1], None)
.unwrap();
let caller_write_privileges = message
.account_keys
.iter()
.enumerate()
.map(|(i, _)| message.is_writable(i, demote_program_write_locks))
.collect::<Vec<bool>>();
assert_eq!(
InstructionProcessor::process_cross_program_instruction(
&message,
&program_indices[1..],
&account_indices,
&caller_write_privileges,
&mut invoke_context,
),
case.1
);
invoke_context.pop();
}
}
#[test]
fn test_native_invoke() {
let caller_program_id = solana_sdk::pubkey::new_rand();
let callee_program_id = solana_sdk::pubkey::new_rand();
let owned_account = AccountSharedData::new(42, 1, &callee_program_id);
let not_owned_account = AccountSharedData::new(84, 1, &solana_sdk::pubkey::new_rand());
let readonly_account = AccountSharedData::new(168, 1, &solana_sdk::pubkey::new_rand());
let loader_account = AccountSharedData::new(0, 0, &native_loader::id());
let mut program_account = AccountSharedData::new(1, 0, &native_loader::id());
program_account.set_executable(true);
let accounts = vec![
(
solana_sdk::pubkey::new_rand(),
Rc::new(RefCell::new(owned_account)),
),
(
solana_sdk::pubkey::new_rand(),
Rc::new(RefCell::new(not_owned_account)),
),
(
solana_sdk::pubkey::new_rand(),
Rc::new(RefCell::new(readonly_account)),
),
(caller_program_id, Rc::new(RefCell::new(loader_account))),
(callee_program_id, Rc::new(RefCell::new(program_account))),
];
let program_indices = [3];
let programs: Vec<(_, ProcessInstructionWithContext)> =
vec![(callee_program_id, mock_process_instruction)];
let metas = vec![
AccountMeta::new(accounts[0].0, false),
AccountMeta::new(accounts[1].0, false),
AccountMeta::new_readonly(accounts[2].0, false),
];
let caller_instruction =
CompiledInstruction::new(program_indices[0] as u8, &(), vec![0, 1, 2, 3, 4]);
let callee_instruction = Instruction::new_with_bincode(
callee_program_id,
&MockInstruction::NoopSuccess,
metas.clone(),
);
let message = Message::new(&[callee_instruction.clone()], None);
let mut invoke_context = ThisInvokeContext::new_mock(&accounts, programs.as_slice());
invoke_context
.push(&message, &caller_instruction, &program_indices, None)
.unwrap();
// not owned account modified by the invoker
accounts[0].1.borrow_mut().data_as_mut_slice()[0] = 1;
assert_eq!(
InstructionProcessor::native_invoke(
&mut invoke_context,
callee_instruction.clone(),
&[]
),
Err(InstructionError::ExternalAccountDataModified)
);
accounts[0].1.borrow_mut().data_as_mut_slice()[0] = 0;
// readonly account modified by the invoker
accounts[2].1.borrow_mut().data_as_mut_slice()[0] = 1;
assert_eq!(
InstructionProcessor::native_invoke(&mut invoke_context, callee_instruction, &[]),
Err(InstructionError::ReadonlyDataModified)
);
accounts[2].1.borrow_mut().data_as_mut_slice()[0] = 0;
invoke_context.pop();
// Other test cases
let cases = vec![
(MockInstruction::NoopSuccess, Ok(())),
(
MockInstruction::NoopFail,
Err(InstructionError::GenericError),
),
(MockInstruction::ModifyOwned, Ok(())),
(
MockInstruction::ModifyNotOwned,
Err(InstructionError::ExternalAccountDataModified),
),
(
MockInstruction::ModifyReadonly,
Err(InstructionError::ReadonlyDataModified),
),
];
for case in cases {
let callee_instruction =
Instruction::new_with_bincode(callee_program_id, &case.0, metas.clone());
let message = Message::new(&[callee_instruction.clone()], None);
invoke_context
.push(&message, &caller_instruction, &program_indices, None)
.unwrap();
assert_eq!(
InstructionProcessor::native_invoke(&mut invoke_context, callee_instruction, &[]),
case.1
);
invoke_context.pop();
}
}
#[test]
fn test_invoke_context_compute_budget() {
let accounts = vec![
(
solana_sdk::pubkey::new_rand(),
Rc::new(RefCell::new(AccountSharedData::default())),
),
(
crate::neon_evm_program::id(),
Rc::new(RefCell::new(AccountSharedData::default())),
),
];
let noop_message = Message::new(
&[Instruction::new_with_bincode(
accounts[0].0,
&MockInstruction::NoopSuccess,
vec![AccountMeta::new_readonly(accounts[0].0, false)],
)],
None,
);
let neon_message = Message::new(
&[Instruction::new_with_bincode(
crate::neon_evm_program::id(),
&MockInstruction::NoopSuccess,
vec![AccountMeta::new_readonly(accounts[0].0, false)],
)],
None,
);
let mut feature_set = FeatureSet::all_enabled();
feature_set.deactivate(&tx_wide_compute_cap::id());
feature_set.deactivate(&requestable_heap_size::id());
let mut invoke_context = ThisInvokeContext::new_mock_with_sysvars_and_features(
&accounts,
&[],
&[],
Arc::new(feature_set),
);
invoke_context
.push(&noop_message, &noop_message.instructions[0], &[0], None)
.unwrap();
assert_eq!(
*invoke_context.get_compute_budget(),
ComputeBudget::default()
);
invoke_context.pop();
invoke_context
.push(&neon_message, &neon_message.instructions[0], &[1], None)
.unwrap();
let expected_compute_budget = ComputeBudget {
max_units: 500_000,
heap_size: Some(256_usize.saturating_mul(1024)),
..ComputeBudget::default()
};
assert_eq!(
*invoke_context.get_compute_budget(),
expected_compute_budget
);
invoke_context.pop();
invoke_context
.push(&noop_message, &noop_message.instructions[0], &[0], None)
.unwrap();
assert_eq!(
*invoke_context.get_compute_budget(),
ComputeBudget::default()
);
invoke_context.pop();
}
}
isFunction.min.js | define(function(){return function a(b){return typeof b==="function"&&typeof b.nodeType!=="number"}});
aggregate.rs | use crate::ext::*;
use crate::infer::InferenceTable;
use crate::solve::slg::SlgContext;
use crate::solve::slg::SlgContextOps;
use crate::solve::slg::SubstitutionExt;
use crate::solve::{Guidance, Solution};
use chalk_ir::cast::Cast;
use chalk_ir::*;
use chalk_engine::context;
use chalk_engine::SimplifiedAnswer;
use std::fmt::Debug;
/// Draws as many answers as it needs from `simplified_answers` (but
/// no more!) in order to come up with a solution.
impl<'me> context::AggregateOps<SlgContext> for SlgContextOps<'me> {
fn make_solution(
&self,
root_goal: &Canonical<InEnvironment<Goal>>,
mut simplified_answers: impl context::AnswerStream<SlgContext>,
) -> Option<Solution> {
// No answers at all?
if simplified_answers.peek_answer().is_none() {
return None;
}
let SimplifiedAnswer { subst, ambiguous } = simplified_answers.next_answer().unwrap();
// Exactly 1 unconditional answer?
if simplified_answers.peek_answer().is_none() && !ambiguous {
return Some(Solution::Unique(subst));
}
// Otherwise, we either have >1 answer, or else we have
// ambiguity. Either way, we are only going to be giving back
// **guidance**, and with guidance, the caller doesn't get
// back any region constraints. So drop them from our `subst`
// variable.
//
// FIXME-- there is actually a 3rd possibility. We could have
// >1 answer where all the answers have the same substitution,
// but different region constraints. We should collapse those
// cases into an `OR` region constraint at some point, but I
// leave that for future work. This is basically
// rust-lang/rust#21974.
let mut subst = subst.map(|cs| cs.subst);
// Extract answers and merge them into `subst`. Stop once we have
// a trivial subst (or run out of answers).
let guidance = loop {
if subst.value.is_empty() || is_trivial(&subst) {
break Guidance::Unknown;
}
if !simplified_answers
.any_future_answer(|ref mut new_subst| new_subst.may_invalidate(&subst))
{
break Guidance::Definite(subst);
}
match simplified_answers.next_answer() {
Some(answer1) => {
subst = merge_into_guidance(root_goal, subst, &answer1.subst);
}
None => {
break Guidance::Definite(subst);
}
}
};
Some(Solution::Ambig(guidance))
}
}
/// Given a current substitution used as guidance for `root_goal`, and
/// a new possible answer to `root_goal`, returns a new set of
/// guidance that encompasses both of them. This is often more general
/// than the old guidance. For example, if we had a guidance of `?0 =
/// u32` and the new answer is `?0 = i32`, then the guidance would
/// become `?0 = ?X` (where `?X` is some fresh variable).
fn merge_into_guidance(
root_goal: &Canonical<InEnvironment<Goal>>,
guidance: Canonical<Substitution>,
answer: &Canonical<ConstrainedSubst>,
) -> Canonical<Substitution> {
let mut infer = InferenceTable::new();
let Canonical {
value: ConstrainedSubst {
subst: subst1,
constraints: _,
},
binders: _,
} = answer;
// Collect the types that the two substitutions have in
// common.
let aggr_parameters: Vec<_> = guidance
.value
.parameters
.iter()
.zip(&subst1.parameters)
.enumerate()
.map(|(index, (value, value1))| {
// We have two values for some variable X that
// appears in the root goal. Find out the universe
// of X.
let universe = root_goal.binders[index].into_inner();
let ty = match &value.0 {
ParameterKind::Ty(ty) => ty,
ParameterKind::Lifetime(_) => {
// Ignore the lifetimes from the substitution: we're just
// creating guidance here anyway.
return infer.new_variable(universe).to_lifetime().cast();
}
};
let ty1 = value1.assert_ty_ref();
// Combine the two types into a new type.
let mut aggr = AntiUnifier {
infer: &mut infer,
universe,
};
aggr.aggregate_tys(&ty, ty1).cast()
})
.collect();
let aggr_subst = Substitution {
parameters: aggr_parameters,
};
infer.canonicalize(&aggr_subst).quantified
}
fn is_trivial(subst: &Canonical<Substitution>) -> bool {
// A subst is trivial if..
subst
.value
.parameters
.iter()
.enumerate()
.all(|(index, parameter)| match ¶meter.0 {
// All types are mapped to distinct variables. Since this
// has been canonicalized, those will also be the first N
// variables.
ParameterKind::Ty(t) => match t.bound() {
None => false,
Some(depth) => depth == index,
},
// And no lifetime mappings. (This is too strict, but we never
// produce substs with lifetimes.)
ParameterKind::Lifetime(_) => false,
})
}
/// [Anti-unification] is the act of taking two things that do not
/// unify and finding a minimal generalization of them. So for
/// example `Vec<u32>` anti-unified with `Vec<i32>` might be
/// `Vec<?X>`. This is a **very simplistic** anti-unifier.
///
/// [Anti-unification]: https://en.wikipedia.org/wiki/Anti-unification_(computer_science)
struct AntiUnifier<'infer> {
infer: &'infer mut InferenceTable,
universe: UniverseIndex,
}
impl<'infer> AntiUnifier<'infer> {
fn aggregate_tys(&mut self, ty0: &Ty, ty1: &Ty) -> Ty {
match (ty0, ty1) {
// If we see bound things on either side, just drop in a
// fresh variable. This means we will sometimes
// overgeneralize. So for example if we have two
// solutions that are both `(X, X)`, we just produce `(Y,
// Z)` in all cases.
(Ty::InferenceVar(_), Ty::InferenceVar(_)) => self.new_variable(),
// Ugh. Aggregating two types like `for<'a> fn(&'a u32,
// &'a u32)` and `for<'a, 'b> fn(&'a u32, &'b u32)` seems
// kinda' hard. Don't try to be smart for now, just plop a
// variable in there and be done with it.
(Ty::BoundVar(_), Ty::BoundVar(_)) | (Ty::ForAll(_), Ty::ForAll(_)) => {
self.new_variable()
}
(Ty::Apply(apply1), Ty::Apply(apply2)) => {
self.aggregate_application_tys(apply1, apply2)
}
(Ty::Projection(apply1), Ty::Projection(apply2)) => {
self.aggregate_projection_tys(apply1, apply2)
}
// Mismatched base kinds.
(Ty::InferenceVar(_), _)
| (Ty::BoundVar(_), _)
| (Ty::ForAll(_), _)
| (Ty::Apply(_), _)
| (Ty::Projection(_), _) => self.new_variable(),
}
}
fn aggregate_application_tys(&mut self, apply1: &ApplicationTy, apply2: &ApplicationTy) -> Ty {
let ApplicationTy {
name: name1,
parameters: parameters1,
} = apply1;
let ApplicationTy {
name: name2,
parameters: parameters2,
} = apply2;
self.aggregate_name_and_substs(name1, parameters1, name2, parameters2)
.map(|(&name, parameters)| Ty::Apply(ApplicationTy { name, parameters }))
.unwrap_or_else(|| self.new_variable())
}
fn aggregate_projection_tys(&mut self, proj1: &ProjectionTy, proj2: &ProjectionTy) -> Ty {
let ProjectionTy {
associated_ty_id: name1,
parameters: parameters1,
} = proj1;
let ProjectionTy {
associated_ty_id: name2,
parameters: parameters2,
} = proj2;
self.aggregate_name_and_substs(name1, parameters1, name2, parameters2)
.map(|(&associated_ty_id, parameters)| {
Ty::Projection(ProjectionTy {
associated_ty_id,
parameters,
})
})
.unwrap_or_else(|| self.new_variable())
}
fn aggregate_name_and_substs<N>(
&mut self,
name1: N,
parameters1: &[Parameter],
name2: N,
parameters2: &[Parameter],
) -> Option<(N, Vec<Parameter>)>
where
N: Copy + Eq + Debug,
{
if name1 != name2 {
return None;
}
let name = name1;
assert_eq!(
parameters1.len(),
parameters2.len(),
"does {:?} take {} parameters or {}? can't both be right",
name,
parameters1.len(),
parameters2.len()
);
let parameters: Vec<_> = parameters1
.iter()
.zip(parameters2)
.map(|(p1, p2)| self.aggregate_parameters(p1, p2))
.collect();
Some((name, parameters))
}
fn aggregate_parameters(&mut self, p1: &Parameter, p2: &Parameter) -> Parameter {
match (&p1.0, &p2.0) {
(ParameterKind::Ty(ty1), ParameterKind::Ty(ty2)) => self.aggregate_tys(ty1, ty2).cast(),
(ParameterKind::Lifetime(l1), ParameterKind::Lifetime(l2)) => {
self.aggregate_lifetimes(l1, l2).cast()
}
(ParameterKind::Ty(_), _) | (ParameterKind::Lifetime(_), _) => {
panic!("mismatched parameter kinds: p1={:?} p2={:?}", p1, p2)
}
}
}
fn aggregate_lifetimes(&mut self, l1: &Lifetime, l2: &Lifetime) -> Lifetime {
match (l1, l2) {
(Lifetime::InferenceVar(_), _) | (_, Lifetime::InferenceVar(_)) => {
self.new_lifetime_variable()
}
(Lifetime::BoundVar(_), _) | (_, Lifetime::BoundVar(_)) => self.new_lifetime_variable(),
(Lifetime::Placeholder(_), Lifetime::Placeholder(_)) => {
if l1 == l2 {
*l1
} else {
self.new_lifetime_variable()
}
}
}
}
fn new_variable(&mut self) -> Ty {
self.infer.new_variable(self.universe).to_ty()
}
fn new_lifetime_variable(&mut self) -> Lifetime {
self.infer.new_variable(self.universe).to_lifetime()
}
}
/// Test the equivalent of `Vec<i32>` vs `Vec<u32>`
#[test]
fn vec_i32_vs_vec_u32() {
let mut infer = InferenceTable::new();
let mut anti_unifier = AntiUnifier {
infer: &mut infer,
universe: UniverseIndex::root(),
};
let ty = anti_unifier.aggregate_tys(
&ty!(apply (item 0) (apply (item 1))),
&ty!(apply (item 0) (apply (item 2))),
);
assert_eq!(ty!(apply (item 0) (infer 0)), ty);
}
/// Test the equivalent of `Vec<i32>` vs `Vec<i32>`
#[test]
fn vec_i32_vs_vec_i32() {
let mut infer = InferenceTable::new();
let mut anti_unifier = AntiUnifier {
infer: &mut infer,
universe: UniverseIndex::root(),
};
let ty = anti_unifier.aggregate_tys(
&ty!(apply (item 0) (apply (item 1))),
&ty!(apply (item 0) (apply (item 1))),
);
assert_eq!(ty!(apply (item 0) (apply (item 1))), ty);
}
/// Test the equivalent of `Vec<X>` vs `Vec<Y>`
#[test]
fn vec_x_vs_vec_y() {
let mut infer = InferenceTable::new();
let mut anti_unifier = AntiUnifier {
infer: &mut infer,
universe: UniverseIndex::root(),
};
// Note that the `var 0` and `var 1` in these types would be
// referring to canonicalized free variables, not variables in
// `infer`.
let ty = anti_unifier.aggregate_tys(
&ty!(apply (item 0) (infer 0)),
&ty!(apply (item 0) (infer 1)),
);
// But this `var 0` is from `infer`.
assert_eq!(ty!(apply (item 0) (infer 0)), ty);
}
per.rs | #[doc = r" Value read from the register"]
pub struct R {
bits: u16,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u16,
}
impl super::PER {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits };
let mut w = W { bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
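// Illustrative usage sketch (not generated code): typical read/modify/write calls
// on this register; `tcc.per` is assumed to be a `PER` instance reachable from a
// peripheral handle, and `0x1234` is an arbitrary period value.
//
//     let period = tcc.per.read().per().bits();                  // read current period
//     tcc.per.modify(|_, w| unsafe { w.per().bits(0x1234) });    // write a new period
//     tcc.per.reset();                                           // restore reset value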
#[doc = r" Value of the field"]
pub struct PERR {
bits: u16,
}
impl PERR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _PERW<'a> {
w: &'a mut W,
}
impl<'a> _PERW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u16) << OFFSET);
self.w.bits |= ((value & MASK) as u16) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u16 {
self.bits
}
#[doc = "Bits 0:15 - Counter Period"]
#[inline]
pub fn per(&self) -> PERR {
let bits = {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u16) as u16
};
PERR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u16) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:15 - Counter Period"]
#[inline]
pub fn per(&mut self) -> _PERW {
_PERW { w: self }
}
}
installer.rs | extern crate dirs;
extern crate os_type;
extern crate reqwest;
extern crate serde;
extern crate serde_json;
use serde_json::Error;
use std::fs::File;
use std::io;
use std::io::Write;
use std::path::PathBuf;
const AIV_SHELF_URL: &str = "https://www.giorgiopomettini.eu/aiv_shelf.json";
#[derive(Serialize, Deserialize, Debug)]
pub struct Shelf {
pub response: String,
pub shelf_url: String,
pub shelf_name: String,
pub icons_url: String,
pub icons_name: Vec<String>,
pub icons_extension: String,
pub icons_variants: Vec<String>,
}
#[derive(Debug, Default)]
pub struct Icon {
pub name: String,
pub data: Vec<u8>,
}
pub fn get_json_data() -> String {
match reqwest::get(AIV_SHELF_URL) {
Ok(mut request) => match request.text() {
Ok(text) => {
info!("Shelf data downloaded");
text
}
Err(error) => {
error!("Shelf data downloaded but got error: {}", error);
panic!();
}
},
Err(error) => {
error!("Error downloading shelf data: {}", error);
panic!();
}
}
}
pub fn get_shelf_data(data: &str) -> Result<Shelf, Error> {
let shelf: Shelf = serde_json::from_str(data)?;
Ok(shelf)
}
pub fn check_json(json_data: Result<Shelf, Error>) -> Shelf {
match json_data {
Ok(shelf_data) => {
if shelf_data.response == "OK" {
info!("Shelf data OK");
shelf_data
} else {
error!("Shelf data error");
panic!();
}
}
Err(error) => {
error!("Json cannot be parsed: {}", error);
panic!();
}
}
}
pub fn download_shelf_file(shelf: &Shelf) -> String {
match reqwest::get(&format!("{}{}", &shelf.shelf_url, &shelf.shelf_name)) {
Ok(mut request) => match request.text() {
Ok(text) => {
info!("Shelf downloaded");
text
}
Err(error) => {
error!("Shelf downloaded but got error: {}", error);
panic!();
}
},
Err(error) => {
error!("Error downloading shelf: {}", error);
panic!();
}
}
}
pub fn construct_icons_url(shelf: &Shelf) -> Vec<Icon> {
let mut icons: Vec<Icon> = Vec::new();
for icon in &shelf.icons_name {
for variant in &shelf.icons_variants {
let mut i: Icon = Default::default();
i.name = format!("{}{}.{}", &icon, &variant, &shelf.icons_extension);
icons.push(i);
}
}
icons
}
pub fn download_icons(shelf: &Shelf, icons: &mut Vec<Icon>) {
for icon in icons {
info!("Downloading icon {}", &icon.name);
match reqwest::get(&format!("{}{}", &shelf.icons_url, &icon.name)) {
Ok(mut request) => {
let mut buffer: Vec<u8> = vec![];
match request.copy_to(&mut buffer) {
Ok(_size) => {
icon.data = buffer;
}
Err(_error) => (),
}
}
Err(error) => {
warn!("Error downloading icon: {}", error);
}
}
}
}
pub fn set_maya_directory() -> PathBuf {
match get_maya_directory() {
Some(path) => {
info!("Found Maya directory");
path
}
None => {
error!("Maya directory not found:");
panic!();
}
}
}
pub fn get_maya_installed_versions(maya_path: &PathBuf) -> Vec<usize> {
let mut maya_versions = Vec::new();
for entry in maya_path.read_dir().unwrap() {
if let Ok(entry) = entry {
// Inefficient, needs refactor
for version in 2011..2020 {
if entry.path().ends_with(version.to_string()) {
maya_versions.push(version);
}
}
}
}
maya_versions
}
pub fn get_maya_shelf_directory(maya_path: &PathBuf, maya_version: &usize) -> Option<PathBuf> {
let mut shelf_directory = PathBuf::new();
shelf_directory.push(&maya_path);
shelf_directory.push(maya_version.to_string());
shelf_directory.push("prefs");
shelf_directory.push("shelves");
if shelf_directory.exists() {
Some(shelf_directory)
} else {
None
}
}
pub fn get_maya_icons_directory(maya_path: &PathBuf, maya_version: &usize) -> Option<PathBuf> {
let mut icons_directory = PathBuf::new();
icons_directory.push(&maya_path);
icons_directory.push(maya_version.to_string());
icons_directory.push("prefs");
icons_directory.push("icons");
if icons_directory.exists() {
Some(icons_directory)
} else {
None
}
}
pub fn get_maya_directory() -> Option<PathBuf> {
let mut maya_directory = PathBuf::new();
match dirs::home_dir() {
Some(path) => maya_directory.push(path),
None => panic!("Cannot get your HOME dir"),
}
match os_type::current_platform().os_type {
os_type::OSType::OSX => {
maya_directory.push("Library");
maya_directory.push("Preferences");
maya_directory.push("Autodesk");
maya_directory.push("maya");
}
// This will probably be Windows, or maybe not
_ => {
maya_directory.push("Documents");
maya_directory.push("maya");
}
}
info!("Maya directory: {:?}", &maya_directory);
if maya_directory.exists() {
Some(maya_directory)
} else {
None
}
}
pub fn write_file(content: &str, path: &PathBuf) -> io::Result<()> {
let mut file = File::create(path)?;
file.write_all(&content.as_bytes())?;
Ok(())
}
pub fn write_file_binary(content: &Vec<u8>, path: &PathBuf) -> io::Result<()> {
let mut file = File::create(path)?;
file.write_all(&content)?;
Ok(())
}
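// Illustrative sketch (assumption, not part of the original installer): how the
// helpers above might be chained end to end.
//
//     let shelf = check_json(get_shelf_data(&get_json_data()));
//     let shelf_file = download_shelf_file(&shelf);
//     let mut icons = construct_icons_url(&shelf);
//     download_icons(&shelf, &mut icons);
//     let maya_dir = set_maya_directory();
//     for version in get_maya_installed_versions(&maya_dir) {
//         if let Some(shelves) = get_maya_shelf_directory(&maya_dir, &version) {
//             let _ = write_file(&shelf_file, &shelves.join(&shelf.shelf_name));
//         }
//     }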
main.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
shared::{get_home_path, normalized_network_name, Home, NetworkHome},
test::TestCommand,
};
use anyhow::{anyhow, Result};
use diem_types::account_address::AccountAddress;
use std::{fs, path::PathBuf};
use structopt::StructOpt;
use shuffle::{account, build, console, deploy, new, node, shared, test, transactions};
#[tokio::main]
pub async fn main() -> Result<()> {
let command = Command::from_args();
let home = Home::new(normalize_home_path(command.home_path).as_path())?;
match command.subcommand {
Subcommand::New { blockchain, path } => new::handle(&home, blockchain, path),
Subcommand::Node { genesis } => node::handle(&home, genesis),
Subcommand::Build {
project_path,
network,
address,
} => build::handle(
&shared::normalized_project_path(project_path)?,
normalized_address(
home.new_network_home(normalized_network_name(network).as_str()),
address,
)?,
),
Subcommand::Deploy {
project_path,
network,
} => {
deploy::handle(
&home.new_network_home(normalized_network_name(network.clone()).as_str()),
&shared::normalized_project_path(project_path)?,
shared::normalized_network_url(&home, network)?,
)
.await
}
Subcommand::Account { root, network } => {
account::handle(
&home,
root,
home.get_network_struct_from_toml(normalized_network_name(network).as_str())?,
)
.await
}
Subcommand::Test { cmd } => test::handle(&home, cmd).await,
Subcommand::Console {
project_path,
network,
key_path,
address,
} => console::handle(
&home,
&shared::normalized_project_path(project_path)?,
home.get_network_struct_from_toml(normalized_network_name(network.clone()).as_str())?,
&normalized_key_path(
home.new_network_home(normalized_network_name(network.clone()).as_str()),
key_path,
)?,
normalized_address(
home.new_network_home(normalized_network_name(network).as_str()),
address,
)?,
),
Subcommand::Transactions {
network,
tail,
address,
raw,
} => {
transactions::handle(
shared::normalized_network_url(&home, network.clone())?,
unwrap_nested_boolean_option(tail),
normalized_address(
home.new_network_home(normalized_network_name(network.clone()).as_str()),
address,
)?,
unwrap_nested_boolean_option(raw),
)
.await
}
}
}
#[derive(Debug, StructOpt)]
struct Command {
#[structopt(long, global = true)]
home_path: Option<PathBuf>,
#[structopt(subcommand)]
subcommand: Subcommand,
}
#[derive(Debug, StructOpt)]
#[structopt(name = "shuffle", about = "CLI frontend for Shuffle toolset")]
pub enum Subcommand {
#[structopt(about = "Creates a new shuffle project for Move development")]
New {
#[structopt(short, long, default_value = new::DEFAULT_BLOCKCHAIN)]
blockchain: String,
/// Path to destination dir
#[structopt(parse(from_os_str))]
path: PathBuf,
},
#[structopt(about = "Runs a local devnet with prefunded accounts")]
Node {
#[structopt(short, long, help = "Move package directory to be used for genesis")]
genesis: Option<String>,
},
#[structopt(about = "Compiles the Move package and generates typescript files")]
Build {
#[structopt(short, long)]
project_path: Option<PathBuf>,
#[structopt(short, long)]
network: Option<String>,
#[structopt(
short,
long,
help = "Network specific address to be used for publishing with Named Address Sender"
)]
address: Option<String>,
},
#[structopt(about = "Publishes the main move package using the account as publisher")]
Deploy {
#[structopt(short, long)]
project_path: Option<PathBuf>,
#[structopt(short, long)]
network: Option<String>,
},
Account {
#[structopt(short, long, help = "Creates account from mint.key passed in by user")]
root: Option<PathBuf>,
#[structopt(short, long)]
network: Option<String>,
},
#[structopt(about = "Starts a REPL for onchain inspection")]
Console {
#[structopt(short, long)]
project_path: Option<PathBuf>,
#[structopt(short, long)]
network: Option<String>,
#[structopt(short, long, requires("address"))]
key_path: Option<PathBuf>,
#[structopt(short, long, requires("key-path"))]
address: Option<String>,
},
#[structopt(about = "Runs end to end .ts tests")]
Test {
#[structopt(subcommand)]
cmd: TestCommand,
},
#[structopt(
about = "Captures last 10 transactions and continuously polls for new transactions from the account"
)]
Transactions {
#[structopt(short, long)]
network: Option<String>,
#[structopt(
short,
long,
help = "Writes out transactions without pretty formatting"
)]
raw: Option<Option<bool>>,
#[structopt(
short,
long,
help = "Captures and polls transactions deployed from a given address"
)]
address: Option<String>,
#[structopt(short, help = "Blocks and streams future transactions as they happen")]
tail: Option<Option<bool>>,
},
}
fn normalized_address(
network_home: NetworkHome,
account_address: Option<String>,
) -> Result<AccountAddress> {
let normalized_string = match account_address {
Some(input_address) => {
if !input_address.starts_with("0x") {
"0x".to_owned() + &input_address
} else {
input_address
}
}
None => get_latest_address(&network_home)?,
};
Ok(AccountAddress::from_hex_literal(
normalized_string.as_str(),
)?)
}
fn get_latest_address(network_home: &NetworkHome) -> Result<String> {
network_home.check_latest_account_address_path_exists()?;
Ok(AccountAddress::from_hex(fs::read_to_string(
network_home.get_latest_account_address_path(),
)?)?
.to_hex_literal())
}
fn normalized_key_path(
network_home: NetworkHome,
diem_root_key_path: Option<PathBuf>,
) -> Result<PathBuf> {
match diem_root_key_path {
Some(key_path) => Ok(key_path),
None => {
if !network_home.get_accounts_path().is_dir() {
return Err(anyhow!(
"An account hasn't been created yet! Run shuffle account first"
));
}
Ok(network_home.get_latest_account_key_path())
}
}
}
fn unwrap_nested_boolean_option(option: Option<Option<bool>>) -> bool {
match option {
Some(Some(val)) => val,
Some(_val) => true,
None => false,
}
}
fn normalize_home_path(home_path: Option<PathBuf>) -> PathBuf {
match home_path {
Some(path) => path,
None => get_home_path(),
}
}
| Command |
redis_scheduler.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Created by [email protected]
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Scheduler with redis backend
---------------------------------
"""
import os
import time
import socket
import datetime
import logging
from json import loads, dumps
from gluon.utils import web2py_uuid
from gluon.storage import Storage
from gluon.scheduler import *
from gluon.scheduler import _decode_dict
from gluon.contrib.redis_utils import RWatchError
USAGE = """
## Example
For any existing app
Create File: app/models/scheduler.py ======
from gluon.contrib.redis_utils import RConn
from gluon.contrib.redis_scheduler import RScheduler
def demo1(*args,**vars):
print('you passed args=%s and vars=%s' % (args, vars))
return 'done!'
def demo2():
1/0
rconn = RConn()
mysched = RScheduler(db, dict(demo1=demo1,demo2=demo2), ...., redis_conn=rconn)
## run worker nodes with:
cd web2py
python web2py.py -K app
"""
path = os.getcwd()
if 'WEB2PY_PATH' not in os.environ:
os.environ['WEB2PY_PATH'] = path
IDENTIFIER = "%s#%s" % (socket.gethostname(), os.getpid())
logger = logging.getLogger('web2py.scheduler.%s' % IDENTIFIER)
POLLING = 'POLLING'
class RScheduler(Scheduler):
def __init__(self, db, tasks=None, migrate=True,
worker_name=None, group_names=None, heartbeat=HEARTBEAT,
max_empty_runs=0, discard_results=False, utc_time=False,
redis_conn=None, mode=1):
"""
Highly-experimental coordination with redis
Takes all args from Scheduler except redis_conn which
must be something closer to a StrictRedis instance.
My only regret - and the reason why I kept this under the hood for a
while - is that it's hard to hook up in web2py to something happening
right after the commit to a table, which would enable this version of the
scheduler to process "immediate" tasks right away instead of waiting a
few seconds (see FIXME in queue_task())
mode is reserved for future usage patterns.
Right now it moves the coordination (which is the most intensive
routine in the scheduler in matters of IPC) of workers to redis.
I'd like to have incrementally redis-backed modes of operations,
such as e.g.:
- 1: IPC through redis (which is the current implementation)
- 2: Store task results in redis (which will relieve further pressure
from the db leaving the scheduler_run table empty and possibly
keep things smooth as tasks results can be set to expire
after a bit of time)
- 3: Move all the logic for storing and queueing tasks to redis
itself - which means no scheduler_task usage too - and use
the database only as an historical record-bookkeeping
(e.g. for reporting)
As usual, I'm eager to see your comments.
"""
Scheduler.__init__(self, db, tasks=tasks, migrate=migrate,
worker_name=worker_name, group_names=group_names,
heartbeat=heartbeat, max_empty_runs=max_empty_runs,
discard_results=discard_results, utc_time=utc_time)
self.r_server = redis_conn
from gluon import current
self._application = current.request.application or 'appname'
def _nkey(self, key):
"""Helper to restrict all keys to a namespace and track them."""
prefix = 'w2p:rsched:%s' % self._application
allkeys = '%s:allkeys' % prefix
newkey = "%s:%s" % (prefix, key)
self.r_server.sadd(allkeys, newkey)
return newkey
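# Illustrative example (app name hypothetical): with self._application == 'myapp',
# self._nkey('queued:main') returns 'w2p:rsched:myapp:queued:main' and also adds
# that key to the 'w2p:rsched:myapp:allkeys' set so that prune_all() can delete it.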
def prune_all(self):
"""Global housekeeping."""
all_keys = self._nkey('allkeys')
with self.r_server.pipeline() as pipe:
while True:
try:
pipe.watch('PRUNE_ALL')
while True:
k = pipe.spop(all_keys)
if k is None:
break
pipe.delete(k)
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
def dt2str(self, value):
return value.strftime('%Y-%m-%d %H:%M:%S')
def str2date(self, value):
return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
def send_heartbeat(self, counter):
"""
Workers coordination in redis.
It has evolved into something that is not that easy.
Here we try to do what we need in a single transaction,
and retry that transaction if something goes wrong
"""
with self.r_server.pipeline() as pipe:
while True:
try:
pipe.watch('SEND_HEARTBEAT')
self.inner_send_heartbeat(counter, pipe)
pipe.execute()
self.adj_hibernation()
self.sleep()
break
except RWatchError:
time.sleep(0.1)
continue
def inner_send_heartbeat(self, counter, pipe):
"""
Do a few things in the "maintenance" thread.
Specifically:
- registers the workers
- accepts commands sent to workers (KILL, TERMINATE, PICK, DISABLED, etc)
- adjusts sleep
- saves stats
- elects master
- does "housecleaning" for dead workers
- triggers tasks assignment
"""
r_server = pipe
status_keyset = self._nkey('worker_statuses')
status_key = self._nkey('worker_status:%s' % (self.worker_name))
now = self.now()
mybackedstatus = r_server.hgetall(status_key)
if not mybackedstatus:
r_server.hmset(
status_key,
dict(
status=ACTIVE, worker_name=self.worker_name,
first_heartbeat=self.dt2str(now),
last_heartbeat=self.dt2str(now),
group_names=dumps(self.group_names), is_ticker=False,
worker_stats=dumps(self.w_stats))
)
r_server.sadd(status_keyset, status_key)
if not self.w_stats.status == POLLING:
self.w_stats.status = ACTIVE
self.w_stats.sleep = self.heartbeat
mybackedstatus = ACTIVE
else:
mybackedstatus = mybackedstatus['status']
if mybackedstatus == DISABLED:
# keep sleeping
self.w_stats.status = DISABLED
r_server.hmset(
status_key,
dict(last_heartbeat=self.dt2str(now),
worker_stats=dumps(self.w_stats))
)
elif mybackedstatus == TERMINATE:
self.w_stats.status = TERMINATE
logger.debug("Waiting to terminate the current task")
self.give_up()
elif mybackedstatus == KILL:
self.w_stats.status = KILL
self.die()
else:
if mybackedstatus == STOP_TASK:
logger.info('Asked to kill the current task')
self.terminate_process()
logger.info('........recording heartbeat (%s)',
self.w_stats.status)
r_server.hmset(
status_key,
dict(
last_heartbeat=self.dt2str(now), status=ACTIVE,
worker_stats=dumps(self.w_stats)
)
)
# newroutine
r_server.expire(status_key, self.heartbeat * 3 * 15)
self.w_stats.sleep = self.heartbeat # re-activating the process
if self.w_stats.status not in (RUNNING, POLLING):
self.w_stats.status = ACTIVE
self.do_assign_tasks = False
if counter % 5 == 0 or mybackedstatus == PICK:
try:
logger.info(
' freeing workers that have not sent heartbeat')
registered_workers = r_server.smembers(status_keyset)
allkeys = self._nkey('allkeys')
for worker in registered_workers:
w = r_server.hgetall(worker)
w = Storage(w)
if not w:
r_server.srem(status_keyset, worker)
logger.info('removing %s from %s', worker, allkeys)
r_server.srem(allkeys, worker)
continue
try:
self.is_a_ticker = self.being_a_ticker(pipe)
except:
pass
if self.w_stats.status in (ACTIVE, POLLING):
self.do_assign_tasks = True
if self.is_a_ticker and self.do_assign_tasks:
# I'm a ticker, and 5 loops passed without reassigning tasks,
# let's do that and loop again
if not self.db_thread:
logger.debug('thread building own DAL object')
self.db_thread = DAL(
self.db._uri, folder=self.db._adapter.folder)
self.define_tables(self.db_thread, migrate=False)
db = self.db_thread
self.wrapped_assign_tasks(db)
return None
except:
logger.error('Error assigning tasks')
def being_a_ticker(self, pipe):
|
def assign_tasks(self, db):
"""
The real beauty.
We don't need to ASSIGN tasks, we just put
them into the relevant queue
"""
st, sd = db.scheduler_task, db.scheduler_task_deps
r_server = self.r_server
now = self.now()
status_keyset = self._nkey('worker_statuses')
with r_server.pipeline() as pipe:
while True:
try:
# making sure we're the only one doing the job
pipe.watch('ASSIGN_TASKS')
registered_workers = pipe.smembers(status_keyset)
all_workers = []
for worker in registered_workers:
w = pipe.hgetall(worker)
if w['status'] == ACTIVE:
all_workers.append(Storage(w))
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
# build workers as dict of groups
wkgroups = {}
for w in all_workers:
group_names = loads(w.group_names)
for gname in group_names:
if gname not in wkgroups:
wkgroups[gname] = dict(
workers=[{'name': w.worker_name, 'c': 0}])
else:
wkgroups[gname]['workers'].append(
{'name': w.worker_name, 'c': 0})
# set queued tasks that expired between "runs" (i.e., you turned off
# the scheduler): then it wasn't expired, but now it is
db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(st.stop_time < now)
).update(status=EXPIRED)
# calculate dependencies
deps_with_no_deps = db(
(sd.can_visit == False) &
(~sd.task_child.belongs(
db(sd.can_visit == False)._select(sd.task_parent)
)
)
)._select(sd.task_child)
no_deps = db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(
(sd.id == None) | (st.id.belongs(deps_with_no_deps))
)
)._select(st.id, distinct=True, left=sd.on(
(st.id == sd.task_parent) &
(sd.can_visit == False)
)
)
all_available = db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(st.next_run_time <= now) &
(st.enabled == True) &
(st.id.belongs(no_deps))
)
limit = len(all_workers) * (50 / (len(wkgroups) or 1))
# let's freeze it up
db.commit()
x = 0
r_server = self.r_server
for group in wkgroups.keys():
queued_list = self._nkey('queued:%s' % group)
queued_set = self._nkey('queued_set:%s' % group)
# if tasks are already running, don't assign them again
running_list = self._nkey('running:%s' % group)
while True:
# the joys of rpoplpush!
t = r_server.rpoplpush(running_list, queued_list)
if not t:
# no more
break
r_server.sadd(queued_set, t)
tasks = all_available(st.group_name == group).select(
limitby=(0, limit), orderby=st.next_run_time)
# put tasks in the processing list
for task in tasks:
x += 1
gname = task.group_name
if r_server.sismember(queued_set, task.id):
# already queued, we don't put on the list
continue
r_server.sadd(queued_set, task.id)
r_server.lpush(queued_list, task.id)
d = dict(status=QUEUED)
if not task.task_name:
d['task_name'] = task.function_name
db(
(st.id == task.id) &
(st.status.belongs((QUEUED, ASSIGNED)))
).update(**d)
db.commit()
# I didn't report tasks but I'm working nonetheless!!!!
if x > 0:
self.w_stats.empty_runs = 0
self.w_stats.queue = x
self.w_stats.distribution = wkgroups
self.w_stats.workers = len(all_workers)
# I'll be greedy only if tasks queued are equal to the limit
# (meaning there could be others ready to be queued)
self.greedy = x >= limit
logger.info('TICKER: workers are %s', len(all_workers))
logger.info('TICKER: tasks are %s', x)
def pop_task(self, db):
"""Lift a task off a queue."""
r_server = self.r_server
st = self.db.scheduler_task
task = None
# ready to process something
for group in self.group_names:
queued_set = self._nkey('queued_set:%s' % group)
queued_list = self._nkey('queued:%s' % group)
running_list = self._nkey('running:%s' % group)
running_dict = self._nkey('running_dict:%s' % group)
self.w_stats.status = POLLING
# polling lasts 1 minute in total across all groups: with more groups,
# each brpoplpush gets a proportionally shorter timeout
logger.debug(' polling on %s', group)
task_id = r_server.brpoplpush(queued_list, running_list,
timeout=60 / len(self.group_names))
logger.debug(' finished polling')
self.w_stats.status = ACTIVE
if task_id:
r_server.hset(running_dict, task_id, self.worker_name)
r_server.srem(queued_set, task_id)
task = db(
(st.id == task_id) &
(st.status == QUEUED)
).select().first()
if not task:
r_server.lrem(running_list, 0, task_id)
r_server.hdel(running_dict, task_id)
r_server.lrem(queued_list, 0, task_id)
logger.error("we received a task that isn't there (%s)",
task_id)
return None
break
now = self.now()
if task:
task.update_record(status=RUNNING, last_run_time=now)
# no one will touch my task!
db.commit()
logger.debug(' work to do %s', task.id)
else:
logger.info('nothing to do')
return None
times_run = task.times_run + 1
if not task.prevent_drift:
next_run_time = task.last_run_time + datetime.timedelta(
seconds=task.period
)
else:
# calc next_run_time based on available slots
# see #1191
next_run_time = task.start_time
secondspassed = self.total_seconds(now - next_run_time)
steps = secondspassed // task.period + 1
next_run_time += datetime.timedelta(seconds=task.period * steps)
if times_run < task.repeats or task.repeats == 0:
# need to run (repeating task)
run_again = True
else:
# no need to run again
run_again = False
run_id = 0
while not self.discard_results:
logger.debug(' new scheduler_run record')
try:
run_id = db.scheduler_run.insert(
task_id=task.id,
status=RUNNING,
start_time=now,
worker_name=self.worker_name)
db.commit()
break
except:
time.sleep(0.5)
db.rollback()
logger.info('new task %(id)s "%(task_name)s"'
' %(application_name)s.%(function_name)s' % task)
return Task(
app=task.application_name,
function=task.function_name,
timeout=task.timeout,
args=task.args, # in json
vars=task.vars, # in json
task_id=task.id,
run_id=run_id,
run_again=run_again,
next_run_time=next_run_time,
times_run=times_run,
stop_time=task.stop_time,
retry_failed=task.retry_failed,
times_failed=task.times_failed,
sync_output=task.sync_output,
uuid=task.uuid,
group_name=task.group_name)
def report_task(self, task, task_report):
"""
Override.
Needed only because we need to pop the task off the
running list of its group
"""
r_server = self.r_server
db = self.db
now = self.now()
st = db.scheduler_task
sr = db.scheduler_run
if not self.discard_results:
if task_report.result != 'null' or task_report.tb:
# result is 'null' as a string if task completed
# if it's stopped it's None as NoneType, so we record
# the STOPPED "run" anyway
logger.debug(' recording task report in db (%s)',
task_report.status)
db(sr.id == task.run_id).update(
status=task_report.status,
stop_time=now,
run_result=task_report.result,
run_output=task_report.output,
traceback=task_report.tb)
else:
logger.debug(' deleting task report in db because of no result')
db(sr.id == task.run_id).delete()
# if there is a stop_time and the following run would exceed it
is_expired = (task.stop_time and
task.next_run_time > task.stop_time and
True or False)
status = (task.run_again and is_expired and EXPIRED or
task.run_again and not is_expired and
QUEUED or COMPLETED)
if task_report.status == COMPLETED:
# assigned calculations
d = dict(status=status,
next_run_time=task.next_run_time,
times_run=task.times_run,
times_failed=0,
assigned_worker_name=self.worker_name
)
db(st.id == task.task_id).update(**d)
if status == COMPLETED:
self.update_dependencies(db, task.task_id)
else:
st_mapping = {'FAILED': 'FAILED',
'TIMEOUT': 'TIMEOUT',
'STOPPED': 'FAILED'}[task_report.status]
status = (task.retry_failed and
task.times_failed < task.retry_failed and
QUEUED or task.retry_failed == -1 and
QUEUED or st_mapping)
db(st.id == task.task_id).update(
times_failed=st.times_failed + 1,
next_run_time=task.next_run_time,
status=status,
assigned_worker_name=self.worker_name
)
logger.info('task completed (%s)', task_report.status)
running_list = self._nkey('running:%s' % task.group_name)
running_dict = self._nkey('running_dict:%s' % task.group_name)
r_server.lrem(running_list, 0, task.task_id)
r_server.hdel(running_dict, task.task_id)
def wrapped_pop_task(self):
"""Commodity function to call `pop_task` and trap exceptions.
If an exception is raised, it is assumed to have happened because of
database contention, and `pop_task` is retried after 0.5 seconds
"""
db = self.db
db.commit() # another nifty db.commit() only for Mysql
x = 0
while x < 10:
try:
rtn = self.pop_task(db)
return rtn
# this is here to "interrupt" any brpoplpush op easily
except KeyboardInterrupt:
self.give_up()
break
except:
self.w_stats.errors += 1
db.rollback()
logger.error(' error popping tasks')
x += 1
time.sleep(0.5)
def get_workers(self, only_ticker=False):
"""Return a dict holding worker_name : {**columns}
representing all "registered" workers.
only_ticker returns only the worker running as a TICKER,
if there is any
"""
r_server = self.r_server
status_keyset = self._nkey('worker_statuses')
registered_workers = r_server.smembers(status_keyset)
all_workers = {}
for worker in registered_workers:
w = r_server.hgetall(worker)
w = Storage(w)
if not w:
continue
all_workers[w.worker_name] = Storage(
status=w.status,
first_heartbeat=self.str2date(w.first_heartbeat),
last_heartbeat=self.str2date(w.last_heartbeat),
group_names=loads(w.group_names, object_hook=_decode_dict),
is_ticker=w.is_ticker == 'True' and True or False,
worker_stats=loads(w.worker_stats, object_hook=_decode_dict)
)
if only_ticker:
for k, v in all_workers.iteritems():
if v['is_ticker']:
return {k: v}
return {}
return all_workers
def set_worker_status(self, group_names=None, action=ACTIVE,
exclude=None, limit=None, worker_name=None):
"""Internal function to set worker's status"""
r_server = self.r_server
all_workers = self.get_workers()
if not group_names:
group_names = self.group_names
elif isinstance(group_names, str):
group_names = [group_names]
exclusion = exclude and exclude.append(action) or [action]
workers = []
if worker_name is not None:
if worker_name in all_workers.keys():
workers = [worker_name]
else:
for k, v in all_workers.iteritems():
if v.status not in exclusion and set(group_names) & set(v.group_names):
workers.append(k)
if limit and worker_name is None:
workers = workers[:limit]
if workers:
with r_server.pipeline() as pipe:
while True:
try:
pipe.watch('SET_WORKER_STATUS')
for w in workers:
worker_key = self._nkey('worker_status:%s' % w)
pipe.hset(worker_key, 'status', action)
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
def queue_task(self, function, pargs=[], pvars={}, **kwargs):
"""
FIXME: immediate should put item in queue. The hard part is
that currently there are no hooks happening at post-commit time
Queue tasks. This takes care of handling the validation of all
parameters
Args:
function: the function (anything callable with a __name__)
pargs: "raw" args to be passed to the function. Automatically
jsonified.
pvars: "raw" kwargs to be passed to the function. Automatically
jsonified
kwargs: all the parameters available (basically, every
`scheduler_task` column). If args and vars are here, they should
be jsonified already, and they will override pargs and pvars
Returns:
a dict just as a normal validate_and_insert(), plus a uuid key
holding the uuid of the queued task. If validation is not passed
(i.e. some parameters are invalid) both id and uuid will be None,
and you'll get an "error" dict holding the errors found.
"""
if hasattr(function, '__name__'):
function = function.__name__
targs = 'args' in kwargs and kwargs.pop('args') or dumps(pargs)
tvars = 'vars' in kwargs and kwargs.pop('vars') or dumps(pvars)
tuuid = 'uuid' in kwargs and kwargs.pop('uuid') or web2py_uuid()
tname = 'task_name' in kwargs and kwargs.pop('task_name') or function
immediate = 'immediate' in kwargs and kwargs.pop('immediate') or None
rtn = self.db.scheduler_task.validate_and_insert(
function_name=function,
task_name=tname,
args=targs,
vars=tvars,
uuid=tuuid,
**kwargs)
if not rtn.errors:
rtn.uuid = tuuid
if immediate:
r_server = self.r_server
ticker = self.get_workers(only_ticker=True)
if ticker.keys():
ticker = ticker.keys()[0]
with r_server.pipeline() as pipe:
while True:
try:
pipe.watch('SET_WORKER_STATUS')
worker_key = self._nkey('worker_status:%s' % ticker)
pipe.hset(worker_key, 'status', 'PICK')
pipe.execute()
break
except RWatchError:
time.sleep(0.1)
continue
else:
rtn.uuid = None
return rtn
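# Minimal usage sketch (function and argument names are illustrative, not part of
# this module), mirroring the signature documented above:
#   def sendmail(to, subject): ...
#   sched = RScheduler(db, dict(sendmail=sendmail), redis_conn=RConn())
#   ret = sched.queue_task(sendmail, pvars=dict(to='[email protected]', subject='hi'),
#                          immediate=True)
#   # ret.uuid identifies the queued task; ret.errors holds validation errors, if any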
def stop_task(self, ref):
"""Shortcut for task termination.
If the task is RUNNING it will terminate it, meaning that status
will be set as FAILED.
If the task is QUEUED, its stop_time will be set to "now",
the enabled flag will be set to False, and the status to STOPPED
Args:
ref: can be
- an integer : lookup will be done by scheduler_task.id
- a string : lookup will be done by scheduler_task.uuid
Returns:
- 1 if task was stopped (meaning an update has been done)
- None if task was not found, or if task was not RUNNING or QUEUED
Note:
Experimental
"""
r_server = self.r_server
st = self.db.scheduler_task
if isinstance(ref, int):
q = st.id == ref
elif isinstance(ref, str):
q = st.uuid == ref
else:
raise SyntaxError(
"You can retrieve results only by id or uuid")
task = self.db(q).select(st.id, st.status, st.group_name)
task = task.first()
rtn = None
if not task:
return rtn
running_dict = self._nkey('running_dict:%s' % task.group_name)
if task.status == 'RUNNING':
worker_key = r_server.hget(running_dict, task.id)
worker_key = self._nkey('worker_status:%s' % (worker_key))
r_server.hset(worker_key, 'status', STOP_TASK)
elif task.status == 'QUEUED':
rtn = self.db(q).update(
stop_time=self.now(),
enabled=False,
status=STOPPED)
return rtn
| """
Elects a ticker.
This is slightly more convoluted than the original
but is far more efficient
"""
r_server = pipe
status_keyset = self._nkey('worker_statuses')
registered_workers = r_server.smembers(status_keyset)
ticker = None
all_active = []
all_workers = []
for worker in registered_workers:
w = r_server.hgetall(worker)
if w['worker_name'] != self.worker_name and w['status'] == ACTIVE:
all_active.append(w)
if w['is_ticker'] == 'True' and ticker is None:
ticker = w
all_workers.append(w)
not_busy = self.w_stats.status in (ACTIVE, POLLING)
if not ticker:
if not_busy:
# only if this worker isn't busy, otherwise wait for a free one
for worker in all_workers:
key = self._nkey('worker_status:%s' % worker['worker_name'])
if worker['worker_name'] == self.worker_name:
r_server.hset(key, 'is_ticker', True)
else:
r_server.hset(key, 'is_ticker', False)
logger.info("TICKER: I'm a ticker")
else:
# giving up, only if I'm not alone
if len(all_active) > 1:
key = self._nkey('worker_status:%s' % (self.worker_name))
r_server.hset(key, 'is_ticker', False)
else:
not_busy = True
return not_busy
else:
logger.info(
"%s is a ticker, I'm a poor worker" % ticker['worker_name'])
return False |
index.ts | import { User } from 'types/User';
import { HOC, branch, renderNothing } from 'recompose';
function | (user: User) {
const fullName = `${user.name || ''} ${user.surname || ''}`.trim();
const name = fullName || user.email || user.phoneNumber || '';
return name;
}
const hideIfNoData = (hasNoData: HOC) => branch(hasNoData, renderNothing);
export { getUserName, hideIfNoData };
| getUserName |
AuthContext.tsx | import { ReactNode, useEffect, useState } from "react";
import { createContext } from "react";
import { auth, firebase } from "../services/firebase";
type User = {
id: string;
name: string;
avatar: string;
};
type AuthContextType = {
user: User | undefined;
signInWithGoogle: () => Promise<void>;
};
type AuthContextProviderProps = {
children: ReactNode;
};
export const AuthContext = createContext({} as AuthContextType);
export function AuthContextProvider(props: AuthContextProviderProps) {
const [user, setUser] = useState<User>();
useEffect(() => {
// auth.signOut()
const unsubscribe = auth.onAuthStateChanged((user) => {
if (user) {
const { displayName, photoURL, uid } = user;
if (!displayName || !photoURL) {
throw new Error("Missing information from Google Account");
}
setUser({
id: uid,
name: displayName,
avatar: photoURL,
});
}
});
return () => unsubscribe();
}, []);
async function signInWithGoogle() {
const provider = new firebase.auth.GoogleAuthProvider(); | if (result.user) {
const { displayName, photoURL, uid } = result.user;
if (!displayName || !photoURL) {
throw new Error("Missing information from Google Account");
}
setUser({
id: uid,
name: displayName,
avatar: photoURL,
});
}
}
return (
<AuthContext.Provider value={{ user, signInWithGoogle }}>
{props.children}
</AuthContext.Provider>
);
} |
const result = await auth.signInWithPopup(provider);
|
config.py | #
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
import argparse, os, ConfigParser, sys, re
from pysandesh.sandesh_base import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
class CfgParser(object):
CONF_DEFAULT_PATH = '/etc/contrail/contrail-topology.conf'
def __init__(self, argv):
|
def parse(self):
'''
command line example
contrail-topology [-h] [-c FILE]
[--analytics_api ANALYTICS_API [ANALYTICS_API ...]]
[--collectors COLLECTORS [COLLECTORS ...]]
[--log_file LOG_FILE] [--log_local]
[--log_category LOG_CATEGORY] [--log_level LOG_LEVEL]
[--use_syslog] [--syslog_facility SYSLOG_FACILITY]
[--scan_frequency SCAN_FREQUENCY]
[--http_server_port HTTP_SERVER_PORT]
optional arguments:
-h, --help show this help message and exit
-c FILE, --conf_file FILE
Specify config file
--analytics_api ANALYTICS_API [ANALYTICS_API ...]
List of analytics-api IP addresses in ip:port format
--collectors COLLECTORS [COLLECTORS ...]
List of Collector IP addresses in ip:port format
--log_file LOG_FILE Filename for the logs to be written to
--log_local Enable local logging of sandesh messages
--log_category LOG_CATEGORY
Category filter for local logging of sandesh messages
--log_level LOG_LEVEL
Severity level for local logging of sandesh messages
--use_syslog Use syslog for logging
--syslog_facility SYSLOG_FACILITY
Syslog facility to receive log lines
--scan_frequency SCAN_FREQUENCY
Time between snmp poll
--http_server_port HTTP_SERVER_PORT
introspect server port
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
kwargs = {'help': "Specify config file", 'metavar':"FILE",
'action':'append'
}
if os.path.exists(self.CONF_DEFAULT_PATH):
kwargs['default'] = [self.CONF_DEFAULT_PATH]
conf_parser.add_argument("-c", "--conf_file", **kwargs)
args, remaining_argv = conf_parser.parse_known_args(self._argv.split())
defaults = {
'collectors' : ['127.0.0.1:8086'],
'analytics_api' : ['127.0.0.1:8081'],
'log_local' : False,
'log_level' : SandeshLevel.SYS_DEBUG,
'log_category' : '',
'log_file' : Sandesh._DEFAULT_LOG_FILE,
'use_syslog' : False,
'syslog_facility' : Sandesh._DEFAULT_SYSLOG_FACILITY,
'scan_frequency' : 60,
'http_server_port': 5921,
'zookeeper' : '127.0.0.1:2181',
}
config = None
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.optionxform = str
config.read(args.conf_file)
if 'DEFAULTS' in config.sections():
defaults.update(dict(config.items("DEFAULTS")))
# Override with CLI options
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.set_defaults(**defaults)
parser.add_argument("--analytics_api",
help="List of analytics-api IP addresses in ip:port format",
nargs="+")
parser.add_argument("--collectors",
help="List of Collector IP addresses in ip:port format",
nargs="+")
parser.add_argument(
"--log_file",
help="Filename for the logs to be written to")
parser.add_argument("--log_local", action="store_true",
help="Enable local logging of sandesh messages")
parser.add_argument(
"--log_category",
help="Category filter for local logging of sandesh messages")
parser.add_argument(
"--log_level",
help="Severity level for local logging of sandesh messages")
parser.add_argument("--use_syslog",
action="store_true",
help="Use syslog for logging")
parser.add_argument("--syslog_facility",
help="Syslog facility to receive log lines")
parser.add_argument("--scan_frequency", type=int,
help="Time between snmp poll")
parser.add_argument("--http_server_port", type=int,
help="introspect server port")
parser.add_argument("--zookeeper",
help="ip:port of zookeeper server")
self._args = parser.parse_args(remaining_argv)
if type(self._args.collectors) is str:
self._args.collectors = self._args.collectors.split()
if type(self._args.analytics_api) is str:
self._args.analytics_api = self._args.analytics_api.split()
self._args.config_sections = config
def _pat(self):
if self.__pat is None:
self.__pat = re.compile(', *| +')
return self.__pat
def _mklist(self, s):
return self._pat().split(s)
def collectors(self):
return self._args.collectors
def zookeeper_server(self):
return self._args.zookeeper
def analytics_api(self):
return self._args.analytics_api
def log_local(self):
return self._args.log_local
def log_category(self):
return self._args.log_category
def log_level(self):
return self._args.log_level
def log_file(self):
return self._args.log_file
def use_syslog(self):
return self._args.use_syslog
def syslog_facility(self):
return self._args.syslog_facility
def frequency(self):
return self._args.scan_frequency
def http_port(self):
return self._args.http_server_port
| self._args = None
self.__pat = None
self._argv = argv or ' '.join(sys.argv[1:]) |
__init__.py | """Output formatters."""
import os
from pathlib import Path
from typing import TYPE_CHECKING, Generic, TypeVar, Union
import rich
if TYPE_CHECKING:
from ansiblelint.errors import MatchError
T = TypeVar('T', bound='BaseFormatter')
class BaseFormatter(Generic[T]):
"""Formatter of ansible-lint output.
Base class for output formatters.
Args:
base_dir (str|Path): reference directory against which display relative path.
display_relative_path (bool): whether to show path as relative or absolute
"""
def __init__(self, base_dir: Union[str, Path], display_relative_path: bool) -> None:
"""Initialize a BaseFormatter instance."""
if isinstance(base_dir, str):
base_dir = Path(base_dir)
if base_dir: # can be None
base_dir = base_dir.absolute()
# Required 'cause os.path.relpath() does not accept Path before 3.6
if isinstance(base_dir, Path):
base_dir = str(base_dir) # Drop when Python 3.5 is no longer supported
self._base_dir = base_dir if display_relative_path else None
def _format_path(self, path: Union[str, Path]) -> str:
# Required 'cause os.path.relpath() does not accept Path before 3.6
if isinstance(path, Path):
path = str(path) # Drop when Python 3.5 is no longer supported
if not self._base_dir:
return path
# Use os.path.relpath 'cause Path.relative_to() misbehaves
return os.path.relpath(path, start=self._base_dir)
def format(self, match: "MatchError") -> str:
return str(match)
def escape(self, text: str) -> str:
"""Escapes a string to avoid processing it as markup."""
return rich.markup.escape(text)
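# Minimal usage sketch (paths and the MatchError instance are illustrative):
#   formatter = Formatter(base_dir="/home/user/playbooks", display_relative_path=True)
#   print(formatter.format(match))  # `match` is an ansiblelint MatchError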
class Formatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
_id = getattr(match.rule, 'id', '000')
result = (
f"[error_code]{_id}[/][dim]:[/] [error_title]{self.escape(match.message)}[/]")
if match.tag:
result += f" [dim][error_code]({match.tag})[/][/]"
result += (
"\n"
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}")
if match.details:
result += f" [dim]{match.details}[/]"
result += "\n"
return result
class QuietFormatter(BaseFormatter):
def format(self, match: "MatchError") -> str:
return (
f"[error_code]{match.rule.id}[/] "
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}")
class ParseableFormatter(BaseFormatter):
"""Parseable uses PEP8 compatible format."""
def format(self, match: "MatchError") -> str:
result = (
f"[filename]{self._format_path(match.filename or '')}[/]:{match.position}: "
f"[error_code]E{match.rule.id}[/] [dim]{self.escape(match.message)}[/]")
if match.tag:
result += f" [dim][error_code]({match.tag})[/][/]"
return result
class AnnotationsFormatter(BaseFormatter):
# https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-a-warning-message
"""Formatter for emitting violations as GitHub Workflow Commands.
These commands trigger the GHA Workflow runners platform to post violations
in the form of GitHub Checks API annotations that appear rendered in the
pull-request files view.
::debug file={name},line={line},col={col},severity={severity}::{message}
::warning file={name},line={line},col={col},severity={severity}::{message}
::error file={name},line={line},col={col},severity={severity}::{message}
Supported levels: debug, warning, error
"""
def format(self, match: "MatchError") -> str:
"""Prepare a match instance for reporting as a GitHub Actions annotation."""
level = self._severity_to_level(match.rule.severity)
file_path = self._format_path(match.filename or "")
line_num = match.linenumber
rule_id = match.rule.id
severity = match.rule.severity
violation_details = self.escape(match.message)
if match.column:
col = f",col={match.column}"
else:
col = ""
return (
f"::{level} file={file_path},line={line_num}{col},severity={severity}"
f"::E{rule_id} {violation_details}"
)
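# Example emitted annotation (file, line, severity and rule are illustrative):
# ::error file=playbooks/site.yml,line=12,col=4,severity=HIGH::E501 example rule message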
@staticmethod
def _severity_to_level(severity: str) -> str:
if severity in ['VERY_LOW', 'LOW']:
return 'warning'
if severity in ['INFO']:
return 'debug'
# ['MEDIUM', 'HIGH', 'VERY_HIGH'] or anything else
return 'error'
class | (BaseFormatter):
def format(self, match: "MatchError") -> str:
filename = self._format_path(match.filename or "")
position = match.position
rule_id = u"E{0}".format(match.rule.id)
severity = match.rule.severity
message = self.escape(str(match.message))
return (
f"[filename]{filename}[/]:{position}: [[error_code]{rule_id}[/]] "
f"[[error_code]{severity}[/]] [dim]{message}[/]")
| ParseableSeverityFormatter |
_base.js | //>>built
define("dojox/encoding/digests/_base",["dojo/_base/lang"],function(e){e=e.getObject("dojox.encoding.digests",!0);e.outputTypes={Base64:0,Hex:1,String:2,Raw:3};e.addWords=function(d,c){var a=(d&65535)+(c&65535);return(d>>16)+(c>>16)+(a>>16)<<16|a&65535};e.stringToWord=function(d){for(var c=[],a=0,b=8*d.length;a<b;a+=8)c[a>>5]|=(d.charCodeAt(a/8)&255)<<a%32;return c};e.wordToString=function(d){for(var c=[],a=0,b=32*d.length;a<b;a+=8)c.push(String.fromCharCode(d[a>>5]>>>a%32&255));return c.join("")};
e.wordToHex=function(d){for(var c=[],a=0,b=4*d.length;a<b;a++)c.push("0123456789abcdef".charAt(d[a>>2]>>8*(a%4)+4&15)+"0123456789abcdef".charAt(d[a>>2]>>8*(a%4)&15));return c.join("")};e.wordToBase64=function(d){for(var c=[],a=0,b=4*d.length;a<b;a+=3)for(var e=(d[a>>2]>>8*(a%4)&255)<<16|(d[a+1>>2]>>8*((a+1)%4)&255)<<8|d[a+2>>2]>>8*((a+2)%4)&255,f=0;4>f;f++)8*a+6*f>32*d.length?c.push("\x3d"):c.push("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(e>>6*(3-f)&63));return c.join("")};
e.stringToUtf8=function(d){for(var c="",a=-1,b,e;++a<d.length;)b=d.charCodeAt(a),e=a+1<d.length?d.charCodeAt(a+1):0,55296<=b&&(56319>=b&&56320<=e&&57343>=e)&&(b=65536+((b&1023)<<10)+(e&1023),a++),127>=b?c+=String.fromCharCode(b):2047>=b?c+=String.fromCharCode(192|b>>>6&31,128|b&63):65535>=b?c+=String.fromCharCode(224|b>>>12&15,128|b>>>6&63,128|b&63):2097151>=b&&(c+=String.fromCharCode(240|b>>>18&7,128|b>>>12&63,128|b>>>6&63,128|b&63));return c};return e}); | //# sourceMappingURL=_base.js.map | |
allow_std.rs | use futures_core::task::{Waker, Poll};
use futures_io::{AsyncRead, AsyncWrite};
use std::{fmt, io};
use std::pin::Pin;
use std::string::String;
use std::vec::Vec;
/// A simple wrapper type which allows types that only implement
/// `std::io::Read` or `std::io::Write` to be used in contexts which
/// expect an `AsyncRead` or `AsyncWrite`.
///
/// If these types issue an error with the kind `io::ErrorKind::WouldBlock`,
/// it is expected that they will notify the current task on readiness.
/// Synchronous `std` types should not issue errors of this kind and
/// are safe to use in this context. However, using these types with
/// `AllowStdIo` will cause the event loop to block, so they should be used
/// with care.
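/// A minimal usage sketch (illustrative only; assumes the `futures` executor and
/// the `AsyncWriteExt` extension trait are available to the consuming crate):
///
/// ```ignore
/// use futures::executor::block_on;
/// use futures::io::AsyncWriteExt;
///
/// // `Vec<u8>` implements `std::io::Write`, so the wrapper exposes it as `AsyncWrite`.
/// let mut writer = AllowStdIo::new(Vec::new());
/// block_on(writer.write_all(b"hello")).unwrap();
/// assert_eq!(writer.into_inner(), b"hello");
/// ```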
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct AllowStdIo<T>(T);
impl<T> Unpin for AllowStdIo<T> {}
macro_rules! try_with_interrupt {
($e:expr) => {
loop {
match $e {
Ok(e) => {
break e;
}
Err(ref e) if e.kind() == ::std::io::ErrorKind::Interrupted => {
continue;
}
Err(e) => {
return Poll::Ready(Err(e));
}
}
}
}
}
impl<T> AllowStdIo<T> {
/// Creates a new `AllowStdIo` from an existing IO object.
pub fn new(io: T) -> Self {
AllowStdIo(io)
}
/// Returns a reference to the contained IO object.
pub fn get_ref(&self) -> &T {
&self.0
}
/// Returns a mutable reference to the contained IO object.
pub fn get_mut(&mut self) -> &mut T {
&mut self.0
}
/// Consumes self and returns the contained IO object.
pub fn into_inner(self) -> T {
self.0
}
}
impl<T> io::Write for AllowStdIo<T> where T: io::Write {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.0.flush()
}
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
self.0.write_all(buf)
}
fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> io::Result<()> {
self.0.write_fmt(fmt)
}
}
impl<T> AsyncWrite for AllowStdIo<T> where T: io::Write {
fn poll_write(mut self: Pin<&mut Self>, _: &Waker, buf: &[u8])
-> Poll<io::Result<usize>>
{
Poll::Ready(Ok(try_with_interrupt!(self.0.write(buf))))
}
fn poll_flush(mut self: Pin<&mut Self>, _: &Waker) -> Poll<io::Result<()>> {
try_with_interrupt!(self.0.flush());
Poll::Ready(Ok(()))
}
fn poll_close(self: Pin<&mut Self>, waker: &Waker) -> Poll<io::Result<()>> {
self.poll_flush(waker)
}
}
impl<T> io::Read for AllowStdIo<T> where T: io::Read {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.0.read(buf)
}
// TODO: implement the `initializer` fn when it stabilizes.
// See rust-lang/rust #42788
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
self.0.read_to_end(buf)
}
fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
self.0.read_to_string(buf)
}
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
self.0.read_exact(buf)
} | -> Poll<io::Result<usize>>
{
Poll::Ready(Ok(try_with_interrupt!(self.0.read(buf))))
}
} | }
impl<T> AsyncRead for AllowStdIo<T> where T: io::Read {
fn poll_read(mut self: Pin<&mut Self>, _: &Waker, buf: &mut [u8]) |
builtin_attrs.rs | //! Built-in attributes and `cfg` flag gating.
use AttributeGate::*;
use AttributeType::*;
use crate::{Features, Stability};
use rustc_data_structures::fx::FxHashMap;
use rustc_span::symbol::{sym, Symbol};
use std::lazy::SyncLazy;
type GateFn = fn(&Features) -> bool;
macro_rules! cfg_fn {
($field: ident) => {
(|features| features.$field) as GateFn
};
}
pub type GatedCfg = (Symbol, Symbol, GateFn);
/// `cfg(...)`'s that are feature gated.
const GATED_CFGS: &[GatedCfg] = &[
// (name in cfg, feature, function to check if the feature is enabled)
(sym::target_abi, sym::cfg_target_abi, cfg_fn!(cfg_target_abi)),
(sym::target_thread_local, sym::cfg_target_thread_local, cfg_fn!(cfg_target_thread_local)),
(sym::target_has_atomic, sym::cfg_target_has_atomic, cfg_fn!(cfg_target_has_atomic)),
(sym::target_has_atomic_load_store, sym::cfg_target_has_atomic, cfg_fn!(cfg_target_has_atomic)),
(
sym::target_has_atomic_equal_alignment,
sym::cfg_target_has_atomic,
cfg_fn!(cfg_target_has_atomic),
),
(sym::sanitize, sym::cfg_sanitize, cfg_fn!(cfg_sanitize)),
(sym::version, sym::cfg_version, cfg_fn!(cfg_version)),
(sym::panic, sym::cfg_panic, cfg_fn!(cfg_panic)),
];
/// Find a gated cfg determined by the `pred`icate which is given the cfg's name.
pub fn find_gated_cfg(pred: impl Fn(Symbol) -> bool) -> Option<&'static GatedCfg> {
GATED_CFGS.iter().find(|(cfg_sym, ..)| pred(*cfg_sym))
}
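// For example (illustrative): `find_gated_cfg(|name| name == sym::sanitize)` yields
// the entry that gates `cfg(sanitize)` behind the `cfg_sanitize` feature.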
// If you change this, please modify `src/doc/unstable-book` as well. You must
// move that documentation into the relevant place in the other docs, and
// remove the chapter on the flag.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum AttributeType {
/// Normal, builtin attribute that is consumed
/// by the compiler before the unused_attribute check
Normal,
/// Builtin attribute that is only allowed at the crate level
CrateLevel,
}
#[derive(Clone, Copy)]
pub enum AttributeGate {
/// Is gated by a given feature gate, reason
/// and function to check if enabled
Gated(Stability, Symbol, &'static str, fn(&Features) -> bool),
/// Ungated attribute, can be used on all release channels
Ungated,
}
// fn() is not Debug
impl std::fmt::Debug for AttributeGate {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match *self {
Self::Gated(ref stab, name, expl, _) => {
write!(fmt, "Gated({:?}, {}, {})", stab, name, expl)
}
Self::Ungated => write!(fmt, "Ungated"),
}
}
}
impl AttributeGate {
fn is_deprecated(&self) -> bool {
matches!(*self, Self::Gated(Stability::Deprecated(_, _), ..))
}
}
/// A template that the attribute input must match.
/// Only top-level shape (`#[attr]` vs `#[attr(...)]` vs `#[attr = ...]`) is considered now.
#[derive(Clone, Copy, Default)]
pub struct | {
pub word: bool,
pub list: Option<&'static str>,
pub name_value_str: Option<&'static str>,
}
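// For example, `template!(Word, NameValueStr: "reason")` (built via the `template!`
// macro below) matches `#[ignore]` and `#[ignore = "reason"]` but not `#[ignore(...)]`.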
/// A convenience macro for constructing attribute templates.
/// E.g., `template!(Word, List: "description")` means that the attribute
/// supports forms `#[attr]` and `#[attr(description)]`.
macro_rules! template {
(Word) => { template!(@ true, None, None) };
(List: $descr: expr) => { template!(@ false, Some($descr), None) };
(NameValueStr: $descr: expr) => { template!(@ false, None, Some($descr)) };
(Word, List: $descr: expr) => { template!(@ true, Some($descr), None) };
(Word, NameValueStr: $descr: expr) => { template!(@ true, None, Some($descr)) };
(List: $descr1: expr, NameValueStr: $descr2: expr) => {
template!(@ false, Some($descr1), Some($descr2))
};
(Word, List: $descr1: expr, NameValueStr: $descr2: expr) => {
template!(@ true, Some($descr1), Some($descr2))
};
(@ $word: expr, $list: expr, $name_value_str: expr) => { AttributeTemplate {
word: $word, list: $list, name_value_str: $name_value_str
} };
}
macro_rules! ungated {
($attr:ident, $typ:expr, $tpl:expr $(,)?) => {
(sym::$attr, $typ, $tpl, Ungated)
};
}
macro_rules! gated {
($attr:ident, $typ:expr, $tpl:expr, $gate:ident, $msg:expr $(,)?) => {
(sym::$attr, $typ, $tpl, Gated(Stability::Unstable, sym::$gate, $msg, cfg_fn!($gate)))
};
($attr:ident, $typ:expr, $tpl:expr, $msg:expr $(,)?) => {
(sym::$attr, $typ, $tpl, Gated(Stability::Unstable, sym::$attr, $msg, cfg_fn!($attr)))
};
}
macro_rules! rustc_attr {
(TEST, $attr:ident, $typ:expr, $tpl:expr $(,)?) => {
rustc_attr!(
$attr,
$typ,
$tpl,
concat!(
"the `#[",
stringify!($attr),
"]` attribute is just used for rustc unit tests \
and will never be stable",
),
)
};
($attr:ident, $typ:expr, $tpl:expr, $msg:expr $(,)?) => {
(
sym::$attr,
$typ,
$tpl,
Gated(Stability::Unstable, sym::rustc_attrs, $msg, cfg_fn!(rustc_attrs)),
)
};
}
macro_rules! experimental {
($attr:ident) => {
concat!("the `#[", stringify!($attr), "]` attribute is an experimental feature")
};
}
const IMPL_DETAIL: &str = "internal implementation detail";
const INTERNAL_UNSTABLE: &str = "this is an internal attribute that will never be stable";
pub type BuiltinAttribute = (Symbol, AttributeType, AttributeTemplate, AttributeGate);
/// Attributes that have a special meaning to rustc or rustdoc.
#[rustfmt::skip]
pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
// ==========================================================================
// Stable attributes:
// ==========================================================================
// Conditional compilation:
ungated!(cfg, Normal, template!(List: "predicate")),
ungated!(cfg_attr, Normal, template!(List: "predicate, attr1, attr2, ...")),
// Testing:
ungated!(ignore, Normal, template!(Word, NameValueStr: "reason")),
ungated!(
should_panic, Normal,
template!(Word, List: r#"expected = "reason"#, NameValueStr: "reason"),
),
// FIXME(Centril): This can be used on stable but shouldn't.
ungated!(reexport_test_harness_main, CrateLevel, template!(NameValueStr: "name")),
// Macros:
ungated!(automatically_derived, Normal, template!(Word)),
// FIXME(#14407)
ungated!(macro_use, Normal, template!(Word, List: "name1, name2, ...")),
ungated!(macro_escape, Normal, template!(Word)), // Deprecated synonym for `macro_use`.
ungated!(macro_export, Normal, template!(Word, List: "local_inner_macros")),
ungated!(proc_macro, Normal, template!(Word)),
ungated!(
proc_macro_derive, Normal,
template!(List: "TraitName, /*opt*/ attributes(name1, name2, ...)"),
),
ungated!(proc_macro_attribute, Normal, template!(Word)),
// Lints:
ungated!(warn, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#)),
ungated!(allow, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#)),
ungated!(forbid, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#)),
ungated!(deny, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#)),
ungated!(must_use, Normal, template!(Word, NameValueStr: "reason")),
// FIXME(#14407)
ungated!(
deprecated, Normal,
template!(
Word,
List: r#"/*opt*/ since = "version", /*opt*/ note = "reason""#,
NameValueStr: "reason"
),
),
// Crate properties:
ungated!(crate_name, CrateLevel, template!(NameValueStr: "name")),
ungated!(crate_type, CrateLevel, template!(NameValueStr: "bin|lib|...")),
ungated!(crate_id, CrateLevel, template!(NameValueStr: "ignored")),
// ABI, linking, symbols, and FFI
ungated!(
link, Normal,
template!(List: r#"name = "...", /*opt*/ kind = "dylib|static|...", /*opt*/ wasm_import_module = "...""#),
),
ungated!(link_name, Normal, template!(NameValueStr: "name")),
ungated!(no_link, Normal, template!(Word)),
ungated!(repr, Normal, template!(List: "C")),
ungated!(export_name, Normal, template!(NameValueStr: "name")),
ungated!(link_section, Normal, template!(NameValueStr: "name")),
ungated!(no_mangle, Normal, template!(Word)),
ungated!(used, Normal, template!(Word)),
// Limits:
ungated!(recursion_limit, CrateLevel, template!(NameValueStr: "N")),
ungated!(type_length_limit, CrateLevel, template!(NameValueStr: "N")),
gated!(
const_eval_limit, CrateLevel, template!(NameValueStr: "N"), const_eval_limit,
experimental!(const_eval_limit)
),
gated!(
move_size_limit, CrateLevel, template!(NameValueStr: "N"), large_assignments,
experimental!(move_size_limit)
),
// Entry point:
ungated!(main, Normal, template!(Word)),
ungated!(start, Normal, template!(Word)),
ungated!(no_start, CrateLevel, template!(Word)),
ungated!(no_main, CrateLevel, template!(Word)),
// Modules, prelude, and resolution:
ungated!(path, Normal, template!(NameValueStr: "file")),
ungated!(no_std, CrateLevel, template!(Word)),
ungated!(no_implicit_prelude, Normal, template!(Word)),
ungated!(non_exhaustive, Normal, template!(Word)),
// Runtime
ungated!(windows_subsystem, Normal, template!(NameValueStr: "windows|console")),
ungated!(panic_handler, Normal, template!(Word)), // RFC 2070
// Code generation:
ungated!(inline, Normal, template!(Word, List: "always|never")),
ungated!(cold, Normal, template!(Word)),
ungated!(no_builtins, Normal, template!(Word)),
ungated!(target_feature, Normal, template!(List: r#"enable = "name""#)),
ungated!(track_caller, Normal, template!(Word)),
gated!(
no_sanitize, Normal,
template!(List: "address, memory, thread"),
experimental!(no_sanitize)
),
gated!(no_coverage, Normal, template!(Word), experimental!(no_coverage)),
// FIXME: #14408 assume docs are used since rustdoc looks at them.
ungated!(doc, Normal, template!(List: "hidden|inline|...", NameValueStr: "string")),
// ==========================================================================
// Unstable attributes:
// ==========================================================================
// Linking:
gated!(naked, Normal, template!(Word), naked_functions, experimental!(naked)),
gated!(
link_ordinal, Normal, template!(List: "ordinal"), raw_dylib,
experimental!(link_ordinal)
),
// Plugins:
(
sym::plugin, CrateLevel, template!(List: "name"),
Gated(
Stability::Deprecated(
"https://github.com/rust-lang/rust/pull/64675",
Some("may be removed in a future compiler version"),
),
sym::plugin,
"compiler plugins are deprecated",
cfg_fn!(plugin)
)
),
// Testing:
gated!(allow_fail, Normal, template!(Word), experimental!(allow_fail)),
gated!(
test_runner, CrateLevel, template!(List: "path"), custom_test_frameworks,
"custom test frameworks are an unstable feature",
),
// RFC #1268
gated!(marker, Normal, template!(Word), marker_trait_attr, experimental!(marker)),
gated!(
thread_local, Normal, template!(Word),
"`#[thread_local]` is an experimental feature, and does not currently handle destructors",
),
gated!(no_core, CrateLevel, template!(Word), experimental!(no_core)),
// RFC 2412
gated!(
optimize, Normal, template!(List: "size|speed"), optimize_attribute,
experimental!(optimize),
),
// RFC 2867
gated!(instruction_set, Normal, template!(List: "set"), isa_attribute, experimental!(instruction_set)),
gated!(ffi_returns_twice, Normal, template!(Word), experimental!(ffi_returns_twice)),
gated!(ffi_pure, Normal, template!(Word), experimental!(ffi_pure)),
gated!(ffi_const, Normal, template!(Word), experimental!(ffi_const)),
gated!(
register_attr, CrateLevel, template!(List: "attr1, attr2, ..."),
experimental!(register_attr),
),
gated!(
register_tool, CrateLevel, template!(List: "tool1, tool2, ..."),
experimental!(register_tool),
),
gated!(cmse_nonsecure_entry, Normal, template!(Word), experimental!(cmse_nonsecure_entry)),
// RFC 2632
gated!(
default_method_body_is_const, Normal, template!(Word), const_trait_impl,
"`default_method_body_is_const` is a temporary placeholder for declaring default bodies \
as `const`, which may be removed or renamed in the future."
),
// ==========================================================================
// Internal attributes: Stability, deprecation, and unsafe:
// ==========================================================================
ungated!(feature, CrateLevel, template!(List: "name1, name2, ...")),
// FIXME(#14407) -- only looked at on-demand so we can't
// guarantee they'll have already been checked.
ungated!(
rustc_deprecated, Normal,
template!(List: r#"since = "version", reason = "...""#)
),
// FIXME(#14407)
ungated!(stable, Normal, template!(List: r#"feature = "name", since = "version""#)),
// FIXME(#14407)
ungated!(
unstable, Normal,
template!(List: r#"feature = "name", reason = "...", issue = "N""#),
),
// FIXME(#14407)
ungated!(rustc_const_unstable, Normal, template!(List: r#"feature = "name""#)),
// FIXME(#14407)
ungated!(rustc_const_stable, Normal, template!(List: r#"feature = "name""#)),
gated!(
allow_internal_unstable, Normal, template!(Word, List: "feat1, feat2, ..."),
"allow_internal_unstable side-steps feature gating and stability checks",
),
gated!(
rustc_allow_const_fn_unstable, Normal, template!(Word, List: "feat1, feat2, ..."),
"rustc_allow_const_fn_unstable side-steps feature gating and stability checks"
),
gated!(
allow_internal_unsafe, Normal, template!(Word),
"allow_internal_unsafe side-steps the unsafe_code lint",
),
// ==========================================================================
// Internal attributes: Type system related:
// ==========================================================================
gated!(fundamental, Normal, template!(Word), experimental!(fundamental)),
gated!(
may_dangle, Normal, template!(Word), dropck_eyepatch,
"`may_dangle` has unstable semantics and may be removed in the future",
),
// ==========================================================================
// Internal attributes: Runtime related:
// ==========================================================================
rustc_attr!(rustc_allocator, Normal, template!(Word), IMPL_DETAIL),
rustc_attr!(rustc_allocator_nounwind, Normal, template!(Word), IMPL_DETAIL),
gated!(alloc_error_handler, Normal, template!(Word), experimental!(alloc_error_handler)),
gated!(
default_lib_allocator, Normal, template!(Word), allocator_internals,
experimental!(default_lib_allocator),
),
gated!(
needs_allocator, Normal, template!(Word), allocator_internals,
experimental!(needs_allocator),
),
gated!(panic_runtime, Normal, template!(Word), experimental!(panic_runtime)),
gated!(needs_panic_runtime, Normal, template!(Word), experimental!(needs_panic_runtime)),
gated!(
compiler_builtins, Normal, template!(Word),
"the `#[compiler_builtins]` attribute is used to identify the `compiler_builtins` crate \
which contains compiler-rt intrinsics and will never be stable",
),
gated!(
profiler_runtime, Normal, template!(Word),
"the `#[profiler_runtime]` attribute is used to identify the `profiler_builtins` crate \
which contains the profiler runtime and will never be stable",
),
// ==========================================================================
// Internal attributes, Linkage:
// ==========================================================================
gated!(
linkage, Normal, template!(NameValueStr: "external|internal|..."),
"the `linkage` attribute is experimental and not portable across platforms",
),
rustc_attr!(rustc_std_internal_symbol, Normal, template!(Word), INTERNAL_UNSTABLE),
// ==========================================================================
// Internal attributes, Macro related:
// ==========================================================================
rustc_attr!(
rustc_builtin_macro, Normal,
template!(Word, List: "name, /*opt*/ attributes(name1, name2, ...)"),
IMPL_DETAIL,
),
rustc_attr!(rustc_proc_macro_decls, Normal, template!(Word), INTERNAL_UNSTABLE),
rustc_attr!(
rustc_macro_transparency, Normal,
template!(NameValueStr: "transparent|semitransparent|opaque"),
"used internally for testing macro hygiene",
),
// ==========================================================================
// Internal attributes, Diagnostics related:
// ==========================================================================
rustc_attr!(
rustc_on_unimplemented, Normal,
template!(
List: r#"/*opt*/ message = "...", /*opt*/ label = "...", /*opt*/ note = "...""#,
NameValueStr: "message"
),
INTERNAL_UNSTABLE
),
// Enumerates "identity-like" conversion methods to suggest on type mismatch.
rustc_attr!(rustc_conversion_suggestion, Normal, template!(Word), INTERNAL_UNSTABLE),
// Prevents field reads in the marked trait or method to be considered
// during dead code analysis.
rustc_attr!(rustc_trivial_field_reads, Normal, template!(Word), INTERNAL_UNSTABLE),
// ==========================================================================
// Internal attributes, Const related:
// ==========================================================================
rustc_attr!(rustc_promotable, Normal, template!(Word), IMPL_DETAIL),
rustc_attr!(rustc_legacy_const_generics, Normal, template!(List: "N"), INTERNAL_UNSTABLE),
// ==========================================================================
// Internal attributes, Layout related:
// ==========================================================================
rustc_attr!(
rustc_layout_scalar_valid_range_start, Normal, template!(List: "value"),
"the `#[rustc_layout_scalar_valid_range_start]` attribute is just used to enable \
niche optimizations in libcore and will never be stable",
),
rustc_attr!(
rustc_layout_scalar_valid_range_end, Normal, template!(List: "value"),
"the `#[rustc_layout_scalar_valid_range_end]` attribute is just used to enable \
niche optimizations in libcore and will never be stable",
),
rustc_attr!(
rustc_nonnull_optimization_guaranteed, Normal, template!(Word),
"the `#[rustc_nonnull_optimization_guaranteed]` attribute is just used to enable \
niche optimizations in libcore and will never be stable",
),
// ==========================================================================
// Internal attributes, Misc:
// ==========================================================================
gated!(
lang, Normal, template!(NameValueStr: "name"), lang_items,
"language items are subject to change",
),
(
sym::rustc_diagnostic_item,
Normal,
template!(NameValueStr: "name"),
Gated(
Stability::Unstable,
sym::rustc_attrs,
"diagnostic items compiler internal support for linting",
cfg_fn!(rustc_attrs),
),
),
gated!(
// Used in resolve:
prelude_import, Normal, template!(Word),
"`#[prelude_import]` is for use by rustc only",
),
gated!(
rustc_paren_sugar, Normal, template!(Word), unboxed_closures,
"unboxed_closures are still evolving",
),
rustc_attr!(
rustc_inherit_overflow_checks, Normal, template!(Word),
"the `#[rustc_inherit_overflow_checks]` attribute is just used to control \
overflow checking behavior of several libcore functions that are inlined \
across crates and will never be stable",
),
rustc_attr!(rustc_reservation_impl, Normal, template!(NameValueStr: "reservation message"),
"the `#[rustc_reservation_impl]` attribute is internally used \
for reserving for `for<T> From<!> for T` impl"
),
rustc_attr!(
rustc_test_marker, Normal, template!(Word),
"the `#[rustc_test_marker]` attribute is used internally to track tests",
),
rustc_attr!(
rustc_unsafe_specialization_marker, Normal, template!(Word),
"the `#[rustc_unsafe_specialization_marker]` attribute is used to check specializations"
),
rustc_attr!(
rustc_specialization_trait, Normal, template!(Word),
"the `#[rustc_specialization_trait]` attribute is used to check specializations"
),
rustc_attr!(
rustc_main, Normal, template!(Word),
"the `#[rustc_main]` attribute is used internally to specify test entry point function",
),
rustc_attr!(
rustc_skip_array_during_method_dispatch, Normal, template!(Word),
"the `#[rustc_skip_array_during_method_dispatch]` attribute is used to exclude a trait \
from method dispatch when the receiver is an array, for compatibility in editions < 2021."
),
// ==========================================================================
// Internal attributes, Testing:
// ==========================================================================
rustc_attr!(TEST, rustc_outlives, Normal, template!(Word)),
rustc_attr!(TEST, rustc_capture_analysis, Normal, template!(Word)),
rustc_attr!(TEST, rustc_insignificant_dtor, Normal, template!(Word)),
rustc_attr!(TEST, rustc_variance, Normal, template!(Word)),
rustc_attr!(TEST, rustc_layout, Normal, template!(List: "field1, field2, ...")),
rustc_attr!(TEST, rustc_regions, Normal, template!(Word)),
rustc_attr!(
TEST, rustc_error, Normal,
template!(Word, List: "delay_span_bug_from_inside_query")
),
rustc_attr!(TEST, rustc_dump_user_substs, Normal, template!(Word)),
rustc_attr!(TEST, rustc_evaluate_where_clauses, Normal, template!(Word)),
rustc_attr!(TEST, rustc_if_this_changed, Normal, template!(Word, List: "DepNode")),
rustc_attr!(TEST, rustc_then_this_would_need, Normal, template!(List: "DepNode")),
rustc_attr!(
TEST, rustc_clean, Normal,
template!(List: r#"cfg = "...", /*opt*/ label = "...", /*opt*/ except = "...""#),
),
rustc_attr!(
TEST, rustc_partition_reused, Normal,
template!(List: r#"cfg = "...", module = "...""#),
),
rustc_attr!(
TEST, rustc_partition_codegened, Normal,
template!(List: r#"cfg = "...", module = "...""#),
),
rustc_attr!(
TEST, rustc_expected_cgu_reuse, Normal,
template!(List: r#"cfg = "...", module = "...", kind = "...""#),
),
rustc_attr!(TEST, rustc_synthetic, Normal, template!(Word)),
rustc_attr!(TEST, rustc_symbol_name, Normal, template!(Word)),
rustc_attr!(TEST, rustc_polymorphize_error, Normal, template!(Word)),
rustc_attr!(TEST, rustc_def_path, Normal, template!(Word)),
rustc_attr!(TEST, rustc_mir, Normal, template!(List: "arg1, arg2, ...")),
rustc_attr!(TEST, rustc_dump_program_clauses, Normal, template!(Word)),
rustc_attr!(TEST, rustc_dump_env_program_clauses, Normal, template!(Word)),
rustc_attr!(TEST, rustc_object_lifetime_default, Normal, template!(Word)),
rustc_attr!(TEST, rustc_dump_vtable, Normal, template!(Word)),
rustc_attr!(TEST, rustc_dummy, Normal, template!(Word /* doesn't matter*/)),
gated!(
omit_gdb_pretty_printer_section, Normal, template!(Word),
"the `#[omit_gdb_pretty_printer_section]` attribute is just used for the Rust test suite",
),
];
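/// Returns the built-in attributes whose feature gate marks them as deprecated.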
pub fn deprecated_attributes() -> Vec<&'static BuiltinAttribute> {
BUILTIN_ATTRIBUTES.iter().filter(|(.., gate)| gate.is_deprecated()).collect()
}
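/// Returns `true` if `name` is the name of a built-in attribute
/// (constant-time lookup via `BUILTIN_ATTRIBUTE_MAP`).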
pub fn is_builtin_attr_name(name: Symbol) -> bool {
BUILTIN_ATTRIBUTE_MAP.get(&name).is_some()
}
pub static BUILTIN_ATTRIBUTE_MAP: SyncLazy<FxHashMap<Symbol, &BuiltinAttribute>> =
SyncLazy::new(|| {
let mut map = FxHashMap::default();
for attr in BUILTIN_ATTRIBUTES.iter() {
if map.insert(attr.0, attr).is_some() {
panic!("duplicate builtin attribute `{}`", attr.0);
}
}
map
});
| AttributeTemplate |
lib.rs | extern crate nom_sql;
use std::fs::File;
use std::io::Read;
use std::path::Path;
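// Integration tests: load SQL queries and schema definitions from the text
// files under tests/ and count how many of them nom_sql can parse.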
fn parse_queryset(queries: Vec<String>) -> (i32, i32) {
let mut parsed_ok = Vec::new();
let mut parsed_err = 0;
for query in queries.iter() {
println!("Trying to parse '{}': ", &query);
match nom_sql::parser::parse_query(&query) {
Ok(_) => {
println!("ok");
parsed_ok.push(query);
}
Err(_) => {
println!("failed");
parsed_err += 1;
}
}
}
println!("Parsing failed: {} queries", parsed_err);
println!("Parsed successfully: {} queries", parsed_ok.len());
println!("\nSuccessfully parsed queries:");
for q in parsed_ok.iter() {
println!("{:?}", q);
}
(parsed_ok.len() as i32, parsed_err)
}
fn test_queries_from_file(f: &Path, name: &str) -> Result<i32, i32> {
let mut f = File::open(f).unwrap();
let mut s = String::new();
// Load queries
f.read_to_string(&mut s).unwrap();
let lines: Vec<String> = s
.lines()
.filter(|l| !l.is_empty() && !l.starts_with("#"))
.map(|l| {
if !(l.ends_with("\n") || l.ends_with(";")) {
String::from(l) + "\n"
} else {
String::from(l)
}
}).collect();
println!("Loaded {} {} queries", lines.len(), name);
// Try parsing them all
let (ok, _) = parse_queryset(lines);
// For the moment, we're always good
Ok(ok)
}
fn parse_file(path: &str) -> (i32, i32) {
let mut f = File::open(Path::new(path)).unwrap();
let mut s = String::new();
// Load queries
f.read_to_string(&mut s).unwrap();
let lines: Vec<&str> = s
.lines()
.map(str::trim)
.filter(|l| {
!l.is_empty()
&& !l.starts_with("#")
&& !l.starts_with("--")
&& !l.starts_with("DROP")
&& !l.starts_with("/*")
}).collect();
let mut q = String::new();
let mut queries = Vec::new();
for l in lines {
if !l.ends_with(";") {
q.push_str(l);
} else {
// end of query
q.push_str(l);
queries.push(q.clone());
q = String::new();
}
}
println!("Loaded {} table definitions", queries.len());
// Try parsing them all
parse_queryset(queries)
}
#[test]
fn hotcrp_queries() {
assert!(test_queries_from_file(Path::new("tests/hotcrp-queries.txt"), "HotCRP").is_ok());
}
#[test]
fn hyrise_test_queries() {
assert!(test_queries_from_file(Path::new("tests/hyrise-test-queries.txt"), "HyRise").is_ok());
}
#[test]
fn tpcw_test_queries() {
assert!(test_queries_from_file(Path::new("tests/tpc-w-queries.txt"), "TPC-W").is_ok());
}
#[test]
fn tpcw_test_tables() {
let res = test_queries_from_file(Path::new("tests/tpc-w-tables.txt"), "TPC-W tables");
assert!(res.is_ok());
// There are 10 tables
assert_eq!(res.unwrap(), 10);
}
#[test]
fn finkelstein82_test_queries() {
let res = test_queries_from_file(Path::new("tests/finkelstein82.txt"), "Finkelstein 1982");
assert!(res.is_ok());
// There are 3 tables and 6 queries
assert_eq!(res.unwrap(), 9);
}
#[test]
fn hotcrp_schema() {
let mut f = File::open(Path::new("tests/hotcrp-schema.txt")).unwrap();
let mut s = String::new();
// Load queries
f.read_to_string(&mut s).unwrap();
let lines: Vec<&str> = s
.lines()
.map(str::trim)
.filter(|l| {
!l.is_empty() && !l.starts_with("#") && !l.starts_with("--") && !l.starts_with("DROP")
}).collect();
let mut q = String::new();
let mut queries = Vec::new();
for l in lines {
// remove inline comments, bah
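// (note: the slice below also drops the character immediately before the '#',
// i.e. the preceding space, and assumes '#' is never the first character here)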
let l = match l.find("#") {
None => l,
Some(pos) => &l[0..pos - 1],
};
if !l.ends_with(";") {
q.push_str(l);
} else {
// end of query
q.push_str(l);
queries.push(q.clone());
q = String::new();
}
}
println!("Loaded {} table definitions", queries.len());
// Try parsing them all
let (ok, fail) = parse_queryset(queries);
// There are 24 CREATE TABLE queries in the schema
assert_eq!(ok, 24);
assert_eq!(fail, 0);
}
#[test]
fn mediawiki_schema() {
let (ok, fail) = parse_file("tests/mediawiki-schema.txt");
// There are 17 CREATE TABLE queries in the schema
assert_eq!(ok, 17);
assert_eq!(fail, 0);
}
#[test]
fn | () {
let (ok, fail) = parse_file("tests/comments.txt");
// There are 2 CREATE TABLE queries in the schema
assert_eq!(ok, 2);
assert_eq!(fail, 0);
}
#[test]
fn parse_autoincrement() {
let (ok, fail) = parse_file("tests/autoincrement.txt");
// There is 1 CREATE TABLE query in the schema
assert_eq!(ok, 1);
assert_eq!(fail, 0);
}
#[test]
fn parse_select() {
let (ok, fail) = parse_file("tests/select.txt");
assert_eq!(fail, 0);
assert_eq!(ok, 24);
}
| parse_comments |
mod.rs | pub use control_flow_graph::*;
pub use meanify::*; |
pub mod control_flow_graph;
pub mod meanify;
pub mod variable_table; | |
apps.py | from django.apps import AppConfig
class | (AppConfig):
name = 'CVgallery'
| CvgalleryConfig |
loss_scale.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains LossScale classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.ops import variable_scope
from tensorflow.python.util.tf_export import tf_export
@six.add_metaclass(abc.ABCMeta)
@tf_export('train.experimental.LossScale')
class LossScale(trackable.Trackable):
"""Loss scale base class.
Loss scaling is a process that multiplies the loss by a multiplier called the
loss scale, and divides each gradient by the same multiplier. The pseudocode
for this process is:
```
loss = ...
loss *= loss_scale
grads = gradients(loss, vars)
grads /= loss_scale
```
Mathematically, loss scaling has no effect, but can help avoid numerical
underflow in intermediate gradients when float16 tensors are used for mixed
precision training. By multiplying the loss, each intermediate gradient will
have the same multiplier applied.
Instances of this class represent a loss scale. Calling instances of this
class returns the loss scale as a scalar float32 tensor, while method
`update()` updates the loss scale depending on the values of the gradients.
Optimizers use instances of this class to scale loss and gradients.
"""
def __init__(self):
"""Initializes the loss scale class."""
self._weights = {}
@abc.abstractmethod
def __call__(self):
"""Returns the current loss scale as a scalar `float32` tensor."""
pass
@abc.abstractmethod
def update(self, grads):
"""Updates the value of the loss scale.
The loss scale will be potentially updated, based on the value of `grads`.
The tensor returned by calling this class is only updated when this function
is evaluated.
In eager mode, this directly updates the loss scale, so that calling
`__call__` will return the newly updated loss scale. In graph mode,
this returns an op that, when evaluated, updates the loss scale.
This function also returns a `should_apply_gradients` bool. If False,
gradients should not be applied to the variables that step, as nonfinite
gradients were found, and the loss scale has been updated to reduce the
chance of finding nonfinite gradients in the next step. Some loss scale
classes will always return True, as they cannot adjust themselves in
response to nonfinite gradients.
When a DistributionStrategy is used, this function may only be called in a
cross-replica context.
Args:
grads: A list of unscaled gradients, each of which is the gradient of the
loss with respect to a weight. The gradients should have already been
divided by the loss scale before being passed to this function. 'None'
gradients are accepted, and are ignored.
Returns:
update_op: In eager mode, None. In graph mode, an op to update the loss
scale.
should_apply_gradients: Either a bool or a scalar boolean tensor. If
False, the caller should skip applying `grads` to the variables this
step.
"""
pass
def _add_weight(self, name, initial_value, dtype=None):
"""Adds a weight to this loss scale.
Args:
name: Variable name.
initial_value: The variable's initial value.
dtype: The type of the variable.
Returns:
A variable.
Raises:
RuntimeError: If a weight with `name` has already been added.
"""
variable = variable_scope.variable(
initial_value=initial_value,
name=name,
dtype=dtype,
trainable=False,
use_resource=True,
synchronization=variables.VariableSynchronization.AUTO,
# Set aggregation to NONE, as loss scaling variables should never be
# aggregated.
aggregation=variables.VariableAggregation.NONE)
if context.executing_eagerly():
graph_key = None
else:
graph = ops.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
key = (name, graph_key)
if self._weights.get(key, None) is not None:
raise RuntimeError('Duplicate variables detected. {}'.format(key))
self._weights[key] = variable
self._handle_deferred_dependencies(name=name, trackable=variable)
return variable
@property
def _checkpoint_dependencies(self):
"""From Trackable. Gather graph-specific weights to save."""
if context.executing_eagerly():
graph_key = None
else:
graph = ops.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
weights = []
for (name, g), v in sorted(self._weights.items(), key=lambda i: i[0][0]):
if g == graph_key:
weights.append(trackable.TrackableReference(name=name, ref=v))
return super(LossScale, self)._checkpoint_dependencies + weights
def _lookup_dependency(self, name):
"""From Trackable. Find a weight in the current graph."""
unconditional = super(LossScale, self)._lookup_dependency(name)
if unconditional is not None:
return unconditional
if context.executing_eagerly():
graph_key = None
else:
graph = ops.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
return self._weights.get((name, graph_key), None)
@abc.abstractmethod
def get_config(self):
"""Returns the config of this loss scale."""
pass
@classmethod
def from_config(cls, config):
"""Creates the LossScale from its config."""
return cls(**config)
def get_loss_scale_weights(loss_scale):
return loss_scale._weights.values() # pylint: disable=protected-access
@tf_export('train.experimental.FixedLossScale')
class FixedLossScale(LossScale):
"""Loss scale with a fixed value.
The loss scale is not updated for the lifetime of instances of this class.
A given instance of this class always returns the same number when called.
"""
def __init__(self, loss_scale_value):
"""Creates the fixed loss scale.
Args:
loss_scale_value: A Python float. Its ideal value varies depending on the
model being run. A loss_scale that is too small might affect model
quality; one that is too big might cause inf or nan. There is no single
right loss_scale to apply. There is no harm in choosing a relatively big
number as long as no nan or inf is encountered in training.
Raises:
ValueError: If loss_scale is less than 1.
"""
super(FixedLossScale, self).__init__()
if not isinstance(loss_scale_value, six.integer_types + (float,)):
raise ValueError('loss_scale_value must be a Python int or float.')
if loss_scale_value < 1:
raise ValueError('loss_scale_value must be at least 1.')
# It's important we do not create tensors in the constructor, as such
# tensors might be on a different device or tf.function vs when the tensor
# is used. This would hurt performance. Therefore, we do not create a tensor
# from loss_scale_value, but instead leave it as a Python float.
# TODO(reedwm): Also do not create tensors in the DynamicLossScale
# constructor.
self._loss_scale_value = float(loss_scale_value)
def __call__(self):
|
def update(self, grads):
del grads
return control_flow_ops.no_op(), True
def get_config(self):
return {'loss_scale_value': self._loss_scale_value}
def _is_all_finite(grads):
"""Returns a scalar boolean tensor indicating if all gradients are finite."""
is_finite_per_grad = [
math_ops.reduce_all(math_ops.is_finite(g)) for g in grads if g is not None
]
return math_ops.reduce_all(is_finite_per_grad)
def _op_in_graph_mode(tensor):
"""Returns the tensor's op in graph mode, or the tensor in eager mode.
This is useful because sometimes an op is needed in graph mode instead of a
tensor. In eager mode, there are no ops.
Args:
tensor: A tensor.
Returns:
The tensor's op in graph mode. The tensor in eager mode.
"""
if context.executing_eagerly():
return tensor
return tensor.op
def _assign_if_finite(var, value):
"""Assigns a value to a variable if the value is finite."""
return control_flow_ops.cond(
math_ops.is_finite(value), lambda: _op_in_graph_mode(var.assign(value)),
control_flow_ops.no_op)
@tf_export('train.experimental.DynamicLossScale')
class DynamicLossScale(LossScale):
"""Loss scale that dynamically adjusts itself.
Dynamic loss scaling works by adjusting the loss scale as training progresses.
The goal is to keep the loss scale as high as possible without overflowing the
gradients. As long as the gradients do not overflow, raising the loss scale
never hurts.
The algorithm starts by setting the loss scale to an initial value. Every N
steps that the gradients are finite, the loss scale is increased by some
factor. However, if a NaN or Inf gradient is found, the gradients for that
step are not applied, and the loss scale is decreased by the factor. This
process tends to keep the loss scale as high as possible without gradients
overflowing.
"""
def __init__(self,
initial_loss_scale=2 ** 15, # See docstring for why this is big.
increment_period=2000,
multiplier=2.):
"""Creates the dynamic loss scale.
Args:
initial_loss_scale: A Python float. The loss scale to use at the
beginning. It's better to start this at a very high number, because a
loss scale that is too high gets lowered far more quickly than a loss
scale that is too low gets raised. The default is 2 ** 15, which is
approximately half the maximum float16 value.
increment_period: Increases loss scale every `increment_period`
consecutive steps that finite gradients are encountered. If a nonfinite
gradient is encountered, the count is reset back to zero.
multiplier: The multiplier to use when increasing or decreasing the loss
scale.
"""
super(DynamicLossScale, self).__init__()
self._initial_loss_scale = float(initial_loss_scale)
self._increment_period = int(increment_period)
self._multiplier = float(multiplier)
self._current_loss_scale = self._add_weight(
name='current_loss_scale',
dtype=dtypes.float32,
initial_value=self._initial_loss_scale)
# The number of consecutive steps with finite gradients since the last
# nonfinite gradient or change in loss scale.
self._num_good_steps = self._add_weight(
name='good_steps', dtype=dtypes.int64, initial_value=0)
@property
def initial_loss_scale(self):
return self._initial_loss_scale
@property
def increment_period(self):
return self._increment_period
@property
def multiplier(self):
return self._multiplier
def __call__(self):
return self._current_loss_scale
def update(self, grads):
"""Updates loss scale based on if gradients are finite in current step."""
if distribution_strategy_context.has_strategy():
distribution = distribution_strategy_context.get_cross_replica_context()
def get_is_finite(grads):
is_finite = _is_all_finite(grads)
# We cast to float, because we cannot reduce booleans with
# DistributionStrategy.
return math_ops.cast(is_finite, dtypes.float32)
is_finite_float = distribution.extended.call_for_each_replica(
get_is_finite, args=(grads,))
reduced_is_finite_float = distribution.reduce(reduce_util.ReduceOp.SUM,
is_finite_float, axis=None)
is_finite = math_ops.equal(reduced_is_finite_float,
distribution.num_replicas_in_sync)
else:
is_finite = _is_all_finite(grads)
def update_if_finite_grads():
"""Update assuming the gradients are finite."""
def incr_loss_scale():
new_loss_scale = self._current_loss_scale * self._multiplier
return control_flow_ops.group(
_assign_if_finite(self._current_loss_scale, new_loss_scale),
self._num_good_steps.assign(0))
return control_flow_ops.cond(
self._num_good_steps + 1 >= self._increment_period,
incr_loss_scale, lambda: _op_in_graph_mode(
self._num_good_steps.assign_add(1)))
def update_if_not_finite_grads():
"""Update assuming the gradients are nonfinite."""
new_loss_scale = math_ops.maximum(
self._current_loss_scale / self._multiplier, 1)
return control_flow_ops.group(
self._num_good_steps.assign(0),
self._current_loss_scale.assign(new_loss_scale))
update_op = control_flow_ops.cond(is_finite, update_if_finite_grads,
update_if_not_finite_grads)
should_apply_gradients = is_finite
return update_op, should_apply_gradients
def get_config(self):
return {
'initial_loss_scale': self.initial_loss_scale,
'increment_period': self.increment_period,
'multiplier': self.multiplier,
}
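# Summary of the dispatch below: a numeric identifier yields a FixedLossScale,
# the string 'dynamic' yields a DynamicLossScale with default arguments, an
# existing LossScale instance (or None) is passed through unchanged, and
# anything else raises ValueError.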
def get(identifier):
"""Get a loss scale object."""
if isinstance(identifier, six.integer_types + (float,)):
return FixedLossScale(identifier)
if identifier == 'dynamic':
return DynamicLossScale()
if isinstance(identifier, LossScale):
return identifier
elif identifier is None:
return None
else:
raise ValueError('Could not interpret loss scale identifier: %s' %
identifier)
| return ops.convert_to_tensor(self._loss_scale_value) |
connect.js | // Copyright © 2020 The Things Network Foundation, The Things Industries B.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import { connect } from 'react-redux'
import { replace } from 'connected-react-router'
import api from '@console/api'
import { getCollaborator } from '@console/store/actions/collaborators'
import {
selectSelectedApplicationId,
selectApplicationRights,
selectApplicationPseudoRights,
selectApplicationRightsFetching,
selectApplicationRightsError,
} from '@console/store/selectors/applications'
import {
selectUserCollaborator,
selectOrganizationCollaborator,
selectCollaboratorFetching,
selectCollaboratorError,
} from '@console/store/selectors/collaborators' | const appId = selectSelectedApplicationId(state)
const { collaboratorId, collaboratorType } = props.match.params
const collaborator =
collaboratorType === 'user'
? selectUserCollaborator(state)
: selectOrganizationCollaborator(state)
const fetching = selectApplicationRightsFetching(state) || selectCollaboratorFetching(state)
const error = selectApplicationRightsError(state) || selectCollaboratorError(state)
return {
collaboratorId,
collaboratorType,
collaborator,
appId,
rights: selectApplicationRights(state),
pseudoRights: selectApplicationPseudoRights(state),
fetching,
error,
}
}
const mapDispatchToProps = (dispatch, ownProps) => ({
getCollaborator: (appId, collaboratorId, isUser) => {
dispatch(getCollaborator('application', appId, collaboratorId, isUser))
},
redirectToList: appId => {
dispatch(replace(`/applications/${appId}/collaborators`))
},
updateCollaborator: (appId, patch) => api.application.collaborators.update(appId, patch),
removeCollaborator: (appId, patch) => api.application.collaborators.update(appId, patch),
})
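// Note: `removeCollaborator` is wired to the same `update` endpoint as
// `updateCollaborator`; presumably removal is expressed as a patch that clears
// the collaborator's rights (an assumption, not confirmed by this file).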
const mergeProps = (stateProps, dispatchProps, ownProps) => ({
...stateProps,
...dispatchProps,
...ownProps,
getCollaborator: () =>
dispatchProps.getCollaborator(
stateProps.appId,
stateProps.collaboratorId,
stateProps.collaboratorType === 'user',
),
redirectToList: () => dispatchProps.redirectToList(stateProps.appId),
updateCollaborator: patch => api.application.collaborators.update(stateProps.appId, patch),
removeCollaborator: patch => api.application.collaborators.update(stateProps.appId, patch),
})
export default ApplicationCollaboratorEdit =>
connect(
mapStateToProps,
mapDispatchToProps,
mergeProps,
)(ApplicationCollaboratorEdit) |
const mapStateToProps = (state, props) => { |
main.go | // SPDX-License-Identifier: Unlicense OR MIT
package main
import (
"fmt"
"log"
"net/http"
"os"
"strings"
)
func | () {
http.HandleFunc("/", vanityHandler)
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), nil))
}
// vanityHandler serves git location meta headers for the go tool.
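// For example (illustrative), `go get gioui.org` requests
// https://gioui.org/?go-get=1 and reads the go-import/go-source meta tags
// emitted below to locate the git repository.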
func vanityHandler(w http.ResponseWriter, r *http.Request) {
if www := "www."; strings.HasPrefix(r.URL.Host, www) {
r.URL.Host = r.URL.Host[len(www):]
http.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently)
return
}
if r.URL.Query().Get("go-get") == "1" {
fmt.Fprintf(w, `<html><head>
<meta name="go-import" content="gioui.org git https://git.sr.ht/~eliasnaur/gio">
<meta name="go-source" content="gioui.org https://git.sr.ht/~eliasnaur/gio https://git.sr.ht/~eliasnaur/gio/tree/master{/dir} https://git.sr.ht/~eliasnaur/gio/tree/master{/dir}/{file}#L{line}">
</head></html>`)
return
}
switch r.URL.Path {
case "/":
http.Redirect(w, r, "https://git.sr.ht/~eliasnaur/gio", http.StatusFound)
default:
http.NotFound(w, r)
}
}
| main |
scenario_3484_test.go | // Code is generated by ucloud-model, DO NOT EDIT IT.
package tests
import (
"testing"
"time"
"github.com/ucloud/ucloud-sdk-go/services/ipsecvpn"
"github.com/ucloud/ucloud-sdk-go/services/unet"
"github.com/ucloud/ucloud-sdk-go/services/vpc"
"github.com/ucloud/ucloud-sdk-go/ucloud"
"github.com/ucloud/ucloud-sdk-go/ucloud/utest/driver"
"github.com/ucloud/ucloud-sdk-go/ucloud/utest/utils"
"github.com/ucloud/ucloud-sdk-go/ucloud/utest/validation"
)
func | (t *testing.T) {
spec.ParallelTest(t, &driver.Scenario{
PreCheck: func() {
testAccPreCheck(t)
},
Id: "3484",
Vars: func(scenario *driver.Scenario) map[string]interface{} {
return map[string]interface{}{
"Region": "th-bkk",
}
},
Owners: []string{"[email protected]"},
Title: "IPSecVPN automated regression - international region - UpdateVPNGateway not supported",
Steps: []*driver.Step{
testStep3484CreateVPC01,
testStep3484CreateSubnet02,
testStep3484GetVPNGatewayPrice03,
testStep3484CreateVPNGateway04,
testStep3484AllocateEIP05,
testStep3484BindEIP06,
testStep3484DescribeVPNGateway07,
testStep3484GetVPNGatewayUpgradePrice08,
testStep3484CreateRemoteVPNGateway09,
testStep3484DescribeRemoteVPNGateway10,
testStep3484CreateVPNTunnel11,
testStep3484DescribeVPNTunnel12,
testStep3484UpdateVPNTunnelAttribute13,
testStep3484DeleteVPNGateway14,
testStep3484DeleteRemoteVPNGateway15,
testStep3484DeleteVPNTunnel16,
testStep3484DeleteVPNGateway17,
testStep3484DeleteRemoteVPNGateway18,
testStep3484ReleaseEIP19,
testStep3484DeleteSubnet20,
testStep3484DeleteVPC21,
},
})
}
var testStep3484CreateVPC01 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("VPC")
if err != nil {
return nil, err
}
client := c.(*vpc.VPCClient)
req := client.NewCreateVPCRequest()
err = utils.SetRequest(req, map[string]interface{}{
"Region": step.Scenario.GetVar("Region"),
"Network": []interface{}{
"192.168.0.0/16",
},
"Name": "ipsecvpn-vpc",
})
if err != nil {
return nil, err
}
resp, err := client.CreateVPC(req)
if err != nil {
return resp, err
}
step.Scenario.SetVar("vpc_id", step.Must(utils.GetValue(resp, "VPCId")))
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 3,
RetryInterval: 1 * time.Second,
Title: "Create VPC",
FastFail: false,
}
var testStep3484CreateSubnet02 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("VPC")
if err != nil {
return nil, err
}
client := c.(*vpc.VPCClient)
req := client.NewCreateSubnetRequest()
err = utils.SetRequest(req, map[string]interface{}{
"VPCId": step.Scenario.GetVar("vpc_id"),
"SubnetName": "ipsecvpn-subnet",
"Subnet": "192.168.11.0",
"Region": step.Scenario.GetVar("Region"),
})
if err != nil {
return nil, err
}
resp, err := client.CreateSubnet(req)
if err != nil {
return resp, err
}
step.Scenario.SetVar("subnet_id", step.Must(utils.GetValue(resp, "SubnetId")))
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(10) * time.Second,
MaxRetries: 3,
RetryInterval: 1 * time.Second,
Title: "Create subnet",
FastFail: false,
}
var testStep3484GetVPNGatewayPrice03 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("")
if err != nil {
return nil, err
}
client := c.(*ucloud.Client)
req := client.NewGenericRequest()
_ = req.SetAction("GetVPNGatewayPrice")
err = req.SetPayload(map[string]interface{}{
"Region": step.Scenario.GetVar("Region"),
"Grade": "Standard",
})
if err != nil {
return nil, err
}
resp, err := client.GenericInvoke(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 0,
RetryInterval: 0 * time.Second,
Title: "Get VPN gateway price",
FastFail: false,
}
var testStep3484CreateVPNGateway04 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("IPSecVPN")
if err != nil {
return nil, err
}
client := c.(*ipsecvpn.IPSecVPNClient)
req := client.NewCreateVPNGatewayRequest()
err = utils.SetRequest(req, map[string]interface{}{
"VPNGatewayName": "auto_apitest",
"VPCId": step.Scenario.GetVar("vpc_id"),
"Region": step.Scenario.GetVar("Region"),
"Grade": "Standard",
})
if err != nil {
return nil, err
}
resp, err := client.CreateVPNGateway(req)
if err != nil {
return resp, err
}
step.Scenario.SetVar("vpngw_id", step.Must(utils.GetValue(resp, "VPNGatewayId")))
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 0,
RetryInterval: 0 * time.Second,
Title: "Create VPN gateway",
FastFail: false,
}
var testStep3484AllocateEIP05 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("UNet")
if err != nil {
return nil, err
}
client := c.(*unet.UNetClient)
req := client.NewAllocateEIPRequest()
err = utils.SetRequest(req, map[string]interface{}{
"Region": step.Scenario.GetVar("Region"),
"OperatorName": "International",
"Bandwidth": 2,
})
if err != nil {
return nil, err
}
resp, err := client.AllocateEIP(req)
if err != nil {
return resp, err
}
step.Scenario.SetVar("eip_id", step.Must(utils.GetValue(resp, "EIPSet.0.EIPId")))
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 0,
RetryInterval: 0 * time.Second,
Title: "Allocate EIP",
FastFail: false,
}
var testStep3484BindEIP06 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("UNet")
if err != nil {
return nil, err
}
client := c.(*unet.UNetClient)
req := client.NewBindEIPRequest()
err = utils.SetRequest(req, map[string]interface{}{
"ResourceType": "vpngw",
"ResourceId": step.Scenario.GetVar("vpngw_id"),
"Region": step.Scenario.GetVar("Region"),
"EIPId": step.Scenario.GetVar("eip_id"),
})
if err != nil {
return nil, err
}
resp, err := client.BindEIP(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 0,
RetryInterval: 0 * time.Second,
Title: "Bind EIP",
FastFail: false,
}
var testStep3484DescribeVPNGateway07 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("IPSecVPN")
if err != nil {
return nil, err
}
client := c.(*ipsecvpn.IPSecVPNClient)
req := client.NewDescribeVPNGatewayRequest()
err = utils.SetRequest(req, map[string]interface{}{
"Region": step.Scenario.GetVar("Region"),
})
if err != nil {
return nil, err
}
resp, err := client.DescribeVPNGateway(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(5) * time.Second,
MaxRetries: 3,
RetryInterval: 1 * time.Second,
Title: "Get VPN gateway information",
FastFail: false,
}
var testStep3484GetVPNGatewayUpgradePrice08 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("")
if err != nil {
return nil, err
}
client := c.(*ucloud.Client)
req := client.NewGenericRequest()
_ = req.SetAction("GetVPNGatewayUpgradePrice")
err = req.SetPayload(map[string]interface{}{
"VPNGatewayId": step.Scenario.GetVar("vpngw_id"),
"Region": step.Scenario.GetVar("Region"),
"Grade": "Enhanced",
})
if err != nil {
return nil, err
}
resp, err := client.GenericInvoke(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 0,
RetryInterval: 0 * time.Second,
Title: "Get VPN gateway upgrade price",
FastFail: false,
}
var testStep3484CreateRemoteVPNGateway09 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("IPSecVPN")
if err != nil {
return nil, err
}
client := c.(*ipsecvpn.IPSecVPNClient)
req := client.NewCreateRemoteVPNGatewayRequest()
err = utils.SetRequest(req, map[string]interface{}{
"RemoteVPNGatewayName": "auto_apitest",
"RemoteVPNGatewayAddr": "10.1.1.0",
"Region": step.Scenario.GetVar("Region"),
})
if err != nil {
return nil, err
}
resp, err := client.CreateRemoteVPNGateway(req)
if err != nil {
return resp, err
}
step.Scenario.SetVar("remote_vpngw_id", step.Must(utils.GetValue(resp, "RemoteVPNGatewayId")))
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 0,
RetryInterval: 0 * time.Second,
Title: "Create remote VPN gateway",
FastFail: false,
}
var testStep3484DescribeRemoteVPNGateway10 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("IPSecVPN")
if err != nil {
return nil, err
}
client := c.(*ipsecvpn.IPSecVPNClient)
req := client.NewDescribeRemoteVPNGatewayRequest()
err = utils.SetRequest(req, map[string]interface{}{
"Region": step.Scenario.GetVar("Region"),
})
if err != nil {
return nil, err
}
resp, err := client.DescribeRemoteVPNGateway(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 0,
RetryInterval: 0 * time.Second,
Title: "Get remote VPN gateway information",
FastFail: false,
}
var testStep3484CreateVPNTunnel11 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("IPSecVPN")
if err != nil {
return nil, err
}
client := c.(*ipsecvpn.IPSecVPNClient)
req := client.NewCreateVPNTunnelRequest()
err = utils.SetRequest(req, map[string]interface{}{
"VPNTunnelName": "auto_apitest",
"VPNGatewayId": step.Scenario.GetVar("vpngw_id"),
"RemoteVPNGatewayId": step.Scenario.GetVar("remote_vpngw_id"),
"Region": step.Scenario.GetVar("Region"),
"IPSecRemoteSubnets": []interface{}{
"10.1.1.0/24",
},
"IPSecProtocol": "ah",
"IPSecPFSDhGroup": 15,
"IPSecLocalSubnetIds": []interface{}{
step.Scenario.GetVar("subnet_id"),
},
"IKEPreSharedKey": "test",
"IKEExchangeMode": "main",
"IKEDhGroup": 15,
})
if err != nil {
return nil, err
}
resp, err := client.CreateVPNTunnel(req)
if err != nil {
return resp, err
}
step.Scenario.SetVar("vpn_tunnel_id", step.Must(utils.GetValue(resp, "VPNTunnelId")))
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(5) * time.Second,
MaxRetries: 3,
RetryInterval: 1 * time.Second,
Title: "Create VPN tunnel",
FastFail: false,
}
var testStep3484DescribeVPNTunnel12 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("IPSecVPN")
if err != nil {
return nil, err
}
client := c.(*ipsecvpn.IPSecVPNClient)
req := client.NewDescribeVPNTunnelRequest()
err = utils.SetRequest(req, map[string]interface{}{
"Region": step.Scenario.GetVar("Region"),
})
if err != nil {
return nil, err
}
resp, err := client.DescribeVPNTunnel(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 0,
RetryInterval: 0 * time.Second,
Title: "Get VPN tunnel information",
FastFail: false,
}
var testStep3484UpdateVPNTunnelAttribute13 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("IPSecVPN")
if err != nil {
return nil, err
}
client := c.(*ipsecvpn.IPSecVPNClient)
req := client.NewUpdateVPNTunnelAttributeRequest()
err = utils.SetRequest(req, map[string]interface{}{
"VPNTunnelId": step.Scenario.GetVar("vpn_tunnel_id"),
"Region": step.Scenario.GetVar("Region"),
})
if err != nil {
return nil, err
}
resp, err := client.UpdateVPNTunnelAttribute(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 0,
RetryInterval: 0 * time.Second,
Title: "Update VPN tunnel attributes",
FastFail: false,
}
var testStep3484DeleteVPNGateway14 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("IPSecVPN")
if err != nil {
return nil, err
}
client := c.(*ipsecvpn.IPSecVPNClient)
req := client.NewDeleteVPNGatewayRequest()
err = utils.SetRequest(req, map[string]interface{}{
"VPNGatewayId": step.Scenario.GetVar("vpngw_id"),
"Region": step.Scenario.GetVar("Region"),
})
if err != nil {
return nil, err
}
resp, err := client.DeleteVPNGateway(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 66007, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 0,
RetryInterval: 0 * time.Second,
Title: "Delete VPN gateway",
FastFail: false,
}
var testStep3484DeleteRemoteVPNGateway15 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("IPSecVPN")
if err != nil {
return nil, err
}
client := c.(*ipsecvpn.IPSecVPNClient)
req := client.NewDeleteRemoteVPNGatewayRequest()
err = utils.SetRequest(req, map[string]interface{}{
"RemoteVPNGatewayId": step.Scenario.GetVar("remote_vpngw_id"),
"Region": step.Scenario.GetVar("Region"),
})
if err != nil {
return nil, err
}
resp, err := client.DeleteRemoteVPNGateway(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 66032, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 0,
RetryInterval: 0 * time.Second,
Title: "Delete remote VPN gateway",
FastFail: false,
}
var testStep3484DeleteVPNTunnel16 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("IPSecVPN")
if err != nil {
return nil, err
}
client := c.(*ipsecvpn.IPSecVPNClient)
req := client.NewDeleteVPNTunnelRequest()
err = utils.SetRequest(req, map[string]interface{}{
"VPNTunnelId": step.Scenario.GetVar("vpn_tunnel_id"),
"Region": step.Scenario.GetVar("Region"),
})
if err != nil {
return nil, err
}
resp, err := client.DeleteVPNTunnel(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 0,
RetryInterval: 0 * time.Second,
Title: "Delete VPN tunnel",
FastFail: false,
}
var testStep3484DeleteVPNGateway17 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("IPSecVPN")
if err != nil {
return nil, err
}
client := c.(*ipsecvpn.IPSecVPNClient)
req := client.NewDeleteVPNGatewayRequest()
err = utils.SetRequest(req, map[string]interface{}{
"VPNGatewayId": step.Scenario.GetVar("vpngw_id"),
"Region": step.Scenario.GetVar("Region"),
})
if err != nil {
return nil, err
}
resp, err := client.DeleteVPNGateway(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(5) * time.Second,
MaxRetries: 3,
RetryInterval: 10 * time.Second,
Title: "Delete VPN gateway",
FastFail: false,
}
var testStep3484DeleteRemoteVPNGateway18 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("IPSecVPN")
if err != nil {
return nil, err
}
client := c.(*ipsecvpn.IPSecVPNClient)
req := client.NewDeleteRemoteVPNGatewayRequest()
err = utils.SetRequest(req, map[string]interface{}{
"RemoteVPNGatewayId": step.Scenario.GetVar("remote_vpngw_id"),
"Region": step.Scenario.GetVar("Region"),
})
if err != nil {
return nil, err
}
resp, err := client.DeleteRemoteVPNGateway(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 0,
RetryInterval: 0 * time.Second,
Title: "Delete remote VPN gateway",
FastFail: false,
}
var testStep3484ReleaseEIP19 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("UNet")
if err != nil {
return nil, err
}
client := c.(*unet.UNetClient)
req := client.NewReleaseEIPRequest()
err = utils.SetRequest(req, map[string]interface{}{
"Region": step.Scenario.GetVar("Region"),
"EIPId": step.Scenario.GetVar("eip_id"),
})
if err != nil {
return nil, err
}
resp, err := client.ReleaseEIP(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 0,
RetryInterval: 0 * time.Second,
Title: "Release EIP",
FastFail: false,
}
var testStep3484DeleteSubnet20 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("VPC")
if err != nil {
return nil, err
}
client := c.(*vpc.VPCClient)
req := client.NewDeleteSubnetRequest()
err = utils.SetRequest(req, map[string]interface{}{
"SubnetId": step.Scenario.GetVar("subnet_id"),
"Region": step.Scenario.GetVar("Region"),
})
if err != nil {
return nil, err
}
resp, err := client.DeleteSubnet(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(0) * time.Second,
MaxRetries: 3,
RetryInterval: 1 * time.Second,
Title: "Delete subnet",
FastFail: false,
}
var testStep3484DeleteVPC21 = &driver.Step{
Invoker: func(step *driver.Step) (interface{}, error) {
c, err := step.LoadFixture("VPC")
if err != nil {
return nil, err
}
client := c.(*vpc.VPCClient)
req := client.NewDeleteVPCRequest()
err = utils.SetRequest(req, map[string]interface{}{
"VPCId": step.Scenario.GetVar("vpc_id"),
"Region": step.Scenario.GetVar("Region"),
})
if err != nil {
return nil, err
}
resp, err := client.DeleteVPC(req)
if err != nil {
return resp, err
}
return resp, nil
},
Validators: func(step *driver.Step) []driver.TestValidator {
return []driver.TestValidator{
validation.Builtins.NewValidator("RetCode", 0, "str_eq"),
}
},
StartupDelay: time.Duration(10) * time.Second,
MaxRetries: 3,
RetryInterval: 1 * time.Second,
Title: "Delete VPC",
FastFail: false,
}
| TestScenario3484 |
util.rs | //! Internal Utilities
//!
//! This module contains function for use by molt only.
use crate::tokenizer::Tokenizer;
pub fn | (ch: char) -> bool {
ch.is_alphanumeric() || ch == '_'
}
/// Reads the integer string from the head of the input. If the function returns `Some`,
/// the value is the integer string that was read, and the `ptr` points to the following
/// character. Otherwise the `ptr` will be unchanged.
///
/// The string may consist of:
///
/// * A unary plus or minus
/// * One or more decimal digits.
///
/// ## Notes
///
/// * The resulting string has the form of an integer, but might be out of the valid range.
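///
/// Illustrative example (hypothetical input): for `"0x1f+2"` this returns
/// `Some("0x1f".to_string())` and leaves the tokenizer positioned at `'+'`.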
pub fn read_int(ptr: &mut Tokenizer) -> Option<String> {
let mut p = ptr.clone();
let mut result = String::new();
let mut missing_digits = true;
// FIRST, skip a unary operator.
if p.is('+') || p.is('-') {
result.push(p.next().unwrap());
}
// NEXT, skip a "0x".
let mut radix = 10;
if p.is('0') {
result.push(p.next().unwrap());
if p.is('x') {
result.push(p.next().unwrap());
radix = 16;
} else {
missing_digits = false;
}
}
// NEXT, read the digits
while p.has(|ch| ch.is_digit(radix)) {
missing_digits = false;
result.push(p.next().unwrap());
}
if result.is_empty() || missing_digits {
None
} else {
ptr.skip_over(result.len());
Some(result)
}
}
/// Reads the floating point string from the head of the input. If the function returns `Some`,
/// the value is the string that was read, and the `ptr` points to the following character.
/// Otherwise the `ptr` will be unchanged.
///
/// The string will consist of:
///
/// * Possibly, a unary plus/minus
/// * "Inf" (case insensitive), -OR-
/// * A number:
/// * Some number of decimal digits, optionally containing a ".".
/// * An optional exponent beginning with "e" or "E"
/// * The exponent may contain a + or -, followed by some number of digits.
///
/// ## Notes
///
/// * The resulting string has the form of a floating point number but might be out of the
/// valid range.
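///
/// Illustrative example (hypothetical input): for `"1.5e-3x"` this returns
/// `Some("1.5e-3".to_string())` and leaves the tokenizer positioned at `'x'`.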
pub fn read_float(ptr: &mut Tokenizer) -> Option<String> {
let mut p = ptr.clone();
let mut result = String::new();
let mut missing_mantissa = true;
let mut missing_exponent = false;
// FIRST, skip a unary operator.
if p.is('+') || p.is('-') {
result.push(p.next().unwrap());
}
// NEXT, looking for Inf
if p.is('I') || p.is('i') {
result.push(p.next().unwrap());
if p.is('N') || p.is('n') {
result.push(p.next().unwrap());
} else {
return None;
}
if p.is('F') || p.is('f') {
result.push(p.next().unwrap());
// Update the pointer.
ptr.skip_over(result.len());
return Some(result);
} else {
return None;
}
}
// NEXT, get any integer digits
while p.has(|ch| ch.is_digit(10)) {
missing_mantissa = false;
result.push(p.next().unwrap());
}
// NEXT, get any fractional part.
if p.is('.') {
result.push(p.next().unwrap());
while p.has(|ch| ch.is_digit(10)) {
missing_mantissa = false;
result.push(p.next().unwrap());
}
}
// NEXT, get any exponent.
if p.is('e') || p.is('E') {
missing_exponent = true;
result.push(p.next().unwrap());
if p.is('+') || p.is('-') {
result.push(p.next().unwrap());
}
while p.has(|ch| ch.is_digit(10)) {
missing_exponent = false;
result.push(p.next().unwrap());
}
}
if result.is_empty() || missing_mantissa || missing_exponent {
None
} else {
// Update the pointer.
ptr.skip_over(result.len());
Some(result)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_util_read_int() {
let mut p = Tokenizer::new("abc");
assert_eq!(None, read_int(&mut p));
assert_eq!(Some('a'), p.peek());
let mut p = Tokenizer::new("-abc");
assert_eq!(None, read_int(&mut p));
assert_eq!(Some('-'), p.peek());
let mut p = Tokenizer::new("+abc");
assert_eq!(None, read_int(&mut p));
assert_eq!(Some('+'), p.peek());
let mut p = Tokenizer::new("123");
assert_eq!(Some("123".into()), read_int(&mut p));
assert_eq!(None, p.peek());
let mut p = Tokenizer::new("123abc");
assert_eq!(Some("123".into()), read_int(&mut p));
assert_eq!(Some('a'), p.peek());
let mut p = Tokenizer::new("+123abc");
assert_eq!(Some("+123".into()), read_int(&mut p));
assert_eq!(Some('a'), p.peek());
let mut p = Tokenizer::new("-123abc");
assert_eq!(Some("-123".into()), read_int(&mut p));
assert_eq!(Some('a'), p.peek());
}
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_util_read_float() {
let mut p = Tokenizer::new("abc");
assert_eq!(None, read_float(&mut p));
assert_eq!(Some('a'), p.peek());
let mut p = Tokenizer::new("-abc");
assert_eq!(None, read_float(&mut p));
assert_eq!(Some('-'), p.peek());
let mut p = Tokenizer::new("+abc");
assert_eq!(None, read_float(&mut p));
assert_eq!(Some('+'), p.peek());
let mut p = Tokenizer::new("123");
assert_eq!(Some("123".into()), read_float(&mut p));
assert_eq!(None, p.peek());
let mut p = Tokenizer::new("123abc");
assert_eq!(Some("123".into()), read_float(&mut p));
assert_eq!(Some('a'), p.peek());
let mut p = Tokenizer::new("123.");
assert_eq!(Some("123.".into()), read_float(&mut p));
assert_eq!(None, p.peek());
let mut p = Tokenizer::new(".123");
assert_eq!(Some(".123".into()), read_float(&mut p));
assert_eq!(None, p.peek());
let mut p = Tokenizer::new("123.123");
assert_eq!(Some("123.123".into()), read_float(&mut p));
assert_eq!(None, p.peek());
let mut p = Tokenizer::new("1e5");
assert_eq!(Some("1e5".into()), read_float(&mut p));
assert_eq!(None, p.peek());
let mut p = Tokenizer::new("1e+5");
assert_eq!(Some("1e+5".into()), read_float(&mut p));
assert_eq!(None, p.peek());
let mut p = Tokenizer::new("1e-5");
assert_eq!(Some("1e-5".into()), read_float(&mut p));
assert_eq!(None, p.peek());
let mut p = Tokenizer::new("1.1e1a");
assert_eq!(Some("1.1e1".into()), read_float(&mut p));
assert_eq!(Some('a'), p.peek());
let mut p = Tokenizer::new("+123abc");
assert_eq!(Some("+123".into()), read_float(&mut p));
assert_eq!(Some('a'), p.peek());
let mut p = Tokenizer::new("-123abc");
assert_eq!(Some("-123".into()), read_float(&mut p));
assert_eq!(Some('a'), p.peek());
}
}
| is_varname_char |
ParseResult.py | import json
from collections import defaultdict
def is_eq(pred,gold):
return pred==gold
class GenResult(object):
def __init__(self, idx, lay, tgt,disf_frags, tags):
self.idx = idx
self.lay = lay
self.tgt = tgt
#print(tags)
self.tgt_tags=tags
self.gold_tgt=[]
self.disf_frags=disf_frags
self.correct = {}
self.one_grams=[]
self.two_grams=[]
self.disflen_lessthanone=0
def ngrams(self, n, text):
if len(text)<n:
return []
padded = text
return [' '.join(padded[i:i + n]) for i in range(len(text)-n+1)]
def frac(self, predngram, goldngram, length, gold_sent):
c=0
for pred in predngram:
if not pred in goldngram:
c+=1
#print(pred,gold_sent)
c=c/length
return c
def | (self,src,disfs):
onegram = self.ngrams(1, src)
twogram = self.ngrams(2, src)
# print('init:',disfs,gold['sent'])
for disf in disfs:
# print(disfs)
if len(disf) > 0:
# print('ngram:',self.ngrams(1,disf),onegram)
self.one_grams.append(self.frac(self.ngrams(1, disf), onegram, len(disf), src))
else:
self.disflen_lessthanone += 1
if len(disf) > 1:
self.two_grams.append(self.frac(self.ngrams(2, disf), twogram, len(disf), src))
def eval(self, gold, gold_diversity=False):
self.gold_tgt = gold['sent']
self.gold_label = gold['sent_tag']
if is_eq(self.lay, gold['src_label']):
self.correct['lay'] = 1
else:
self.correct['lay'] = 0
# else:
# print(' '.join(gold['src']))
# print('pred:', self.lay)
# print('gold:', gold['lay'])
# print('')
if is_eq(self.tgt, gold['sent']):
self.correct['tgt'] = 1
else:
self.correct['tgt'] = 0
if gold_diversity:
disfs = gold['disf_frags']
else:
disfs = self.disf_frags
self.eval_diversity(gold['src'],disfs)
# if self.correct['lay'] == 1 and self.correct['tgt'] == 1 and ('NUMBER' in self.lay and 'STRING' in self.lay and 'NAME' in self.lay):
# if self.correct['lay'] == 1 and self.correct['tgt'] == 0:
# print(' '.join(gold['src']))
# print('pred_lay:', ' '.join(self.lay))
# print('gold_lay:', ' '.join(gold['lay']))
# print('pred_tgt:', ' '.join(self.tgt))
# print('gold_tgt:', ' '.join(gold['tgt']))
# print('')
| eval_diversity |
client.go | package client
//
// Copyright (c) 2019 ARM Limited.
//
// SPDX-License-Identifier: MIT
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
import (
"errors"
"net/http"
"bytes"
"io/ioutil"
"encoding/json"
"time"
"strings"
"context"
"fmt"
. "devicedb/raft"
. "devicedb/error"
. "devicedb/cluster"
"devicedb/rest"
)
const DefaultClientTimeout = time.Second * 10
type ErrorStatusCode struct {
StatusCode int
Message string
}
func (errorStatus *ErrorStatusCode) Error() string {
return errorStatus.Message
}
type ClientConfig struct {
Timeout time.Duration
}
var EClientTimeout = errors.New("Client request timed out")
type Client struct {
httpClient *http.Client
}
func NewClient(config ClientConfig) *Client {
if config.Timeout == 0 |
return &Client{
httpClient: &http.Client{
Timeout: config.Timeout,
},
}
}
func (client *Client) sendRequest(ctx context.Context, httpVerb string, endpointURL string, body []byte) ([]byte, error) {
request, err := http.NewRequest(httpVerb, endpointURL, bytes.NewReader(body))
if err != nil {
return nil, err
}
request = request.WithContext(ctx)
resp, err := client.httpClient.Do(request)
if err != nil {
if strings.Contains(err.Error(), "Timeout") {
return nil, EClientTimeout
}
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
errorMessage, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return nil, &ErrorStatusCode{ Message: string(errorMessage), StatusCode: resp.StatusCode }
}
responseBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return responseBody, nil
}
// Use an existing cluster member to bootstrap the addition of another node
// to that cluster. host and port indicate the address of the existing cluster
// member while nodeAddress contains the ID, host name and port of the new
// cluster member
//
// Return Values:
// EClientTimeout: The request to the node timed out
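//
// Illustrative sketch (placeholder variable names, not part of this package):
//
//	client := NewClient(ClientConfig{Timeout: 5 * time.Second})
//	err := client.AddNode(ctx, existingMemberAddress, newMemberConfig)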
func (client *Client) AddNode(ctx context.Context, memberAddress PeerAddress, newMemberConfig NodeConfig) error {
encodedNodeConfig, _ := json.Marshal(newMemberConfig)
_, err := client.sendRequest(ctx, "POST", memberAddress.ToHTTPURL("/cluster/nodes"), encodedNodeConfig)
if _, ok := err.(*ErrorStatusCode); ok {
var dbError DBerror
parseErr := json.Unmarshal([]byte(err.(*ErrorStatusCode).Message), &dbError)
if parseErr == nil {
return dbError
}
}
return err
}
// Ask a cluster member to initiate the removal of some node from its cluster.
// memberAddress indicates the address of the initiator node while nodeID is
// the ID of the node that should be removed.
//
// Return Values:
// EClientTimeout: The request to the node timed out
func (client *Client) RemoveNode(ctx context.Context, memberAddress PeerAddress, nodeID uint64, replacementNodeID uint64, decommission, forwarded bool) error {
var queryString = ""
endpoint := memberAddress.ToHTTPURL("/cluster/nodes/" + fmt.Sprintf("%d", nodeID))
if forwarded {
queryString += "forwarded=true&"
}
if decommission {
queryString += "decommission=true&"
}
if replacementNodeID != 0 {
queryString += fmt.Sprintf("replace=%d&", replacementNodeID)
}
if len(queryString) != 0 {
// take off the last &
queryString = queryString[:len(queryString) - 1]
}
endpoint += "?" + queryString
_, err := client.sendRequest(ctx, "DELETE", endpoint, []byte{ })
return err
}
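// For example, with nodeID = 5 and decommission = true the request above is
// DELETE <member>/cluster/nodes/5?decommission=true, and with only
// replacementNodeID = 9 set it is DELETE <member>/cluster/nodes/5?replace=9
// (the trailing & is trimmed before the query string is appended).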
func (client *Client) DecommissionNode(ctx context.Context, memberAddress PeerAddress, nodeID uint64) error {
return client.RemoveNode(ctx, memberAddress, nodeID, 0, true, false)
}
func (client *Client) ForceRemoveNode(ctx context.Context, memberAddress PeerAddress, nodeID uint64) error {
return client.RemoveNode(ctx, memberAddress, nodeID, 0, false, false)
}
func (client *Client) ReplaceNode(ctx context.Context, memberAddress PeerAddress, nodeID uint64, replacementNodeID uint64) error {
return client.RemoveNode(ctx, memberAddress, nodeID, replacementNodeID, false, false)
}
func (client *Client) MerkleTreeStats(ctx context.Context, memberAddress PeerAddress, siteID string, bucketName string) (rest.MerkleTree, error) {
endpoint := memberAddress.ToHTTPURL(fmt.Sprintf("/sites/%s/buckets/%s/merkle", siteID, bucketName))
response, err := client.sendRequest(ctx, "GET", endpoint, []byte{ })
if err != nil {
return rest.MerkleTree{}, err
}
var merkleTree rest.MerkleTree
if err := json.Unmarshal(response, &merkleTree); err != nil {
return rest.MerkleTree{}, err
}
return merkleTree, nil
}
func (client *Client) MerkleTreeNode(ctx context.Context, memberAddress PeerAddress, siteID string, bucketName string, nodeID uint32) (rest.MerkleNode, error) {
endpoint := memberAddress.ToHTTPURL(fmt.Sprintf("/sites/%s/buckets/%s/merkle/nodes/%d", siteID, bucketName, nodeID))
response, err := client.sendRequest(ctx, "GET", endpoint, []byte{ })
if err != nil {
return rest.MerkleNode{}, err
}
var merkleNode rest.MerkleNode
if err := json.Unmarshal(response, &merkleNode); err != nil {
return rest.MerkleNode{}, err
}
return merkleNode, nil
}
func (client *Client) MerkleTreeNodeKeys(ctx context.Context, memberAddress PeerAddress, siteID string, bucketName string, nodeID uint32) (rest.MerkleKeys, error) {
endpoint := memberAddress.ToHTTPURL(fmt.Sprintf("/sites/%s/buckets/%s/merkle/nodes/%d/keys", siteID, bucketName, nodeID))
response, err := client.sendRequest(ctx, "GET", endpoint, []byte{ })
if err != nil {
return rest.MerkleKeys{}, err
}
var merkleKeys rest.MerkleKeys
if err := json.Unmarshal(response, &merkleKeys); err != nil {
return rest.MerkleKeys{}, err
}
return merkleKeys, nil
}
| {
config.Timeout = DefaultClientTimeout
} |
grpc.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.vision_v1p2beta1.types import image_annotator
from google.longrunning import operations_pb2 # type: ignore
from .base import ImageAnnotatorTransport, DEFAULT_CLIENT_INFO
class ImageAnnotatorGrpcTransport(ImageAnnotatorTransport):
"""gRPC backend transport for ImageAnnotator.
Service that performs Google Cloud Vision API detection tasks
over client images, such as face, landmark, logo, label, and
text detection. The ImageAnnotator service returns detected
entities from the images.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "vision.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "vision.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property |
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def batch_annotate_images(
self,
) -> Callable[
[image_annotator.BatchAnnotateImagesRequest],
image_annotator.BatchAnnotateImagesResponse,
]:
r"""Return a callable for the batch annotate images method over gRPC.
Run image detection and annotation for a batch of
images.
Returns:
Callable[[~.BatchAnnotateImagesRequest],
~.BatchAnnotateImagesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_annotate_images" not in self._stubs:
self._stubs["batch_annotate_images"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p2beta1.ImageAnnotator/BatchAnnotateImages",
request_serializer=image_annotator.BatchAnnotateImagesRequest.serialize,
response_deserializer=image_annotator.BatchAnnotateImagesResponse.deserialize,
)
return self._stubs["batch_annotate_images"]
@property
def async_batch_annotate_files(
self,
) -> Callable[
[image_annotator.AsyncBatchAnnotateFilesRequest], operations_pb2.Operation
]:
r"""Return a callable for the async batch annotate files method over gRPC.
Run async image detection and annotation for a list of generic
files (e.g. PDF) which may contain multiple pages and multiple
images per page. Progress and results can be retrieved through
the ``google.longrunning.Operations`` interface.
``Operation.metadata`` contains ``OperationMetadata``
(metadata). ``Operation.response`` contains
``AsyncBatchAnnotateFilesResponse`` (results).
Returns:
Callable[[~.AsyncBatchAnnotateFilesRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "async_batch_annotate_files" not in self._stubs:
self._stubs["async_batch_annotate_files"] = self.grpc_channel.unary_unary(
"/google.cloud.vision.v1p2beta1.ImageAnnotator/AsyncBatchAnnotateFiles",
request_serializer=image_annotator.AsyncBatchAnnotateFilesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["async_batch_annotate_files"]
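    # A minimal usage sketch (assumed wiring; normally the generated ImageAnnotator
    # client constructs this transport itself rather than the caller doing so):
    #
    #   channel = ImageAnnotatorGrpcTransport.create_channel("vision.googleapis.com")
    #   transport = ImageAnnotatorGrpcTransport(channel=channel)
    #   rpc = transport.batch_annotate_images  # callable taking a BatchAnnotateImagesRequest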
__all__ = ("ImageAnnotatorGrpcTransport",) | def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations. |
weakref.rs | //! Implementation in line with the python `weakref` module.
//!
//! See also:
//! - [python weakref module](https://docs.python.org/3/library/weakref.html)
//! - [rust weak struct](https://doc.rust-lang.org/std/rc/struct.Weak.html)
//!
use crate::{PyObjectRef, VirtualMachine};
fn | (obj: PyObjectRef) -> usize {
PyObjectRef::weak_count(&obj)
}
fn _weakref_getweakrefs(_obj: PyObjectRef) -> Vec<PyObjectRef> {
// TODO: implement this, may require a different gc
Vec::new()
}
fn _weakref_remove_dead_weakref(_obj: PyObjectRef, _key: PyObjectRef) {
// TODO
}
pub fn make_module(vm: &VirtualMachine) -> PyObjectRef {
let ctx = &vm.ctx;
py_module!(vm, "_weakref", {
"ref" => ctx.types.weakref_type.clone(),
"proxy" => ctx.types.weakproxy_type.clone(),
"getweakrefcount" => named_function!(ctx, _weakref, getweakrefcount),
"getweakrefs" => named_function!(ctx, _weakref, getweakrefs),
"ReferenceType" => ctx.types.weakref_type.clone(),
"ProxyType" => ctx.types.weakproxy_type.clone(),
"CallableProxyType" => ctx.types.weakproxy_type.clone(),
"_remove_dead_weakref" => named_function!(ctx, _weakref, remove_dead_weakref),
})
}
| _weakref_getweakrefcount |
policy.go | package rest
import (
"fmt"
"github.com/Aptomi/aptomi/pkg/api"
"github.com/Aptomi/aptomi/pkg/client/rest/http"
"github.com/Aptomi/aptomi/pkg/config"
"github.com/Aptomi/aptomi/pkg/engine"
"github.com/Aptomi/aptomi/pkg/runtime"
"github.com/sirupsen/logrus"
)
type policyClient struct {
cfg *config.Client |
func (client *policyClient) Show(gen runtime.Generation) (*engine.PolicyData, error) {
response, err := client.httpClient.GET(fmt.Sprintf("/policy/gen/%d", gen), engine.TypePolicyData)
if err != nil {
return nil, err
}
if serverError, ok := response.(*api.ServerError); ok {
return nil, fmt.Errorf("server error: %s", serverError.Error)
}
return response.(*engine.PolicyData), nil
}
func (client *policyClient) Apply(updated []runtime.Object, noop bool, logLevel logrus.Level) (*api.PolicyUpdateResult, error) {
response, err := client.httpClient.POSTSlice(fmt.Sprintf("/policy/noop/%t/loglevel/%s", noop, logLevel.String()), api.TypePolicyUpdateResult, updated)
if err != nil {
return nil, err
}
if serverError, ok := response.(*api.ServerError); ok {
return nil, fmt.Errorf("server error: %s", serverError.Error)
}
return response.(*api.PolicyUpdateResult), nil
}
func (client *policyClient) Delete(updated []runtime.Object, noop bool, logLevel logrus.Level) (*api.PolicyUpdateResult, error) {
response, err := client.httpClient.DELETESlice(fmt.Sprintf("/policy/noop/%t/loglevel/%s", noop, logLevel.String()), api.TypePolicyUpdateResult, updated)
if err != nil {
return nil, err
}
if serverError, ok := response.(*api.ServerError); ok {
return nil, fmt.Errorf("server error: %s", serverError.Error)
}
return response.(*api.PolicyUpdateResult), nil
} | httpClient http.Client
} |
apps.py | from django.apps import AppConfig
| class SearchKeywordConfig(AppConfig):
name = 'Search_Keyword' |
|
mod.rs | use std::sync::Mutex;
use sanskrit_common::errors::*;
use sanskrit_common::model::{ModuleLink, SlicePtr, ValueRef, Hash, Ptr};
use sanskrit_common::arena::{HeapArena, VirtualHeapArena};
use sanskrit_interpreter::model::{ValueSchema, Kind, Entry, Adt, RuntimeType};
use std::collections::BTreeMap;
use sanskrit_runtime::system::SystemContext;
use std::cell::Cell;
use sanskrit_common::hashing::HashingDomain;
use sanskrit_sled_store::SledStore;
use sanskrit_runtime::direct_stored::{StatefulEntryStoreVerifier, SystemDataManager, StatefulEntryStoreExecutor};
use sanskrit_common::encoding::{VirtualSize, ParserAllocator, Parser};
use sanskrit_runtime::model::{BundleWithHash, BaseTransactionBundle};
use sanskrit_runtime::CONFIG;
use externals::crypto::{join_hash, plain_hash, ecdsa_verify};
use sanskrit_compile::externals::{CompilationExternals, CompilationResult};
use sanskrit_interpreter::externals::{RuntimeExternals, ExecutionInterface};
pub mod i8;
pub mod i16;
pub mod i32;
pub mod i64;
pub mod i128;
pub mod u8;
pub mod u16;
pub mod u32;
pub mod u64;
pub mod u128;
pub mod data;
pub mod ids;
pub mod eddsa;
pub mod _unsafe;
pub mod crypto;
pub trait External:Sync{
fn compile_lit<'b,'h>(&self, data_idx: u8, data:SlicePtr<'b,u8>, caller: &Hash, alloc:&'b HeapArena<'h>) -> Result<CompilationResult<'b>>;
fn get_literal_checker<'b,'h>(&self, data_idx: u8, len:u16, alloc:&'b HeapArena<'h>) -> Result<ValueSchema<'b>>;
fn compile_call<'b,'h>(&self, fun_idx: u8, params:SlicePtr<'b,ValueRef>, caller:&Hash, alloc:&'b HeapArena<'h>) -> Result<CompilationResult<'b>>;
}
lazy_static! {
pub static ref EXT_MAP: Mutex<BTreeMap<Hash, &'static dyn External>> = Mutex::new(BTreeMap::new());
}
lazy_static! {
pub static ref SYS_HASH: Mutex<Cell<Hash>> = Mutex::new(Cell::new([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]));
}
lazy_static! {
pub static ref EDDSA_HASH: Mutex<Cell<Hash>> = Mutex::new(Cell::new([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]));
}
pub fn get_ed_dsa_module() -> Hash {EDDSA_HASH.lock().unwrap().get()}
lazy_static! {
pub static ref SYS_MODS: [fn(Hash)->();16] = [
|h|{EXT_MAP.lock().unwrap().insert(h,i8::EXT_I8);}, //0
|h|{EXT_MAP.lock().unwrap().insert(h,i16::EXT_I16);}, //1
|h|{EXT_MAP.lock().unwrap().insert(h,i32::EXT_I32);}, //2
|h|{EXT_MAP.lock().unwrap().insert(h,i64::EXT_I64);}, //3
|h|{EXT_MAP.lock().unwrap().insert(h,i128::EXT_I128);}, //4
|h|{EXT_MAP.lock().unwrap().insert(h,u8::EXT_U8);}, //5
|h|{EXT_MAP.lock().unwrap().insert(h,u16::EXT_U16);}, //6
|h|{EXT_MAP.lock().unwrap().insert(h,u32::EXT_U32);}, //7
|h|{EXT_MAP.lock().unwrap().insert(h,u64::EXT_U64);}, //8
|h|{EXT_MAP.lock().unwrap().insert(h,u128::EXT_U128);}, //9
|h|{EXT_MAP.lock().unwrap().insert(h,data::EXT_DATA);}, //10
|h|{EXT_MAP.lock().unwrap().insert(h,ids::EXT_IDS);}, //11
|h|{SYS_HASH.lock().unwrap().set(h);}, //12
        |h|{EXT_MAP.lock().unwrap().insert(h,eddsa::EXT_ECDSA);}, //13
|h|{EXT_MAP.lock().unwrap().insert(h,_unsafe::EXT_UNSAFE);}, //14
|h|{EDDSA_HASH.lock().unwrap().set(h);}, //15
];
}
pub struct ServerExternals;
impl CompilationExternals for ServerExternals {
fn compile_call<'b, 'h>(module: &ModuleLink, fun_idx: u8, params: SlicePtr<'b, ValueRef>, caller: &[u8; 20], alloc: &'b HeapArena<'h>) -> Result<CompilationResult<'b>> {
match EXT_MAP.lock().unwrap().get(&module.to_hash()) {
None => error(|| "Implementation for external module is missing"),
Some(ref imp) => imp.compile_call(fun_idx, params, caller, alloc)
}
}
fn compile_lit<'b, 'h>(module: &ModuleLink, data_idx: u8, data: SlicePtr<'b, u8>, caller: &[u8; 20], alloc: &'b HeapArena<'h>) -> Result<CompilationResult<'b>> {
match EXT_MAP.lock().unwrap().get(&module.to_hash()) {
None => error(|| "Implementation for external module is missing"),
Some(ref imp) => imp.compile_lit(data_idx, data, caller, alloc)
}
}
fn get_literal_checker<'b, 'h>(module: &ModuleLink, data_idx: u8, len: u16, alloc: &'b HeapArena<'h>) -> Result<ValueSchema<'b>> {
match EXT_MAP.lock().unwrap().get(&module.to_hash()) {
None => error(|| "Implementation for external module is missing"),
Some(ref imp) => imp.get_literal_checker(data_idx, len, alloc)
}
}
}
impl RuntimeExternals for ServerExternals {
fn typed_system_call<'interpreter, 'transaction:'interpreter, 'heap:'transaction, I:ExecutionInterface<'interpreter, 'transaction, 'heap>>(interface:&mut I, id:u8, kind:Kind, values: &[ValueRef], tail:bool) -> Result<()>{
match id {
//Hash
0 => plain_hash(interface, kind, values[0], tail),
_ => unreachable!()
}
}
fn system_call<'interpreter, 'transaction:'interpreter, 'heap:'transaction, I:ExecutionInterface<'interpreter, 'transaction, 'heap>>(interface:&mut I, id:u8, values: &[ValueRef], tail:bool) -> Result<()>{
match id {
//Derive
0 => join_hash(interface, values[0], values[1], HashingDomain::Derive, tail),
//EcDsaVerify
1 => ecdsa_verify(interface, values[0], values[1], values[2], tail),
_ => unreachable!()
}
}
}
pub struct ServerSystemDataManager;
impl<'c> SystemDataManager<BundleWithHash<'c>> for ServerSystemDataManager {
fn providable_size(typ: Ptr<RuntimeType>) -> Result<u32> |
fn providable_gas(typ: Ptr<RuntimeType>) -> Result<u64> {
match *typ {
RuntimeType::Custom { module, offset, .. } if module == SYS_HASH.lock().unwrap().get() && offset == 1 => {
let hash_alloc = (13 + 20/50) as u64;
let pack = 13 + (6 as u64);
Ok(hash_alloc + pack)
}
RuntimeType::Custom { module, offset, .. } if module == SYS_HASH.lock().unwrap().get() && offset == 2 => {
let hash_alloc = (13 + 20/50) as u64;
let pack = 13 + (6 as u64);
let hash_cost = 65;
Ok(hash_alloc + pack + hash_cost)
}
_ => return error(||"Provided value parameter must be of a supported type")
}
}
fn is_chain_value(typ: Ptr<RuntimeType>) -> bool {
match *typ {
RuntimeType::Custom { module, offset, .. } if module == SYS_HASH.lock().unwrap().get() && offset == 0 => true,
_ => false
}
}
fn provided_value_key(typ: Ptr<RuntimeType>, section_no:u8, txt_no:u8) -> Option<Vec<u8>> {
match *typ {
//This means we can only provide 1 value per Txt
RuntimeType::Custom { module, offset, .. } if module == SYS_HASH.lock().unwrap().get() && offset == 2 => Some(vec![section_no,txt_no]),
//For the rest (TxData we can provide as many copies as we want)
_ => None
}
}
fn create_provided_value<'a, 'h>(bundle: &BundleWithHash, typ: Ptr<RuntimeType>, alloc: &'a VirtualHeapArena<'h>, block_no: u64, section_no:u8, txt_no:u8) -> Result<Entry<'a>> {
match *typ {
RuntimeType::Custom { module, offset, .. } if module == SYS_HASH.lock().unwrap().get() && offset == 1 => {
Ok(Entry{adt: Adt(0,alloc.copy_alloc_slice(&[
Entry {data: alloc.copy_alloc_slice(&bundle.bundle_hash)?},
Entry {u64: block_no},
Entry {u8: section_no},
Entry {u8: txt_no},
])?)})
},
RuntimeType::Custom { module, offset, .. } if module == SYS_HASH.lock().unwrap().get() && offset == 2 => {
let mut context = HashingDomain::Derive.get_domain_hasher();
                //fill the hash with bundle hash value
context.update(&bundle.bundle_hash);
//fill the hash with section & txt indexes
context.update(&[section_no,txt_no]);
Ok(Entry{adt: Adt(0,alloc.copy_alloc_slice(&[
//calc the Hash
Entry {data: context.alloc_finalize(&alloc)?},
Entry {u64: 0},
])?)})
},
_ => error(||"Requested value is not providable")
}
}
}
pub struct ServerSystem;
impl<'c> SystemContext<'c> for ServerSystem {
type CE = ServerExternals;
type RE = ServerExternals;
type S = SledStore;
type B = BundleWithHash<'c>;
type VC = StatefulEntryStoreVerifier<Self::B,ServerSystemDataManager>;
type EC = StatefulEntryStoreExecutor<Self::B,ServerSystemDataManager>;
fn parse_bundle<A: ParserAllocator>(data: &[u8], alloc: &'c A) -> Result<Self::B> {
let txt_bundle:BaseTransactionBundle = Parser::parse_fully(data, CONFIG.max_structural_dept, alloc)?;
let bundle_hash = HashingDomain::Bundle.hash(&data[..txt_bundle.core.byte_size.unwrap()]);
Ok(BundleWithHash {
txt_bundle,
bundle_hash,
})
}
} | {
match *typ {
RuntimeType::Custom { module, offset, .. } if module == SYS_HASH.lock().unwrap().get() && offset == 1 => {
Ok((Hash::SIZE + 4*Entry::SIZE) as u32)
}
RuntimeType::Custom { module, offset, .. } if module == SYS_HASH.lock().unwrap().get() && offset == 2 => {
Ok((Hash::SIZE + 2*Entry::SIZE) as u32)
}
_ => return error(||"Provided value parameter must be of a supported type")
}
} |
pomis2_57be95a71b575624c33c6ffe64e50d6e.py | import numpy as np
import warnings
import baselines.common.tf_util as U
import tensorflow as tf
import time
from baselines.common import zipsame, colorize
from contextlib import contextmanager
from collections import deque
from baselines import logger
from baselines.common.cg import cg
from baselines.pomis2.memory import Memory
from baselines.common.centralized_sampler import traj_segment_generator
from baselines.pois.utils import cluster_rewards
@contextmanager
def timed(msg):
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize('done in %.3f seconds'%(time.time() - tstart), color='magenta'))
def update_epsilon(delta_bound, epsilon_old, max_increase=2.):
if delta_bound > (1. - 1. / (2 * max_increase)) * epsilon_old:
return epsilon_old * max_increase
else:
return epsilon_old ** 2 / (2 * (epsilon_old - delta_bound))
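# Worked sanity check for update_epsilon with the default max_increase=2 (the
# threshold is (1 - 1/4) * epsilon_old):
#   update_epsilon(0.8, 1.0) -> 0.8 > 0.75, so epsilon doubles to 2.0
#   update_epsilon(0.5, 1.0) -> 0.5 <= 0.75, so 1.0**2 / (2 * (1.0 - 0.5)) = 1.0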
def line_search_parabola(theta_init, alpha, natural_gradient, set_parameter, evaluate_bound, delta_bound_tol=1e-4, max_line_search_ite=30):
epsilon = 1.
epsilon_old = 0.
delta_bound_old = -np.inf
bound_init = evaluate_bound()
theta_old = theta_init
for i in range(max_line_search_ite):
theta = theta_init + epsilon * alpha * natural_gradient
set_parameter(theta)
bound = evaluate_bound()
if np.isnan(bound):
warnings.warn('Got NaN bound value: rolling back!')
return theta_old, epsilon_old, delta_bound_old, i + 1
delta_bound = bound - bound_init
epsilon_old = epsilon
epsilon = update_epsilon(delta_bound, epsilon_old)
if delta_bound <= delta_bound_old + delta_bound_tol:
if delta_bound_old < 0.:
return theta_init, 0., 0., i+1
else:
return theta_old, epsilon_old, delta_bound_old, i+1
delta_bound_old = delta_bound
theta_old = theta
return theta_old, epsilon_old, delta_bound_old, i+1
def line_search_binary(theta_init, alpha, natural_gradient, set_parameter, evaluate_loss, delta_bound_tol=1e-4, max_line_search_ite=30):
low = 0.
high = None
bound_init = evaluate_loss()
delta_bound_old = 0.
theta_opt = theta_init
i_opt = 0
delta_bound_opt = 0.
epsilon_opt = 0.
epsilon = 1.
for i in range(max_line_search_ite):
theta = theta_init + epsilon * natural_gradient * alpha
set_parameter(theta)
bound = evaluate_loss()
delta_bound = bound - bound_init
if np.isnan(bound):
warnings.warn('Got NaN bound value: rolling back!')
if np.isnan(bound) or delta_bound <= delta_bound_opt:
high = epsilon
else:
low = epsilon
theta_opt = theta
delta_bound_opt = delta_bound
i_opt = i
epsilon_opt = epsilon
epsilon_old = epsilon
if high is None:
epsilon *= 2
else:
epsilon = (low + high) / 2.
if abs(epsilon_old - epsilon) < 1e-12:
break
return theta_opt, epsilon_opt, delta_bound_opt, i_opt+1
def optimize_offline(theta_init, set_parameter, line_search, evaluate_loss, evaluate_gradient, evaluate_natural_gradient=None, gradient_tol=1e-4, bound_tol=1e-4, max_offline_ite=100):
theta = theta_old = theta_init
improvement = improvement_old = 0.
set_parameter(theta)
'''
bound_init = evaluate_loss()
import scipy.optimize as opt
def func(x):
set_parameter(x)
return -evaluate_loss()
def grad(x):
set_parameter(x)
return -evaluate_gradient().astype(np.float64)
theta, bound, d = opt.fmin_l_bfgs_b(func=func,
fprime=grad,
x0=theta_init.astype(np.float64),
maxiter=100,
)
print(bound_init, bound)
print(d)
set_parameter(theta)
improvement = bound_init + bound
return theta, improvement
'''
fmtstr = '%6i %10.3g %10.3g %18i %18.3g %18.3g %18.3g'
titlestr = '%6s %10s %10s %18s %18s %18s %18s'
print(titlestr % ('iter', 'epsilon', 'step size', 'num line search', 'gradient norm', 'delta bound ite', 'delta bound tot'))
for i in range(max_offline_ite):
bound = evaluate_loss()
gradient = evaluate_gradient()
if np.any(np.isnan(gradient)):
warnings.warn('Got NaN gradient! Stopping!')
set_parameter(theta_old)
return theta_old, improvement
if np.isnan(bound):
warnings.warn('Got NaN bound! Stopping!')
set_parameter(theta_old)
return theta_old, improvement_old
if evaluate_natural_gradient is not None:
natural_gradient = evaluate_natural_gradient(gradient)
else:
natural_gradient = gradient
if np.dot(gradient, natural_gradient) < 0:
warnings.warn('NatGradient dot Gradient < 0! Using vanilla gradient')
natural_gradient = gradient
gradient_norm = np.sqrt(np.dot(gradient, natural_gradient))
if gradient_norm < gradient_tol:
print('stopping - gradient norm < gradient_tol')
return theta, improvement
alpha = 1. / gradient_norm ** 2
theta_old = theta
improvement_old = improvement
theta, epsilon, delta_bound, num_line_search = line_search(theta, alpha, natural_gradient, set_parameter, evaluate_loss)
set_parameter(theta)
improvement += delta_bound
print(fmtstr % (i+1, epsilon, alpha*epsilon, num_line_search, gradient_norm, delta_bound, improvement))
if delta_bound < bound_tol:
print('stopping - delta bound < bound_tol')
return theta, improvement
return theta, improvement
def learn(env, make_policy, *,
n_episodes,
horizon,
delta,
gamma,
max_iters,
sampler=None,
use_natural_gradient=False, #can be 'exact', 'approximate'
fisher_reg=1e-2,
iw_method='is',
iw_norm='none',
bound='J',
line_search_type='parabola',
save_weights=0,
improvement_tol=0.,
center_return=False,
render_after=None,
max_offline_iters=100,
callback=None,
clipping=False,
entropy='none',
positive_return=False,
reward_clustering='none',
capacity=10,
warm_start=True):
np.set_printoptions(precision=3)
max_samples = horizon * n_episodes
if line_search_type == 'binary':
line_search = line_search_binary
elif line_search_type == 'parabola':
line_search = line_search_parabola
else:
raise ValueError()
# Building the environment
ob_space = env.observation_space
ac_space = env.action_space
# Creating the memory buffer
memory = Memory(capacity=capacity, batch_size=n_episodes, horizon=horizon,
ob_space=ob_space, ac_space=ac_space)
# Building the target policy and saving its parameters
pi = make_policy('pi', ob_space, ac_space)
all_var_list = pi.get_trainable_variables()
var_list = [v for v in all_var_list if v.name.split('/')[1].startswith('pol')]
shapes = [U.intprod(var.get_shape().as_list()) for var in var_list]
n_parameters = sum(shapes)
# Building a set of behavioral policies
behavioral_policies = memory.build_policies(make_policy, pi)
# Placeholders
ob_ = ob = U.get_placeholder_cached(name='ob')
ac_ = pi.pdtype.sample_placeholder([None], name='ac')
mask_ = tf.placeholder(dtype=tf.float32, shape=(None), name='mask')
rew_ = tf.placeholder(dtype=tf.float32, shape=(None), name='rew')
disc_rew_ = tf.placeholder(dtype=tf.float32, shape=(None), name='disc_rew')
clustered_rew_ = tf.placeholder(dtype=tf.float32, shape=(None))
gradient_ = tf.placeholder(dtype=tf.float32, shape=(n_parameters, 1), name='gradient')
iter_number_ = tf.placeholder(dtype=tf.int32, name='iter_number')
active_policies = tf.placeholder(dtype=tf.float32, shape=(capacity), name='active_policies')
losses_with_name = []
# Total number of trajectories
N_total = tf.reduce_sum(active_policies) * n_episodes
# Split operations
disc_rew_split = tf.reshape(disc_rew_ * mask_, [-1, horizon])
rew_split = tf.reshape(rew_ * mask_, [-1, horizon])
mask_split = tf.reshape(mask_, [-1, horizon])
# Policy densities
target_log_pdf = pi.pd.logp(ac_) * mask_
target_log_pdf_split = tf.reshape(target_log_pdf, [-1, horizon])
behavioral_log_pdfs = tf.stack([bpi.pd.logp(ac_) * mask_ for bpi in memory.policies]) # Shape is (capacity, ntraj*horizon)
behavioral_log_pdfs_split = tf.reshape(behavioral_log_pdfs, [memory.capacity, -1, horizon])
# Compute renyi divergencies and sum over time, then exponentiate
emp_d2_split = tf.reshape(tf.stack([pi.pd.renyi(bpi.pd, 2) * mask_ for bpi in memory.policies]), [memory.capacity, -1, horizon])
emp_d2_split_cum = tf.exp(tf.reduce_sum(emp_d2_split, axis=2))
# Compute arithmetic and harmonic mean of emp_d2
emp_d2_mean = tf.reduce_mean(emp_d2_split_cum, axis=1)
emp_d2_arithmetic = tf.reduce_sum(emp_d2_mean * active_policies) / tf.reduce_sum(active_policies)
emp_d2_harmonic = tf.reduce_sum(active_policies) / tf.reduce_sum(1 / emp_d2_mean)
# Return processing: clipping, centering, discounting
ep_return = clustered_rew_ #tf.reduce_sum(mask_split * disc_rew_split, axis=1)
if clipping:
rew_split = tf.clip_by_value(rew_split, -1, 1)
if center_return:
ep_return = ep_return - tf.reduce_mean(ep_return)
rew_split = rew_split - (tf.reduce_sum(rew_split) / (tf.reduce_sum(mask_split) + 1e-24))
discounter = [pow(gamma, i) for i in range(0, horizon)] # Decreasing gamma
discounter_tf = tf.constant(discounter)
disc_rew_split = rew_split * discounter_tf
# Reward statistics
return_mean = tf.reduce_mean(ep_return)
return_std = U.reduce_std(ep_return)
return_max = tf.reduce_max(ep_return)
return_min = tf.reduce_min(ep_return)
return_abs_max = tf.reduce_max(tf.abs(ep_return))
return_step_max = tf.reduce_max(tf.abs(rew_split)) # Max step reward
return_step_mean = tf.abs(tf.reduce_mean(rew_split))
positive_step_return_max = tf.maximum(0.0, tf.reduce_max(rew_split))
negative_step_return_max = tf.maximum(0.0, tf.reduce_max(-rew_split))
return_step_maxmin = tf.abs(positive_step_return_max - negative_step_return_max)
losses_with_name.extend([(return_mean, 'InitialReturnMean'),
(return_max, 'InitialReturnMax'),
(return_min, 'InitialReturnMin'),
(return_std, 'InitialReturnStd'),
(emp_d2_arithmetic, 'EmpiricalD2Arithmetic'),
(emp_d2_harmonic, 'EmpiricalD2Harmonic'),
(return_step_max, 'ReturnStepMax'),
(return_step_maxmin, 'ReturnStepMaxmin')])
if iw_method == 'is':
# Sum the log prob over time. Shapes: target(Nep, H), behav (Cap, Nep, H)
target_log_pdf_episode = tf.reduce_sum(target_log_pdf_split, axis=1)
behavioral_log_pdf_episode = tf.reduce_sum(behavioral_log_pdfs_split, axis=2)
# To avoid numerical instability, compute the inversed ratio
log_ratio = target_log_pdf_split - behavioral_log_pdfs_split
inverse_log_ratio_episode = - tf.reduce_sum(log_ratio, axis=2)
iw = 1 / tf.reduce_sum(tf.exp(inverse_log_ratio_episode) * tf.expand_dims(active_policies, -1), axis=0)
# Compute also the balance-heuristic weights
iw_split = tf.reshape(iw, (memory.capacity, -1))
iw_by_behavioral = tf.reduce_mean(iw_split, axis=1)
losses_with_name.append((iw_by_behavioral[0] / tf.reduce_sum(iw_by_behavioral), 'MultiIWFirstRatio'))
losses_with_name.append((tf.reduce_max(iw_by_behavioral), 'MultiIWMax'))
losses_with_name.append((tf.reduce_sum(iw_by_behavioral), 'MultiIWSum'))
losses_with_name.append((tf.reduce_min(iw_by_behavioral), 'MultiIWMin'))
# Get the probability by exponentiation
#target_pdf_episode = tf.exp(target_log_pdf_episode)
#behavioral_pdf_episode = tf.exp(behavioral_log_pdf_episode)
# Get the denominator by averaging over behavioral policies
#behavioral_pdf_mixture = tf.reduce_mean(behavioral_pdf_episode, axis=0) + 1e-24
#iw = target_pdf_episode / behavioral_pdf_mixture
iwn = iw / n_episodes
# Compute the J
w_return_mean = tf.reduce_sum(ep_return * iwn)
# Empirical D2 of the mixture and relative ESS
ess_renyi_arithmetic = N_total / emp_d2_arithmetic
ess_renyi_harmonic = N_total / emp_d2_harmonic
# Log quantities
losses_with_name.extend([(tf.reduce_max(iw), 'MaxIW'),
(tf.reduce_min(iw), 'MinIW'),
(tf.reduce_mean(iw), 'MeanIW'),
(U.reduce_std(iw), 'StdIW'),
(tf.reduce_min(target_log_pdf_episode), 'MinTargetPdf'),
(tf.reduce_min(behavioral_log_pdf_episode), 'MinBehavPdf'),
(ess_renyi_arithmetic, 'ESSRenyiArithmetic'),
(ess_renyi_harmonic, 'ESSRenyiHarmonic')])
else:
raise NotImplementedError()
if bound == 'J':
bound_ = w_return_mean
elif bound == 'max-d2-harmonic':
bound_ = w_return_mean - tf.sqrt((1 - delta) / (delta * ess_renyi_harmonic)) * return_abs_max
elif bound == 'max-d2-arithmetic':
bound_ = w_return_mean - tf.sqrt((1 - delta) / (delta * ess_renyi_arithmetic)) * return_abs_max
else:
raise NotImplementedError()
# Policy entropy for exploration
ent = pi.pd.entropy()
meanent = tf.reduce_mean(ent)
losses_with_name.append((meanent, 'MeanEntropy'))
# Add policy entropy bonus
if entropy != 'none':
scheme, v1, v2 = entropy.split(':')
if scheme == 'step':
entcoeff = tf.cond(iter_number_ < int(v2), lambda: float(v1), lambda: float(0.0))
losses_with_name.append((entcoeff, 'EntropyCoefficient'))
entbonus = entcoeff * meanent
bound_ = bound_ + entbonus
elif scheme == 'lin':
ip = tf.cast(iter_number_ / max_iters, tf.float32)
entcoeff_decay = tf.maximum(0.0, float(v2) + (float(v1) - float(v2)) * (1.0 - ip))
losses_with_name.append((entcoeff_decay, 'EntropyCoefficient'))
entbonus = entcoeff_decay * meanent
bound_ = bound_ + entbonus
elif scheme == 'exp':
ent_f = tf.exp(-tf.abs(tf.reduce_mean(iw) - 1) * float(v2)) * float(v1)
losses_with_name.append((ent_f, 'EntropyCoefficient'))
bound_ = bound_ + ent_f * meanent
else:
raise Exception('Unrecognized entropy scheme.')
losses_with_name.append((w_return_mean, 'ReturnMeanIW'))
losses_with_name.append((bound_, 'Bound'))
losses, loss_names = map(list, zip(*losses_with_name))
'''
if use_natural_gradient:
p = tf.placeholder(dtype=tf.float32, shape=[None])
target_logpdf_episode = tf.reduce_sum(target_log_pdf_split * mask_split, axis=1)
grad_logprob = U.flatgrad(tf.stop_gradient(iwn) * target_logpdf_episode, var_list)
dot_product = tf.reduce_sum(grad_logprob * p)
hess_logprob = U.flatgrad(dot_product, var_list)
compute_linear_operator = U.function([p, ob_, ac_, disc_rew_, mask_], [-hess_logprob])
'''
assert_ops = tf.group(*tf.get_collection('asserts'))
print_ops = tf.group(*tf.get_collection('prints'))
compute_lossandgrad = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], losses + [U.flatgrad(bound_, var_list), assert_ops, print_ops])
compute_grad = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [U.flatgrad(bound_, var_list), assert_ops, print_ops])
compute_bound = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [bound_, assert_ops, print_ops])
compute_losses = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], losses)
#compute_temp = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [log_inverse_ratio, abc, iw])
set_parameter = U.SetFromFlat(var_list)
get_parameter = U.GetFlat(var_list)
policy_reinit = tf.variables_initializer(var_list)
if sampler is None:
seg_gen = traj_segment_generator(pi, env, n_episodes, horizon, stochastic=True, gamma=gamma)
sampler = type("SequentialSampler", (object,), {"collect": lambda self, _: seg_gen.__next__()})()
U.initialize()
# Starting optimizing
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
lenbuffer = deque(maxlen=n_episodes)
rewbuffer = deque(maxlen=n_episodes)
while True:
iters_so_far += 1
if iters_so_far == 50:
print('=== CHANGED GAMMA TO 1.0')
seg_gen = traj_segment_generator(pi, env, n_episodes, horizon, stochastic=True, gamma=1.0)
sampler = type("SequentialSampler", (object,), {"collect": lambda self, _: seg_gen.__next__()})()
if render_after is not None and iters_so_far % render_after == 0:
if hasattr(env, 'render'):
render(env, pi, horizon)
if callback:
callback(locals(), globals())
if iters_so_far >= max_iters:
print('Finished...')
break
logger.log('********** Iteration %i ************' % iters_so_far)
theta = get_parameter()
with timed('sampling'):
seg = sampler.collect(theta)
lens, rets = seg['ep_lens'], seg['ep_rets']
lenbuffer.extend(lens)
rewbuffer.extend(rets)
episodes_so_far += len(lens)
timesteps_so_far += sum(lens)
# Adding batch of trajectories to memory
memory.add_trajectory_batch(seg)
# Get multiple batches from memory
seg_with_memory = memory.get_trajectories()
# Get clustered reward
reward_matrix = np.reshape(seg_with_memory['disc_rew'] * seg_with_memory['mask'], (-1, horizon))
ep_reward = np.sum(reward_matrix, axis=1)
ep_reward = cluster_rewards(ep_reward, reward_clustering)
args = ob, ac, rew, disc_rew, clustered_rew, mask, iter_number, active_policies = (seg_with_memory['ob'],
seg_with_memory['ac'],
seg_with_memory['rew'],
seg_with_memory['disc_rew'],
ep_reward,
seg_with_memory['mask'],
iters_so_far,
memory.get_active_policies_mask())
def eval | loss = compute_bound(*args)
return loss[0]
def evaluate_gradient():
gradient = compute_grad(*args)
return gradient[0]
if use_natural_gradient:
def evaluate_fisher_vector_prod(x):
return compute_linear_operator(x, *args)[0] + fisher_reg * x
def evaluate_natural_gradient(g):
return cg(evaluate_fisher_vector_prod, g, cg_iters=10, verbose=0)
else:
evaluate_natural_gradient = None
with timed('summaries before'):
logger.record_tabular("Iteration", iters_so_far)
logger.record_tabular("InitialBound", evaluate_loss())
logger.record_tabular("EpLenMean", np.mean(lenbuffer))
logger.record_tabular("EpRewMean", np.mean(rewbuffer))
logger.record_tabular("EpThisIter", len(lens))
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", timesteps_so_far)
logger.record_tabular("TimeElapsed", time.time() - tstart)
if save_weights > 0 and iters_so_far % save_weights == 0:
logger.record_tabular('Weights', str(get_parameter()))
import pickle
file = open('checkpoint' + str(iters_so_far) + '.pkl', 'wb')
pickle.dump(theta, file)
if not warm_start or memory.get_current_load() == capacity:
# Optimize
with timed("offline optimization"):
theta, improvement = optimize_offline(theta,
set_parameter,
line_search,
evaluate_loss,
evaluate_gradient,
evaluate_natural_gradient,
max_offline_ite=max_offline_iters)
set_parameter(theta)
print(theta)
with timed('summaries after'):
meanlosses = np.array(compute_losses(*args))
for (lossname, lossval) in zip(loss_names, meanlosses):
logger.record_tabular(lossname, lossval)
else:
# Reinitialize the policy
tf.get_default_session().run(policy_reinit)
logger.dump_tabular()
env.close()
| uate_loss():
|
lib.rs | //! Invokes an instruction and returns an error, the instruction invoked
//! uses the instruction data provided and all the accounts
use solana_program::{
account_info::AccountInfo,
entrypoint::ProgramResult,
instruction::{AccountMeta, Instruction},
program::invoke,
pubkey::Pubkey,
};
solana_program::entrypoint!(process_instruction);
#[allow(clippy::unnecessary_wraps)]
fn process_instruction(
_program_id: &Pubkey,
accounts: &[AccountInfo],
instruction_data: &[u8],
) -> ProgramResult {
let to_call = accounts[0].key;
let infos = accounts;
let instruction = Instruction {
accounts: accounts[1..]
.iter()
.map(|acc| AccountMeta {
pubkey: *acc.key, | })
.collect(),
data: instruction_data.to_owned(),
program_id: *to_call,
};
let _ = invoke(&instruction, infos);
Ok(())
} | is_signer: acc.is_signer,
is_writable: acc.is_writable, |
48_Rotate_Image.py | '''
48. Rotate Image (Medium)
You are given an n x n 2D matrix representing an image; rotate the image by 90 degrees (clockwise).
You have to rotate the image in-place, which means you have to modify the input 2D matrix directly. DO NOT allocate another 2D matrix and do the rotation.
'''
class Solution:
def rotate(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
self.transpose(matrix)
self.reflex(matrix)
def | (self, matrix):
# Since matrix size nxn
m_len = len(matrix)
for r in range(m_len):
for c in range(r, m_len):
matrix[c][r], matrix[r][c] = matrix[r][c], matrix[c][r]
# Reflex matrix by middle vertical axis
def reflex(self, matrix):
for r in range(len(matrix)):
for c in range(len(matrix)//2):
matrix[r][c], matrix[r][len(matrix)-1-c] = matrix[r][len(matrix)-1-c], matrix[r][c]
| transpose |
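# Worked example of the two steps above for matrix = [[1, 2], [3, 4]]:
#   transpose -> [[1, 3], [2, 4]]
#   reflex    -> [[3, 1], [4, 2]], i.e. the input rotated 90 degrees clockwise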
css-plugin.ts | /*
* Copyright (C) 2019 CaMnter [email protected]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { IPlugin } from "./i-plugin";
import BasePlugin from "./base-plugin";
import { CssType } from "../type/css-type";
import { AppletType } from "../type/applet-type"; |
/**
* @author CaMnter
*/
class CssPlugin extends BasePlugin {
private _targetCss?: CssType;
private _expectCss?: CssType;
constructor(target: AppletType,
expect: AppletType) {
super(target, expect);
}
run(code: string | undefined | null): string {
if (!code || '' === code) {
throw new Error(`CssPlugin # constructor #「code」error: ${ code }`);
}
this.checkAppletType('_target', '_targetCss', this._target);
this.checkAppletType('_expect', '_expectCss', this._expect);
if (!this._targetCss || !this._expectCss) {
throw new Error(`CssPlugin # run # missing CssType「this.targetCss」: ${ this._targetCss }「this.expectCss」: ${ this._expectCss }`);
}
this._result = cssTransform(code, this._targetCss, this._expectCss);
return this._result;
}
checkAppletType(name: string, targetCssName: string, target: AppletType,): void {
const _this = this as any;
switch (target) {
case AppletType.wx:
_this[`${ targetCssName }`] = CssType.wxss;
break;
case AppletType.my:
_this[`${ targetCssName }`] = CssType.acss;
break;
default:
throw new Error(`CssPlugin # checkAppletType # atypical applet type「${ name }」: ${ this._target }`);
break;
}
}
get targetCss(): CssType | undefined {
return this._targetCss;
}
get expectCss(): CssType | undefined {
return this._expectCss;
}
}
export default CssPlugin; | import { cssTransform } from "../css/css-transform"; |
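// A minimal usage sketch (hedged: `wxssSource` is a placeholder for the wxss text
// supplied by the surrounding build pipeline):
//   const plugin = new CssPlugin(AppletType.wx, AppletType.my);
//   const acss = plugin.run(wxssSource); // wxss -> acss via cssTransform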
bn256_pubkey.rs | use bls_signatures_rs::bn256::Bn256;
use bls_signatures_rs::MultiSignature;
use bls_signatures_rs::bn256::PublicKey;
use bn::{arith, pairing_batch, AffineG1, AffineG2, Fq, Fq2, Fr, Group, Gt, G1, G2};
extern crate hex_slice;
use hex_slice::AsHex;
extern crate hex;
use std::env;
fn main() {
let args: Vec<String> = env::args().collect();
println!("{:?}", args);
let mut secret_key = hex::decode("c9afa9d845ba75166b5c215767b1d6934e50c3db36e89b127b8a622b120f6721").unwrap();
    if args.len() > 1 {
secret_key = hex::decode(&args[1]).unwrap();
}
// Derive public key from secret key
let public_key = Bn256.derive_public_key(&secret_key).unwrap();
println!("Compressed Public Key In Hex\n {:02X?}\n\n", &public_key);
let pubKeyUncompVec = PublicKey::from_compressed(&public_key).unwrap().to_uncompressed().unwrap();
let mut encoded = hex::encode(&pubKeyUncompVec[0..32]);
println!("Uncompressed Public Key X Real In Hex of Length {} \n {:02X?}\n\n", encoded.len(), &encoded);
encoded = hex::encode(&pubKeyUncompVec[32..64]);
println!("Uncompressed Public Key X Im In Hex of Length {} \n {:02X?}\n\n", encoded.len(), &encoded);
encoded = hex::encode(&pubKeyUncompVec[64..96]);
println!("Uncompressed Public Key Y Real In Hex of Length {} \n {:02X?}\n\n", encoded.len(), &encoded);
encoded = hex::encode(&pubKeyUncompVec[96..128]);
println!("Uncompressed Public Key Y Im In Hex of Length {} \n {:02X?}\n\n", encoded.len(), &encoded);
} | ||
slicePlane2.py | """Slice a Volume with multiple planes
Make low values of the scalar completely transparent""" | slices = []
for i in range(4):
sl = vol.slicePlane(origin=[150,150,i*50+50], normal=(0,-1,1))
slices.append(sl)
amap = [0, 1, 1, 1, 1] # hide low value points giving them alpha 0
mslices = merge(slices) # merge all slices into a single Mesh
mslices.cmap('hot_r', alpha=amap).lighting('off').addScalarBar3D()
show(vol, mslices, __doc__, axes=1) | from vedo import *
vol = Volume(dataurl+'embryo.slc').alpha([0,0,0.5]).c('k')
|
test_easyipc.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @brief This module has unit tests for the classes of EasyIPC.
# @author Luis C. Garcia-Peraza Herrera ([email protected]).
# @date 25 June 2020.
import unittest
import os
import sys
import numpy as np
# My imports
import easyipc
class | (unittest.TestCase):
def test_pipe(self):
data = [np.random.rand(1000, 1000) for i in range(100)]
newpid = os.fork()
if newpid == 0:
client = easyipc.Pipe('hoho')
client.connect()
client.send_whatever({'Hello': 'from the client'})
for i in range(len(data)):
client.send_array(data[i])
else:
server = easyipc.Pipe('hoho')
server.listen()
whatever = None
while whatever is None:
whatever = server.recv_whatever(blocking=False)
self.assertTrue(whatever['Hello'] == 'from the client')
for i in range(len(data)):
data_back = server.recv_array()
self.assertTrue(np.sum(data[i] - data_back) == 0)
if __name__ == '__main__':
unittest.main()
| TestEasyIPC |
mod.rs | // Copyright Materialize, Inc. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! Logging dataflows for events generated by various subsystems.
pub mod differential;
pub mod materialized;
pub mod timely;
use ::timely::dataflow::operators::capture::{Event, EventPusher};
use dataflow_types::logging::{DifferentialLog, LogVariant, MaterializedLog, TimelyLog};
use repr::Timestamp;
use std::time::Duration;
/// Logs events as a timely stream, with progress statements.
pub struct BatchLogger<T, E, P>
where
P: EventPusher<Timestamp, (Duration, E, T)>,
{
/// Time in milliseconds of the current expressed capability.
time_ms: Timestamp,
event_pusher: P,
_phantom: ::std::marker::PhantomData<(E, T)>,
/// Each time is advanced to the strictly next millisecond that is a multiple of this granularity.
/// This means we should be able to perform the same action on timestamp capabilities, and only
/// flush buffers when this timestamp advances.
granularity_ms: u64,
/// A stash for data that does not yet need to be sent.
buffer: Vec<(Duration, E, T)>,
}
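// Example of the granularity rounding performed in `publish_batch` below: with
// granularity_ms = 10 and an event batch arriving at time = 25ms, new_time_ms
// becomes ((25 / 10) + 1) * 10 = 30, i.e. the next strict multiple of the
// granularity after the event time.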
impl<T, E, P> BatchLogger<T, E, P>
where
P: EventPusher<Timestamp, (Duration, E, T)>,
{
/// Creates a new batch logger.
pub fn new(event_pusher: P, granularity_ms: u64) -> Self |
/// Publishes a batch of logged events and advances the capability.
pub fn publish_batch(&mut self, time: &Duration, data: &mut Vec<(Duration, E, T)>) {
let new_time_ms =
(((time.as_millis() as Timestamp) / self.granularity_ms) + 1) * self.granularity_ms;
if !data.is_empty() {
            // Flush the buffered events first if the incoming batch does not fit in the buffer's remaining capacity.
if data.len() > self.buffer.capacity() - self.buffer.len() {
self.event_pusher.push(Event::Messages(
self.time_ms as Timestamp,
self.buffer.drain(..).collect(),
));
}
self.buffer.extend(data.drain(..));
}
if self.time_ms < new_time_ms {
// Flush buffered events that may need to advance.
self.event_pusher.push(Event::Messages(
self.time_ms as Timestamp,
self.buffer.drain(..).collect(),
));
// In principle we can buffer up until this point, if that is appealing to us.
// We could buffer more aggressively if the logging granularity were exposed
// here, as the forward ticks would be that much less frequent.
self.event_pusher
.push(Event::Progress(vec![(new_time_ms, 1), (self.time_ms, -1)]));
}
self.time_ms = new_time_ms;
}
}
impl<T, E, P> Drop for BatchLogger<T, E, P>
where
P: EventPusher<Timestamp, (Duration, E, T)>,
{
fn drop(&mut self) {
self.event_pusher
.push(Event::Progress(vec![(self.time_ms as Timestamp, -1)]));
}
}
| {
BatchLogger {
time_ms: 0,
event_pusher,
_phantom: ::std::marker::PhantomData,
granularity_ms,
buffer: Vec::with_capacity(1024),
}
} |
circle_bucket.js | 'use strict';
const Bucket = require('../bucket');
const createVertexArrayType = require('../vertex_array_type');
const createElementArrayType = require('../element_array_type');
const loadGeometry = require('../load_geometry');
const EXTENT = require('../extent');
const circleInterface = {
layoutVertexArrayType: createVertexArrayType([
{name: 'a_pos', components: 2, type: 'Int16'}
]),
elementArrayType: createElementArrayType(),
paintAttributes: [
{property: 'circle-color', type: 'Uint8'},
{property: 'circle-radius', type: 'Uint16', multiplier: 10},
{property: 'circle-blur', type: 'Uint16', multiplier: 10},
{property: 'circle-opacity', type: 'Uint8', multiplier: 255}
]
};
function addCircleVertex(layoutVertexArray, x, y, extrudeX, extrudeY) {
layoutVertexArray.emplaceBack(
(x * 2) + ((extrudeX + 1) / 2),
(y * 2) + ((extrudeY + 1) / 2));
}
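// For example, addCircleVertex(arr, 100, 50, 1, -1) emplaces (201, 100): the
// doubled tile coordinate carries the circle center while the low bit carries
// the extrusion direction (presumably unpacked again in the circle shader).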
/** | * Each corner has a pos that is the center of the circle and an extrusion
* vector that is where it points.
* @private
*/
class CircleBucket extends Bucket {
constructor(options) {
super(options, circleInterface);
}
addFeature(feature) {
const arrays = this.arrays;
for (const ring of loadGeometry(feature)) {
for (const point of ring) {
const x = point.x;
const y = point.y;
// Do not include points that are outside the tile boundaries.
if (x < 0 || x >= EXTENT || y < 0 || y >= EXTENT) continue;
// this geometry will be of the Point type, and we'll derive
// two triangles from it.
//
// ┌─────────┐
// │ 3 2 │
// │ │
// │ 0 1 │
// └─────────┘
const segment = arrays.prepareSegment(4);
const index = segment.vertexLength;
addCircleVertex(arrays.layoutVertexArray, x, y, -1, -1);
addCircleVertex(arrays.layoutVertexArray, x, y, 1, -1);
addCircleVertex(arrays.layoutVertexArray, x, y, 1, 1);
addCircleVertex(arrays.layoutVertexArray, x, y, -1, 1);
arrays.elementArray.emplaceBack(index, index + 1, index + 2);
arrays.elementArray.emplaceBack(index, index + 3, index + 2);
segment.vertexLength += 4;
segment.primitiveLength += 2;
}
}
arrays.populatePaintArrays(feature.properties);
}
}
module.exports = CircleBucket; | * Circles are represented by two triangles.
* |
drawing_utils.rs | use tui::layout::Rect;
use crate::app;
use std::{
cmp::{max, min},
time::Instant,
};
/// Return a (hard)-width vector for column widths.
///
/// * `total_width` is the, well, total width available. **NOTE:** This function automatically
/// takes away 2 from the width as part of the left/right
/// bounds.
/// * `hard_widths` is inflexible column widths. Use a `None` to represent a soft width.
/// * `soft_widths_min` is the lower limit for a soft width. Use `None` if a hard width goes there.
/// * `soft_widths_max` is the upper limit for a soft width, in percentage of the total width. Use
/// `None` if a hard width goes there.
/// * `soft_widths_desired` is the desired soft width. Use `None` if a hard width goes there.
/// * `left_to_right` is a boolean whether to go from left to right if true, or right to left if
/// false.
///
/// **NOTE:** This function ASSUMES THAT ALL PASSED SLICES ARE OF THE SAME SIZE.
///
/// **NOTE:** The returned vector may not be the same size as the slices, this is because including
/// 0-constraints breaks tui-rs.
pub fn get_column_widths(
total_width: u16, hard_widths: &[Option<u16>], soft_widths_min: &[Option<u16>],
soft_widths_max: &[Option<f64>], soft_widths_desired: &[Option<u16>], left_to_right: bool,
) -> Vec<u16> {
debug_assert!(
hard_widths.len() == soft_widths_min.len(),
"hard width length != soft width min length!"
);
debug_assert!(
soft_widths_min.len() == soft_widths_max.len(),
"soft width min length != soft width max length!"
);
debug_assert!(
soft_widths_max.len() == soft_widths_desired.len(),
"soft width max length != soft width desired length!"
);
if total_width > 2 {
let initial_width = total_width - 2;
let mut total_width_left = initial_width;
let mut column_widths: Vec<u16> = vec![0; hard_widths.len()];
let range: Vec<usize> = if left_to_right {
(0..hard_widths.len()).collect()
} else {
(0..hard_widths.len()).rev().collect()
};
for itx in &range {
if let Some(Some(hard_width)) = hard_widths.get(*itx) {
// Hard width...
let space_taken = min(*hard_width, total_width_left);
// TODO [COLUMN MOVEMENT]: Remove this
if *hard_width > space_taken {
break;
}
column_widths[*itx] = space_taken;
total_width_left -= space_taken;
total_width_left = total_width_left.saturating_sub(1);
} else if let (
Some(Some(soft_width_max)),
Some(Some(soft_width_min)),
Some(Some(soft_width_desired)),
) = (
soft_widths_max.get(*itx),
soft_widths_min.get(*itx),
soft_widths_desired.get(*itx),
) {
// Soft width...
let soft_limit = max(
if soft_width_max.is_sign_negative() {
*soft_width_desired
} else {
(*soft_width_max * initial_width as f64).ceil() as u16
},
*soft_width_min,
);
let space_taken = min(min(soft_limit, *soft_width_desired), total_width_left);
// TODO [COLUMN MOVEMENT]: Remove this
if *soft_width_min > space_taken {
break;
}
column_widths[*itx] = space_taken;
total_width_left -= space_taken;
total_width_left = total_width_left.saturating_sub(1);
}
}
while let Some(0) = column_widths.last() {
column_widths.pop();
}
if !column_widths.is_empty() {
// Redistribute remaining.
let amount_per_slot = total_width_left / column_widths.len() as u16;
total_width_left %= column_widths.len() as u16;
for (index, width) in column_widths.iter_mut().enumerate() {
if index < total_width_left.into() {
*width += amount_per_slot + 1;
} else {
*width += amount_per_slot;
}
}
}
column_widths
} else {
vec![]
}
}
pub fn get_search_start_position(
num_columns: usize, cursor_direction: &app::CursorDirection, cursor_bar: &mut usize,
current_cursor_position: usize, is_force_redraw: bool,
) -> usize {
if is_force_redraw {
*cursor_bar = 0;
}
match cursor_direction {
app::CursorDirection::Right => {
if current_cursor_position < *cursor_bar + num_columns {
// If, using previous_scrolled_position, we can see the element
// (so within that and + num_columns) just reuse the current previously scrolled position
*cursor_bar
} else if current_cursor_position >= num_columns {
// Else if the current position is past the last element visible in the list, omit
// until we can see that element
*cursor_bar = current_cursor_position - num_columns;
*cursor_bar
} else {
// Else, if it is not past the last element visible, do not omit anything
0
}
}
app::CursorDirection::Left => {
if current_cursor_position <= *cursor_bar {
// If it's past the first element, then show from that element downwards
*cursor_bar = current_cursor_position;
} else if current_cursor_position >= *cursor_bar + num_columns {
*cursor_bar = current_cursor_position - num_columns;
}
// Else, don't change what our start position is from whatever it is set to!
*cursor_bar
}
}
}
pub fn get_start_position(
num_rows: usize, scroll_direction: &app::ScrollDirection, scroll_position_bar: &mut usize,
currently_selected_position: usize, is_force_redraw: bool,
) -> usize {
if is_force_redraw {
*scroll_position_bar = 0;
}
match scroll_direction {
app::ScrollDirection::Down => {
if currently_selected_position < *scroll_position_bar + num_rows {
// If, using previous_scrolled_position, we can see the element
// (so within that and + num_rows) just reuse the current previously scrolled position
*scroll_position_bar
} else if currently_selected_position >= num_rows {
// Else if the current position is past the last element visible in the list, omit
// until we can see that element
*scroll_position_bar = currently_selected_position - num_rows;
*scroll_position_bar
} else {
// Else, if it is not past the last element visible, do not omit anything
0
}
}
app::ScrollDirection::Up => {
if currently_selected_position <= *scroll_position_bar {
// If it's past the first element, then show from that element downwards
*scroll_position_bar = currently_selected_position;
} else if currently_selected_position >= *scroll_position_bar + num_rows {
*scroll_position_bar = currently_selected_position - num_rows;
}
// Else, don't change what our start position is from whatever it is set to!
*scroll_position_bar
}
}
}
/// Calculate how many bars are to be drawn within basic mode's components.
pub fn calculate_basic_use_bars(use_percentage: f64, num_bars_available: usize) -> usize {
std::cmp::min(
(num_bars_available as f64 * use_percentage / 100.0).round() as usize,
num_bars_available,
)
}
/// Determine whether a graph x-label should be hidden.
pub fn should_hide_x_label(
always_hide_time: bool, autohide_time: bool, timer: &mut Option<Instant>, draw_loc: Rect,
) -> bool {
use crate::constants::*;
if always_hide_time || (autohide_time && timer.is_none()) {
true
} else if let Some(time) = timer {
if Instant::now().duration_since(*time).as_millis() < AUTOHIDE_TIMEOUT_MILLISECONDS.into() {
false
} else {
*timer = None;
true
}
} else {
draw_loc.height < TIME_LABEL_HEIGHT_LIMIT
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_get_start_position() |
#[test]
fn test_calculate_basic_use_bars() {
// Testing various breakpoints and edge cases.
assert_eq!(calculate_basic_use_bars(0.0, 15), 0);
assert_eq!(calculate_basic_use_bars(1.0, 15), 0);
assert_eq!(calculate_basic_use_bars(5.0, 15), 1);
assert_eq!(calculate_basic_use_bars(10.0, 15), 2);
assert_eq!(calculate_basic_use_bars(40.0, 15), 6);
assert_eq!(calculate_basic_use_bars(45.0, 15), 7);
assert_eq!(calculate_basic_use_bars(50.0, 15), 8);
assert_eq!(calculate_basic_use_bars(100.0, 15), 15);
assert_eq!(calculate_basic_use_bars(150.0, 15), 15);
}
#[test]
fn test_should_hide_x_label() {
use crate::constants::*;
use std::time::{Duration, Instant};
use tui::layout::Rect;
let rect = Rect::new(0, 0, 10, 10);
let small_rect = Rect::new(0, 0, 10, 6);
let mut under_timer = Some(Instant::now());
let mut over_timer =
Instant::now().checked_sub(Duration::from_millis(AUTOHIDE_TIMEOUT_MILLISECONDS + 100));
assert!(should_hide_x_label(true, false, &mut None, rect));
assert!(should_hide_x_label(false, true, &mut None, rect));
assert!(should_hide_x_label(false, false, &mut None, small_rect));
assert!(!should_hide_x_label(
false,
true,
&mut under_timer,
small_rect
));
assert!(under_timer.is_some());
assert!(should_hide_x_label(
false,
true,
&mut over_timer,
small_rect
));
assert!(over_timer.is_none());
}
#[test]
fn test_zero_width() {
assert_eq!(
get_column_widths(
0,
&[Some(1), None, None],
&[None, Some(1), Some(2)],
&[None, Some(0.125), Some(0.5)],
&[None, Some(10), Some(10)],
true
),
vec![],
);
}
#[test]
fn test_two_width() {
assert_eq!(
get_column_widths(
2,
&[Some(1), None, None],
&[None, Some(1), Some(2)],
&[None, Some(0.125), Some(0.5)],
&[None, Some(10), Some(10)],
true
),
vec![],
);
}
#[test]
fn test_non_zero_width() {
assert_eq!(
get_column_widths(
16,
&[Some(1), None, None],
&[None, Some(1), Some(2)],
&[None, Some(0.125), Some(0.5)],
&[None, Some(10), Some(10)],
true
),
vec![2, 2, 7],
);
}
}
| {
use crate::app::ScrollDirection::{self, Down, Up};
fn test(
bar: usize, num: usize, direction: ScrollDirection, selected: usize, force: bool,
expected_posn: usize, expected_bar: usize,
) {
let mut bar = bar;
assert_eq!(
get_start_position(num, &direction, &mut bar, selected, force),
expected_posn
);
assert_eq!(bar, expected_bar);
}
// Scrolling down from start
test(0, 10, Down, 0, false, 0, 0);
// Simple scrolling down
test(0, 10, Down, 1, false, 0, 0);
// Scrolling down from the middle high up
test(0, 10, Down, 5, false, 0, 0);
// Scrolling down into boundary
test(0, 10, Down, 11, false, 1, 1);
// Scrolling down with a non-zero bar
test(5, 10, Down, 15, false, 5, 5);
// Force redraw scrolling down (e.g. resize)
test(5, 15, Down, 15, true, 0, 0);
// Test jumping down
test(1, 10, Down, 20, true, 10, 10);
// Scrolling up from bottom
test(10, 10, Up, 20, false, 10, 10);
// Simple scrolling up
test(10, 10, Up, 19, false, 10, 10);
// Scrolling up from the middle
test(10, 10, Up, 10, false, 10, 10);
// Scrolling up into boundary
test(10, 10, Up, 9, false, 9, 9);
// Force redraw scrolling up (e.g. resize)
test(5, 10, Up, 15, true, 5, 5);
// Test jumping up
test(10, 10, Up, 0, false, 0, 0);
} |
area.ts | export class Area {
_id: string = '';
chName: string = '';
chPinyin: string = ''; | } |
constructor(values: Object = {}) {
Object.assign(this, values);
} |
ft_args.py | from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
task_type: Optional[str] = field(
default=None,
metadata={"help": "The type of task to train on: 'document' or 'user' -level"},
)
task_name: Optional[str] = field(
default=None,
metadata={"help": "The name of the task to train on: 'stance', 'sentiment', 'age', 'ope', or 'ner'"},
)
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
return_entity_level_metrics: bool = field(
default=False, metadata={"help": "NER return entity level metrics or not"}
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
use_history_output: bool = field(
default=False, metadata={"help": "Should use the history output from Ar_HuLM for FT tasks predictions (regression/user-level tasks mainly) or not."}
)
save_preds_labels: bool = field(
default=False, metadata={"help": "Should save the predictions and labels into text files or not."}
)
num_labels: Optional[int] = field(
default=None,
metadata={
"help": "Number of classification labels when fine tuning a 'document' type task."
},
)
train_table: Optional[str] = field(
default=None,
metadata={"help": "The input training data table in a csv or pickle file (path to the file)."})
dev_table: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data table in a csv or pickle file (path to the file) to validate the model during training."},
)
test_table: Optional[str] = field(
default=None,
metadata={"help": "An optional input test data table in a csv or pickle file (path to the file) to evaluate the trained model for perplexity."},
)
db: Optional[str] = field(
default=None,
metadata={"help": "The database where input training data table resides. (a mysql database)."}
)
hostname: Optional[str] = field(
default=None,
metadata={"help": "The host name or IP where the (mysql) database resides."}
)
max_train_blocks: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training blocks to this "
"value if set."
},
)
max_val_blocks: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of validation blocks to this "
"value if set." | },
)
block_size: Optional[int] = field(
default=1024,
metadata={
"help": "Optional input block sequence length after tokenization "
"(batched into instances of max_train_blocks/max_val_blocks , each of size block_size"
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
disable_hulm_batching: bool = field(
default=False, metadata={"help": "Batch the dataset as a flat list ([users, blocks * block_size]) instead of hulm style batching, i.e., [users, blocks, block_size] dimensions."}
)
agg_type: Optional[str] = field(
default=None,
metadata={
"help": "One of 'last', 'sum', 'avg', 'masked_last', 'masked_avg', 'masked_sum'"
"When using user_states/history for downstream tasks, what kind of "
"user_states/history aggregation to use. Currently, used only when saving states for users."
}
)
train_pkl: Optional[str] = field(
default=None,
metadata={"help": "The input training data pickle file."})
train_hist_pkl: Optional[str] = field(
default=None,
metadata={"help": "The input training users' historical data pickle file."})
dev_pkl: Optional[str] = field(
default=None,
metadata={"help": "The input dev data pickle file."})
dev_hist_pkl: Optional[str] = field(
default=None,
metadata={"help": "The input dev users' historical data pickle file."})
test_pkl: Optional[str] = field(
default=None,
metadata={"help": "The input test data pickle file."})
test_hist_pkl: Optional[str] = field(
default=None,
metadata={"help": "The input test users' historical data pickle file."})
def __post_init__(self):
if self.task_type is None or (self.task_type != 'user' and self.task_type != 'document'):
raise ValueError("Need to define task type as one of 'document' or 'user'")
if self.num_labels is None:
raise ValueError('num_labels required to fine-tune downstream tasks!')
if self.train_table is None and (self.dev_table is None and self.test_table is None):
raise ValueError("Need a training/validation (dev or test) table.")
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
init_seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of model initialization."})
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
use_qh05_wts: bool = field(
default=False,
metadata={
"help": "Whether to use (at 'layer_ins') pretrained query, key, value weights followed by"
"query weights (for concatenated query and history) initialized with 0.5 mean, instead of,"
"newly initialized query (for concatenated hidden states and history) and key weights"
}
)
use_hart_no_hist: bool = field(
default=False,
metadata={"help": "Whether to use HaRT model with no available historcal context."},
)
freeze_model: bool = field(
default=False, metadata={"help": "Freeze the transformer module of the model. Train only classification layer."}
)
load_non_PT_hulm_model: bool = field(
default=False, metadata={"help": "Whether to use a non-pretrained hulm model or not"}
)
add_history: bool = field(
default=False, metadata={"help": "Whether to use history (and history recurrence) or not."}
)
initial_history: Optional[str] = field(
default=None, metadata={"help": "A .pt file containing a reasonable initial history embedding as a pytorch tensor."}
)
#TODO: following args should ideally be a part of training_args
metric_for_early_stopping: Optional[str] = field(
default=None, metadata={"help": "The metric to use to compare two different models."}
)
early_stopping_patience: int = field(
default=3,
metadata={
"help": "To be used with `metric_for_early_stopping`."
"To stop training when the specified `metric_for_early_stopping` worsens for"
"`early_stopping_patience` evaluation calls."
}
)
early_stopping_threshold: Optional[float] = field(
default=0.0,
metadata={
"help": "Use with `metric_for_early_stopping` and `early_stopping_patience` to denote how"
"much the specified metric must improve to satisfy early stopping conditions."
}
)
search_params: bool = field(
default=False, metadata={"help": "To enable Hyperparameters search using ``optuna`` or ``Ray Tune``"}
)
use_ray: bool = field(
default=False, metadata={"help": "To enable Hyperparameters search using ``Ray Tune``"}
)
use_optuna: bool = field(
default=False, metadata={"help": "To enable Hyperparameters search using ``optuna``"}
)
num_trials: Optional[int] = field(
default=10,
metadata={
"help": "Number of trials to run when 'search_params' is true."
},
) | |
isBalanceTree_test.go | package btree
import (
"fmt"
"testing"
)
var bbt *Node
func init() {
bbt = &Node{Value: 1}
bbt.Left = &Node{Value: 2}
bbt.Right = &Node{Value: 3}
bbt.Left.Left = &Node{Value: 4}
bbt.Left.Right = &Node{Value: 5}
bbt.Right.Left = &Node{Value: 6}
bbt.Right.Right = &Node{Value: 7}
}
func | (t *testing.T) {
want := true
t.Log(fmt.Sprintf("IsBalanceTree ? want: %v", want))
r := IsBalanceTree(bbt)
if r == want {
t.Log("IsBalanceTree is ok")
} else {
t.Error("IsBalanceTree is not ok, result:", r)
}
PrintTree(bbt)
}
| TestIsBalanceTree |
0012_webhookevent.py | # Generated by Django 3.2.5 on 2021-09-21 12:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0011_alter_paymentorder_name'),
] | operations = [
migrations.CreateModel(
name='WebhookEvent',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('customer_id', models.CharField(max_length=200, verbose_name='Customer ID')),
('event_type', models.CharField(max_length=200, verbose_name='Event Type')),
('data_obj', models.JSONField(verbose_name='Data Object')),
('event_info', models.JSONField(verbose_name='Full Event Data')),
],
),
] | |
token_owner_record.rs | //! Token Owner Record Account
use std::slice::Iter;
use crate::{
addins::voter_weight::get_voter_weight_record_data_for_token_owner_record,
error::GovernanceError,
state::{
enums::GovernanceAccountType, governance::GovernanceConfig, realm::Realm,
realm_config::get_realm_config_data_for_realm,
},
PROGRAM_AUTHORITY_SEED,
};
use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
use solana_program::{
account_info::{next_account_info, AccountInfo},
program_error::ProgramError,
program_pack::IsInitialized,
pubkey::Pubkey,
};
use spl_governance_tools::account::{get_account_data, AccountMaxSize};
/// Governance Token Owner Record
/// Account PDA seeds: ['governance', realm, token_mint, token_owner ]
#[repr(C)]
#[derive(Clone, Debug, PartialEq, BorshDeserialize, BorshSerialize, BorshSchema)]
pub struct TokenOwnerRecord {
/// Governance account type
pub account_type: GovernanceAccountType,
/// The Realm the TokenOwnerRecord belongs to
pub realm: Pubkey,
/// Governing Token Mint the TokenOwnerRecord holds deposit for
pub governing_token_mint: Pubkey,
/// The owner (either single or multisig) of the deposited governing SPL Tokens
/// This is who can authorize a withdrawal of the tokens
pub governing_token_owner: Pubkey,
/// The amount of governing tokens deposited into the Realm
/// This amount is the voter weight used when voting on proposals
pub governing_token_deposit_amount: u64,
/// The number of votes cast by TokenOwner but not relinquished yet
/// Every time a vote is cast this number is increased and it's always decreased when relinquishing a vote regardless of the vote state
pub unrelinquished_votes_count: u32,
/// The total number of votes cast by the TokenOwner
/// If TokenOwner withdraws vote while voting is still in progress total_votes_count is decreased and the vote doesn't count towards the total
pub total_votes_count: u32,
/// The number of outstanding proposals the TokenOwner currently owns
/// The count is increased when TokenOwner creates a proposal
/// and decreased once it's either voted on (Succeeded or Defeated) or Cancelled
/// By default it's restricted to 1 outstanding Proposal per token owner
pub outstanding_proposal_count: u8,
/// Reserved space for future versions
pub reserved: [u8; 7],
/// A single account that is allowed to operate governance with the deposited governing tokens
/// It can be delegated to by the governing_token_owner or current governance_delegate
pub governance_delegate: Option<Pubkey>,
}
impl AccountMaxSize for TokenOwnerRecord {
fn get_max_size(&self) -> Option<usize> {
Some(154)
}
}
impl IsInitialized for TokenOwnerRecord {
fn is_initialized(&self) -> bool {
self.account_type == GovernanceAccountType::TokenOwnerRecord
}
}
impl TokenOwnerRecord {
/// Checks whether the provided Governance Authority signed the transaction
pub fn assert_token_owner_or_delegate_is_signer(
&self,
governance_authority_info: &AccountInfo,
) -> Result<(), ProgramError> {
if governance_authority_info.is_signer {
if &self.governing_token_owner == governance_authority_info.key {
return Ok(());
}
if let Some(governance_delegate) = self.governance_delegate {
if &governance_delegate == governance_authority_info.key {
return Ok(());
}
};
}
Err(GovernanceError::GoverningTokenOwnerOrDelegateMustSign.into())
}
/// Asserts TokenOwner has enough tokens to be allowed to create a proposal and doesn't have too many outstanding proposals
pub fn | (
&self,
realm_data: &Realm,
config: &GovernanceConfig,
voter_weight: u64,
) -> Result<(), ProgramError> {
let min_weight_to_create_proposal =
if self.governing_token_mint == realm_data.community_mint {
config.min_community_tokens_to_create_proposal
} else if Some(self.governing_token_mint) == realm_data.config.council_mint {
config.min_council_tokens_to_create_proposal
} else {
return Err(GovernanceError::InvalidGoverningTokenMint.into());
};
if voter_weight < min_weight_to_create_proposal {
return Err(GovernanceError::NotEnoughTokensToCreateProposal.into());
}
// The number of outstanding proposals is currently restricted to 10
// If there is a need to change it in the future then it should be added to realm or governance config
if self.outstanding_proposal_count >= 10 {
return Err(GovernanceError::TooManyOutstandingProposals.into());
}
Ok(())
}
/// Asserts TokenOwner has enough tokens to be allowed to create governance
pub fn assert_can_create_governance(
&self,
realm_data: &Realm,
voter_weight: u64,
) -> Result<(), ProgramError> {
let min_weight_to_create_governance =
if self.governing_token_mint == realm_data.community_mint {
realm_data.config.min_community_tokens_to_create_governance
} else if Some(self.governing_token_mint) == realm_data.config.council_mint {
// For council tokens it's enough to be in possession of any number of tokens
1
} else {
return Err(GovernanceError::InvalidGoverningTokenMint.into());
};
if voter_weight < min_weight_to_create_governance {
return Err(GovernanceError::NotEnoughTokensToCreateGovernance.into());
}
Ok(())
}
/// Asserts TokenOwner can withdraw tokens from Realm
pub fn assert_can_withdraw_governing_tokens(&self) -> Result<(), ProgramError> {
if self.unrelinquished_votes_count > 0 {
return Err(
GovernanceError::AllVotesMustBeRelinquishedToWithdrawGoverningTokens.into(),
);
}
if self.outstanding_proposal_count > 0 {
return Err(
GovernanceError::AllProposalsMustBeFinalisedToWithdrawGoverningTokens.into(),
);
}
Ok(())
}
/// Decreases outstanding_proposal_count
pub fn decrease_outstanding_proposal_count(&mut self) {
// Previous versions didn't use the count and it can be already 0
// TODO: Remove this check once all outstanding proposals on mainnet are resolved
if self.outstanding_proposal_count != 0 {
self.outstanding_proposal_count =
self.outstanding_proposal_count.checked_sub(1).unwrap();
}
}
/// Resolves voter's weight using either the amount deposited into the realm or weight provided by voter weight addin (if configured)
pub fn resolve_voter_weight(
&self,
program_id: &Pubkey,
account_info_iter: &mut Iter<AccountInfo>,
realm: &Pubkey,
realm_data: &Realm,
) -> Result<u64, ProgramError> {
// if the realm uses an addin for community voter weight then use the externally provided weight
if realm_data.config.use_community_voter_weight_addin
&& realm_data.community_mint == self.governing_token_mint
{
let realm_config_info = next_account_info(account_info_iter)?;
let voter_weight_record_info = next_account_info(account_info_iter)?;
let realm_config_data =
get_realm_config_data_for_realm(program_id, realm_config_info, realm)?;
let voter_weight_record_data = get_voter_weight_record_data_for_token_owner_record(
&realm_config_data.community_voter_weight_addin.unwrap(),
voter_weight_record_info,
self,
)?;
voter_weight_record_data.assert_is_up_to_date()?;
Ok(voter_weight_record_data.voter_weight)
} else {
Ok(self.governing_token_deposit_amount)
}
}
}
/// Returns TokenOwnerRecord PDA address
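///
/// Illustrative sketch (the pubkeys below are placeholders for whatever realm,
/// mint, and owner keys the caller already has, not specific accounts):
///
/// ```ignore
/// let token_owner_record_address = get_token_owner_record_address(
///     &program_id,
///     &realm,
///     &governing_token_mint,
///     &governing_token_owner,
/// );
/// ```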
pub fn get_token_owner_record_address(
program_id: &Pubkey,
realm: &Pubkey,
governing_token_mint: &Pubkey,
governing_token_owner: &Pubkey,
) -> Pubkey {
Pubkey::find_program_address(
&get_token_owner_record_address_seeds(realm, governing_token_mint, governing_token_owner),
program_id,
)
.0
}
/// Returns TokenOwnerRecord PDA seeds
pub fn get_token_owner_record_address_seeds<'a>(
realm: &'a Pubkey,
governing_token_mint: &'a Pubkey,
governing_token_owner: &'a Pubkey,
) -> [&'a [u8]; 4] {
[
PROGRAM_AUTHORITY_SEED,
realm.as_ref(),
governing_token_mint.as_ref(),
governing_token_owner.as_ref(),
]
}
/// Deserializes TokenOwnerRecord account and checks owner program
pub fn get_token_owner_record_data(
program_id: &Pubkey,
token_owner_record_info: &AccountInfo,
) -> Result<TokenOwnerRecord, ProgramError> {
get_account_data::<TokenOwnerRecord>(program_id, token_owner_record_info)
}
/// Deserializes TokenOwnerRecord account and checks its PDA against the provided seeds
pub fn get_token_owner_record_data_for_seeds(
program_id: &Pubkey,
token_owner_record_info: &AccountInfo,
token_owner_record_seeds: &[&[u8]],
) -> Result<TokenOwnerRecord, ProgramError> {
let (token_owner_record_address, _) =
Pubkey::find_program_address(token_owner_record_seeds, program_id);
if token_owner_record_address != *token_owner_record_info.key {
return Err(GovernanceError::InvalidTokenOwnerRecordAccountAddress.into());
}
get_token_owner_record_data(program_id, token_owner_record_info)
}
/// Deserializes TokenOwnerRecord account and asserts it belongs to the given realm
pub fn get_token_owner_record_data_for_realm(
program_id: &Pubkey,
token_owner_record_info: &AccountInfo,
realm: &Pubkey,
) -> Result<TokenOwnerRecord, ProgramError> {
let token_owner_record_data = get_token_owner_record_data(program_id, token_owner_record_info)?;
if token_owner_record_data.realm != *realm {
return Err(GovernanceError::InvalidRealmForTokenOwnerRecord.into());
}
Ok(token_owner_record_data)
}
/// Deserializes TokenOwnerRecord account and asserts it belongs to the given realm and is for the given governing mint
pub fn get_token_owner_record_data_for_realm_and_governing_mint(
program_id: &Pubkey,
token_owner_record_info: &AccountInfo,
realm: &Pubkey,
governing_token_mint: &Pubkey,
) -> Result<TokenOwnerRecord, ProgramError> {
let token_owner_record_data =
get_token_owner_record_data_for_realm(program_id, token_owner_record_info, realm)?;
if token_owner_record_data.governing_token_mint != *governing_token_mint {
return Err(GovernanceError::InvalidGoverningMintForTokenOwnerRecord.into());
}
Ok(token_owner_record_data)
}
/// Deserializes TokenOwnerRecord account and checks its address is the given proposal_owner
pub fn get_token_owner_record_data_for_proposal_owner(
program_id: &Pubkey,
token_owner_record_info: &AccountInfo,
proposal_owner: &Pubkey,
) -> Result<TokenOwnerRecord, ProgramError> {
if token_owner_record_info.key != proposal_owner {
return Err(GovernanceError::InvalidProposalOwnerAccount.into());
}
get_token_owner_record_data(program_id, token_owner_record_info)
}
#[cfg(test)]
mod test {
use solana_program::borsh::{get_packed_len, try_from_slice_unchecked};
use super::*;
#[test]
fn test_max_size() {
let token_owner_record = TokenOwnerRecord {
account_type: GovernanceAccountType::TokenOwnerRecord,
realm: Pubkey::new_unique(),
governing_token_mint: Pubkey::new_unique(),
governing_token_owner: Pubkey::new_unique(),
governing_token_deposit_amount: 10,
governance_delegate: Some(Pubkey::new_unique()),
unrelinquished_votes_count: 1,
total_votes_count: 1,
outstanding_proposal_count: 1,
reserved: [0; 7],
};
let size = get_packed_len::<TokenOwnerRecord>();
assert_eq!(token_owner_record.get_max_size(), Some(size));
}
#[derive(Clone, Debug, PartialEq, BorshDeserialize, BorshSerialize, BorshSchema)]
pub struct TokenOwnerRecordV1 {
pub account_type: GovernanceAccountType,
pub realm: Pubkey,
pub governing_token_mint: Pubkey,
pub governing_token_owner: Pubkey,
pub governing_token_deposit_amount: u64,
pub unrelinquished_votes_count: u32,
pub total_votes_count: u32,
pub reserved: [u8; 8],
pub governance_delegate: Option<Pubkey>,
}
#[test]
fn test_deserialize_v1_0_account() {
let token_owner_record_v1_0 = TokenOwnerRecordV1 {
account_type: GovernanceAccountType::TokenOwnerRecord,
realm: Pubkey::new_unique(),
governing_token_mint: Pubkey::new_unique(),
governing_token_owner: Pubkey::new_unique(),
governing_token_deposit_amount: 10,
unrelinquished_votes_count: 2,
total_votes_count: 5,
reserved: [0; 8],
governance_delegate: Some(Pubkey::new_unique()),
};
let mut token_owner_record_v1_0_data = vec![];
token_owner_record_v1_0
.serialize(&mut token_owner_record_v1_0_data)
.unwrap();
let token_owner_record_v1_1_data: TokenOwnerRecord =
try_from_slice_unchecked(&token_owner_record_v1_0_data).unwrap();
assert_eq!(
token_owner_record_v1_0.account_type,
token_owner_record_v1_1_data.account_type
);
assert_eq!(0, token_owner_record_v1_1_data.outstanding_proposal_count);
assert_eq!(
token_owner_record_v1_0.governance_delegate,
token_owner_record_v1_1_data.governance_delegate
);
}
}
| assert_can_create_proposal |
matcher_test.go | /*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package xdsclient
import (
"context"
"testing"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcutil"
iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/xds/matcher"
"google.golang.org/grpc/metadata"
)
func TestAndMatcherMatch(t *testing.T) {
tests := []struct {
name string
pm pathMatcher
hm matcher.HeaderMatcher
info iresolver.RPCInfo
want bool
}{
{
name: "both match",
pm: newPathExactMatcher("/a/b", false),
hm: matcher.NewHeaderExactMatcher("th", "tv"),
info: iresolver.RPCInfo{
Method: "/a/b",
Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
},
want: true,
},
{
name: "both match with path case insensitive",
pm: newPathExactMatcher("/A/B", true),
hm: matcher.NewHeaderExactMatcher("th", "tv"),
info: iresolver.RPCInfo{
Method: "/a/b",
Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
},
want: true,
},
{
name: "only one match",
pm: newPathExactMatcher("/a/b", false),
hm: matcher.NewHeaderExactMatcher("th", "tv"),
info: iresolver.RPCInfo{
Method: "/z/y",
Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
},
want: false,
},
{
name: "both not match",
pm: newPathExactMatcher("/z/y", false),
hm: matcher.NewHeaderExactMatcher("th", "abc"),
info: iresolver.RPCInfo{
Method: "/a/b",
Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")),
},
want: false,
},
{
name: "fake header",
pm: newPathPrefixMatcher("/", false),
hm: matcher.NewHeaderExactMatcher("content-type", "fake"),
info: iresolver.RPCInfo{
Method: "/a/b",
Context: grpcutil.WithExtraMetadata(context.Background(), metadata.Pairs(
"content-type", "fake",
)),
},
want: true,
},
{
name: "binary header",
pm: newPathPrefixMatcher("/", false),
hm: matcher.NewHeaderPresentMatcher("t-bin", true),
info: iresolver.RPCInfo{
Method: "/a/b",
Context: grpcutil.WithExtraMetadata(
metadata.NewOutgoingContext(context.Background(), metadata.Pairs("t-bin", "123")), metadata.Pairs(
"content-type", "fake",
)),
},
// Shouldn't match binary header, even though it's in metadata.
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
a := newCompositeMatcher(tt.pm, []matcher.HeaderMatcher{tt.hm}, nil)
if got := a.Match(tt.info); got != tt.want {
t.Errorf("match() = %v, want %v", got, tt.want)
}
})
}
}
func | (t *testing.T) {
const fraction = 500000
fm := newFractionMatcher(fraction)
defer func() {
RandInt63n = grpcrand.Int63n
}()
// rand > fraction, should return false.
RandInt63n = func(n int64) int64 {
return fraction + 1
}
if matched := fm.match(); matched {
t.Errorf("match() = %v, want not match", matched)
}
// rand == fraction, should return true.
RandInt63n = func(n int64) int64 {
return fraction
}
if matched := fm.match(); !matched {
t.Errorf("match() = %v, want match", matched)
}
// rand < fraction, should return true.
RandInt63n = func(n int64) int64 {
return fraction - 1
}
if matched := fm.match(); !matched {
t.Errorf("match() = %v, want match", matched)
}
}
func (s) TestMatchTypeForDomain(t *testing.T) {
tests := []struct {
d string
want domainMatchType
}{
{d: "", want: domainMatchTypeInvalid},
{d: "*", want: domainMatchTypeUniversal},
{d: "bar.*", want: domainMatchTypePrefix},
{d: "*.abc.com", want: domainMatchTypeSuffix},
{d: "foo.bar.com", want: domainMatchTypeExact},
{d: "foo.*.com", want: domainMatchTypeInvalid},
}
for _, tt := range tests {
if got := matchTypeForDomain(tt.d); got != tt.want {
t.Errorf("matchTypeForDomain(%q) = %v, want %v", tt.d, got, tt.want)
}
}
}
func (s) TestMatch(t *testing.T) {
tests := []struct {
name string
domain string
host string
wantTyp domainMatchType
wantMatched bool
}{
{name: "invalid-empty", domain: "", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false},
{name: "invalid", domain: "a.*.b", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false},
{name: "universal", domain: "*", host: "abc.com", wantTyp: domainMatchTypeUniversal, wantMatched: true},
{name: "prefix-match", domain: "abc.*", host: "abc.123", wantTyp: domainMatchTypePrefix, wantMatched: true},
{name: "prefix-no-match", domain: "abc.*", host: "abcd.123", wantTyp: domainMatchTypePrefix, wantMatched: false},
{name: "suffix-match", domain: "*.123", host: "abc.123", wantTyp: domainMatchTypeSuffix, wantMatched: true},
{name: "suffix-no-match", domain: "*.123", host: "abc.1234", wantTyp: domainMatchTypeSuffix, wantMatched: false},
{name: "exact-match", domain: "foo.bar", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: true},
{name: "exact-no-match", domain: "foo.bar.com", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if gotTyp, gotMatched := match(tt.domain, tt.host); gotTyp != tt.wantTyp || gotMatched != tt.wantMatched {
t.Errorf("match() = %v, %v, want %v, %v", gotTyp, gotMatched, tt.wantTyp, tt.wantMatched)
}
})
}
}
| TestFractionMatcherMatch |
item.ts | interface Item {
id: string, | image_url: string
}
export default Item; | title: string, |
Lab1.js | /*
* Lab1.js
* Brett Ratner
*
 * The purpose of this assignment is to get familiar with the prompt and alert
 * functions, as well as taking in information given to us by the user and
 * manipulating that data and converting it into different data types.
*/
| //Calculation 1
//Asks the user for the first number and stores it in a.
var a = prompt("type a numeric value");
//Asks the user for the second number and stores it in b.
var b = prompt("type another numeric value");
var c;
//Computes the hypotenuse from the two user-entered values (Pythagorean theorem)
c = Math.sqrt(a*a + b*b);
//produces an alert box to tell the user what the output is
alert(c);
//Calculation 2
//Asks the user to enter a total number of seconds
var seconds = prompt("Enter number of total seconds");
//takes the string that was entered in and turns it into an integer
seconds = parseInt(seconds);
//calculates how many whole minutes are in the total seconds
var minutes = Math.floor(seconds/60);
//calculates how many whole hours are in those minutes
var hours = Math.floor(minutes/60);
// keeps only the leftover minutes that do not add up to a full hour
minutes = minutes%60;
// keeps only the leftover seconds that do not add up to a full minute
seconds = seconds%60;
//displays an alert box to tell the user what the output is
alert( hours +" hours " + minutes + " min " + seconds + " sec");
//Calculation 3
//Asks the user to enter a string that is separated by commas
var stringOfValues = prompt("enter a string that uses a comma for a delimiter");
// Creates an array
var array = new Array();
// Sets the content in the array to be the string split up by commas
array = stringOfValues.split(',');
var newString;
//puts the string back together from the array, now separating each element with a ';' instead of a comma
newString = array.join(';');
//displays an alert box to tell the user what the output is
alert("The new String is: " + newString);
//Calculation 4
//Asks the user to enter a date
var inputDate = prompt("Enter a date:");
//takes the users input and stores it as a date object
var newDate = new Date(inputDate);
//parses the user's input into milliseconds since the epoch
var milli = Date.parse(inputDate);
//adds one day's worth of milliseconds to the user-entered date (in milliseconds)
milli += 60*60*1000*24;
//stores the new date in milliseconds
var newDate2 = new Date(milli);
//puts the new date back into a string for the user
var stringDate = newDate2.toDateString();
//displays an alert box to tell the user what the output is
alert("The new date, that is one day later is " + stringDate); | |
build.rs | fn main() {
// FIXME: It would be nice to run this only when tests are run.
println!("cargo:rerun-if-changed=tests/pod.c");
| let libspa = libs.get_by_name("libspa").unwrap();
cc::Build::new()
.file("tests/pod.c")
.shared_flag(true)
.flag("-Wno-missing-field-initializers")
.includes(&libspa.include_paths)
.compile("pod");
} | let libs = system_deps::Config::new()
.probe()
.expect("Cannot find libspa"); |
doc.go | // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package cloudhsm provides the client and types for making API
// requests to Amazon CloudHSM.
//
// This is documentation for AWS CloudHSM Classic. For more information, see
// AWS CloudHSM Classic FAQs (http://aws.amazon.com/cloudhsm/faqs-classic/),
// the AWS CloudHSM Classic User Guide (http://docs.aws.amazon.com/cloudhsm/classic/userguide/),
// and the AWS CloudHSM Classic API Reference (http://docs.aws.amazon.com/cloudhsm/classic/APIReference/).
//
// For information about the current version of AWS CloudHSM, see AWS CloudHSM
// (http://aws.amazon.com/cloudhsm/), the AWS CloudHSM User Guide (http://docs.aws.amazon.com/cloudhsm/latest/userguide/),
// and the AWS CloudHSM API Reference (http://docs.aws.amazon.com/cloudhsm/latest/APIReference/).
//
// See https://docs.aws.amazon.com/goto/WebAPI/cloudhsm-2014-05-30 for more information on this service.
//
// See cloudhsm package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/cloudhsm/
//
// Using the Client
//
// To contact Amazon CloudHSM with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently. | // https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the Amazon CloudHSM client CloudHSM for more
// information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/cloudhsm/#New
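//
// A minimal usage sketch (illustrative only; it assumes AWS credentials and a
// region are already configured for the session):
//
//    sess := session.Must(session.NewSession())
//    svc := cloudhsm.New(sess)
//    out, err := svc.ListHsms(&cloudhsm.ListHsmsInput{})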
package cloudhsm | //
// See the SDK's documentation for more information on how to use the SDK. |
provider.rs | use crate::color::ColorScheme;
use crate::error::{ErrorKind, Result};
use async_std::{fs, prelude::*};
use dirs;
use failure::ResultExt;
use futures::future;
use std::path::PathBuf;
use surf::{middleware::HttpClient, Request};
/// A GitHub repository that provides color schemes.
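///
/// A minimal usage sketch (illustrative; "Dracula" is just an example scheme name,
/// and the async call is driven with async-std's executor, which this crate
/// already depends on):
///
/// ```ignore
/// let provider = Provider::iterm();
/// let scheme = async_std::task::block_on(provider.get("Dracula")).unwrap();
/// ```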
pub struct Provider {
user_name: String,
repo_name: String,
list_path: String,
extension: String,
}
impl Provider {
/// Returns a provider for `mbadolato/iTerm2-Color-Schemes`.
pub fn iterm() -> Self {
Provider::new(
"mbadolato",
"iTerm2-Color-Schemes",
"schemes",
".itermcolors",
)
}
/// Returns a provider for `Mayccoll/Gogh`.
pub fn gogh() -> Self {
Provider::new("Mayccoll", "Gogh", "themes", ".sh")
}
/// Returns a provider instance.
fn new(user_name: &str, repo_name: &str, list_path: &str, extension: &str) -> Self {
Provider {
user_name: user_name.to_string(),
repo_name: repo_name.to_string(),
list_path: list_path.to_string(),
extension: extension.to_string(),
}
}
/// Fetches the raw content of the color scheme for the given name.
pub async fn get(&self, name: &str) -> Result<ColorScheme> {
let req = surf::get(&self.individual_url(name));
let body = http_get(req).await?;
self.parse_color_scheme(&body)
}
/// Returns all color schemes in the provider.
///
/// This function caches color schemes in the file system.
pub async fn list(self) -> Result<Vec<(String, ColorScheme)>> {
match self.read_color_schemes().await {
Ok(color_schemes) => {
if color_schemes.len() > 0 {
return Ok(color_schemes);
}
}
_ => {}
}
// If there are no cached files, download them.
self.download_all().await?;
self.read_color_schemes().await
}
/// Download color scheme files into the cache directory.
pub async fn download_all(&self) -> Result<()> {
let repo_dir = self.repo_dir()?;
eprintln!(
"Downloading color schemes into {}",
repo_dir.to_str().unwrap()
);
// Create the cache directory if it doesn't exist.
fs::create_dir_all(&repo_dir)
.await
.context(ErrorKind::CreateDirAll)?;
let list_req = surf::get(&self.list_url());
let list_body = http_get(list_req).await?;
let items = json::parse(&list_body).context(ErrorKind::ParseJson)?;
// Download and save color scheme files.
let mut futures = Vec::new();
let client = surf::Client::new();
for item in items.members() {
let filename = item["name"].as_str().unwrap();
// Ignoring files starting with `_` for Gogh.
if filename.starts_with('_') || !filename.ends_with(&self.extension) {
continue;
}
let name = filename.replace(&self.extension, "");
let req = client.get(&self.individual_url(&name));
futures.push(self.download_color_scheme(req, name));
// Download files in batches.
//
// If this requests all files in parallel, the HTTP client (isahc) throws the
// following error:
//
// HTTP request error: ConnectFailed: failed to connect to the server
//
// isahc doesn't limit the number of connections per client by default, but
// it exposes an API to limit it. However, surf doesn't expose the API.
if futures.len() > 10 {
future::try_join_all(futures).await?;
futures = Vec::new();
}
}
Ok(())
}
/// Read color schemes from the cache directory.
async fn read_color_schemes(&self) -> Result<Vec<(String, ColorScheme)>> {
let mut entries = fs::read_dir(self.repo_dir()?)
.await
.context(ErrorKind::ReadDir)?;
// Collect futures and run them in parallel.
let mut futures = Vec::new();
while let Some(entry) = entries.next().await {
let dir_entry = entry.context(ErrorKind::ReadDirEntry)?;
let filename = dir_entry.file_name().into_string().unwrap();
let name = filename.replace(&self.extension, "").to_string();
futures.push(self.read_color_scheme(name));
}
let color_schemes = future::try_join_all(futures).await?;
Ok(color_schemes)
}
/// Reads a color scheme from the repository cache.
async fn read_color_scheme(&self, name: String) -> Result<(String, ColorScheme)> {
let file_path = self.individual_path(&name)?;
let body = fs::read_to_string(file_path)
.await
.context(ErrorKind::ReadFile)?;
let color_scheme = self.parse_color_scheme(&body)?;
Ok((name, color_scheme))
}
// TODO: Pass `Client` instead of `Request`. However, the ownership rule blocks it...
/// Downloads a color scheme file and save it in the cache directory.
async fn download_color_scheme<C: HttpClient>(
&self,
req: Request<C>,
name: String,
) -> Result<()> {
let body = http_get(req).await?;
fs::write(self.individual_path(&name)?, body)
.await
.context(ErrorKind::WriteFile)?;
Ok(())
}
/// The repository cache directory.
fn repo_dir(&self) -> Result<PathBuf> {
let mut repo_dir = dirs::cache_dir().ok_or(ErrorKind::NoCacheDir)?;
repo_dir.push("colortty");
repo_dir.push("repositories");
repo_dir.push(&self.user_name);
repo_dir.push(&self.repo_name);
Ok(repo_dir)
}
/// Returns the path for the given color scheme name.
fn individual_path(&self, name: &str) -> Result<PathBuf> {
let mut file_path = self.repo_dir()?;
file_path.push(name);
file_path.set_extension(&self.extension[1..]);
Ok(file_path)
}
/// Returns the URL for a color scheme on GitHub.
fn individual_url(&self, name: &str) -> String {
format!(
"https://raw.githubusercontent.com/{}/{}/master/{}/{}{}",
self.user_name, self.repo_name, self.list_path, name, self.extension
)
}
/// Returns the URL for the color scheme list on GitHub API.
fn list_url(&self) -> String {
format!(
"https://api.github.com/repos/{}/{}/contents/{}",
self.user_name, self.repo_name, self.list_path
)
}
/// Parses a color scheme data.
fn parse_color_scheme(&self, body: &str) -> Result<ColorScheme> { | } else {
ColorScheme::from_gogh(&body)
}
}
}
/// Returns the body of the given request.
///
/// Fails when the URL responds with a non-200 status code. Sends `colortty` as the `User-Agent` header.
async fn http_get<C: HttpClient>(req: Request<C>) -> Result<String> {
let mut res = req
.set_header("User-Agent", "colortty")
.await
.map_err(|e| {
println!("HTTP request error: {}", e);
ErrorKind::HttpGet
})?;
if !res.status().is_success() {
println!("HTTP status code: {}", res.status());
return Err(ErrorKind::HttpGet.into());
}
// TODO: Propagate information from the original error.
let body = res.body_string().await.map_err(|_| ErrorKind::HttpGet)?;
Ok(body)
} | // TODO: Think about better abstraction.
if self.extension == ".itermcolors" {
ColorScheme::from_iterm(&body) |
Modal.test.js | import React from 'react';
import { shallow } from 'enzyme'; |
describe(`Modal`, () => {
const onClose = jest.fn();
it('should not render header content', () => {
const wrapper = shallow(
<Modal isOpen={true} onClose={onClose}>
Message
</Modal>
);
const message = () => wrapper.find('Modal__StyleHeading');
expect(message().exists()).toBe(false);
});
it('should not render closable button', () => {
const wrapper = shallow(
<Modal isOpen={true} onClose={onClose} closable={false}>
Message
</Modal>
);
const message = () => wrapper.find('Modal__StyledHeader button');
expect(message().exists()).toBe(false);
});
it('should render children content', () => {
const wrapper = shallow(
<Modal isOpen={true} onClose={onClose}>
Message
</Modal>
);
const message = () => wrapper.find('Modal__StyledContent');
expect(message().text()).toBe('Message');
});
it('should render footer content', () => {
const wrapper = shallow(
<Modal isOpen={true} onClose={onClose} footer={<p>Footer</p>}>
Message
</Modal>
);
const message = () => wrapper.find('Modal__StyledFooter p');
expect(message().text()).toBe('Footer');
});
it('should close the modal when clicking close button', () => {
const wrapper = shallow(
<Modal isOpen={true} onClose={onClose} footer={<p>Footer</p>}>
Message
</Modal>
);
wrapper.find('Modal__StyledHeader button').simulate('click');
expect(onClose).toHaveBeenCalled();
});
it('should close the modal when clicking outside modal content', () => {
const wrapper = shallow(
<Modal isOpen={true} onClose={onClose} footer={<p>Footer</p>}>
Message
</Modal>
);
wrapper.find('Modal__StyledOverlay').simulate('click');
expect(onClose).toHaveBeenCalled();
});
}); | import Modal from '.'; |
rewrite_monitoring.py | #!/home/daniel/anaconda3/bin/python
# -*- coding: utf-8 -*-
"""
================================================
rewrite_monitoring
================================================
This program rewrites monitoring time series files into the correct
time order
"""
# Author: fvj
# License: BSD 3 clause
import datetime
import atexit
import numpy as np
import os
from pyrad.io import read_monitoring_ts, write_monitoring_ts
from pyrad.graph import plot_monitoring_ts
from pyrad.io import generate_field_name_str, get_fieldname_pyart
print(__doc__)
def main():
|
def _print_end_msg(text):
"""
prints end message
Parameters
----------
text : str
the text to be printed
Returns
-------
Nothing
"""
print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
# ---------------------------------------------------------
# Start main:
# ---------------------------------------------------------
if __name__ == "__main__":
main()
| """
"""
input_base = (
'/store/msrad/radar/pyrad_products/')
output_base = (
'/store/msrad/radar/pyrad_products/')
rad_vec = ['D']
var_vec = ['PhiDP0', 'RhoHV_rain', 'ZDR_prec', 'ZDR_snow', 'dBZ_bias']
year_vec = [datetime.datetime(2018, 1, 1)]
plot_data = True
print("====== Monitoring rewriting started: %s" %
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
atexit.register(_print_end_msg,
"====== Monitoring rewriting finished: ")
for i, rad in enumerate(rad_vec):
print('Processing Radar '+rad)
for j, var in enumerate(var_vec):
if var == 'dBZ':
basedir = 'rad4alp_gc_PH'+rad
dsdir = 'monitoring_clt_Zh'
mon_type = 'GC_MONITORING'
quantiles = [50., 95., 99.]
elif var == 'dBZv':
basedir = 'rad4alp_gc_PH'+rad
dsdir = 'monitoring_clt_Zv'
mon_type = 'GC_MONITORING'
quantiles = [50., 95., 99.]
elif var == 'RhoHV_rain':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_RhoHV'
mon_type = 'MONITORING'
quantiles = [65., 80., 95.]
elif var == 'PhiDP0':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_PhiDP0'
mon_type = 'MONITORING'
quantiles = [25., 50., 75.]
elif var == 'ZDR_prec':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_ZDR'
mon_type = 'MONITORING'
quantiles = [25., 50., 75.]
elif var == 'ZDR_snow':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_ZDR_snow'
mon_type = 'MONITORING'
quantiles = [25., 50., 75.]
elif var == 'dBZ_bias':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_Zh_bias'
mon_type = 'MONITORING'
quantiles = [25., 50., 75.]
input_path = input_base+basedir+'/'+dsdir+'/VOL_TS/'
output_path = output_base+basedir+'/'+dsdir+'/VOL_TS/'
if not os.path.isdir(output_path):
os.makedirs(output_path)
print('- Processing Variable '+var)
for k, year in enumerate(year_vec):
print('-- Processing Year '+year.strftime('%Y'))
fname_input = (
input_path+year.strftime('%Y')+'_'+rad +
'_ts_'+mon_type+'_'+var+'.csv')
fname_output = (
output_path+year.strftime('%Y')+'_'+rad +
'_ts_'+mon_type+'_'+var+'.csv')
figfname = [
output_path+year.strftime('%Y')+'_'+rad +
'_ts_'+mon_type+'_'+var+'.png']
date, np_t_vec, cquant_vec, lquant_vec, hquant_vec = (
read_monitoring_ts(fname_input, sort_by_date=True))
if date is None:
continue
val_vec = np.ma.asarray(
[lquant_vec, cquant_vec, hquant_vec]).T
fname = write_monitoring_ts(
date, np_t_vec, val_vec, quantiles, var,
fname_output, rewrite=True)
print('written file '+fname)
if not plot_data:
continue
titldate = (date[0].strftime('%Y%m%d')+'-' +
date[-1].strftime('%Y%m%d'))
titl = rad+' Monitoring '+titldate
labely = generate_field_name_str(var)
if var == 'dBZ':
if rad == 'A':
ref_value = 49.5
vmin = 44.5
vmax = 54.5
np_min = 100000
elif rad == 'D':
ref_value = 48.5
vmin = 43.5
vmax = 53.5
np_min = 20000
elif rad == 'L':
ref_value = 67.
vmin = 62.
vmax = 72.
np_min = 100000
elif rad == 'P':
ref_value = 69.
vmin = 64.
vmax = 74.
np_min = 100000
elif rad == 'W':
ref_value = 27.5
vmin = 22.5
vmax = 32.5
np_min = 100000
elif var == 'dBZv':
if rad == 'A':
ref_value = 51.5
vmin = 46.5
vmax = 56.5
np_min = 100000
elif rad == 'D':
ref_value = 50.5
vmin = 45.5
vmax = 55.5
np_min = 20000
elif rad == 'L':
ref_value = 69.5
vmin = 64.5
vmax = 74.5
np_min = 100000
elif rad == 'P':
ref_value = 68.5
vmin = 63.5
vmax = 73.5
np_min = 100000
elif rad == 'W':
ref_value = 26.5
vmin = 21.5
vmax = 31.5
np_min = 100000
elif var == 'RhoHV_rain':
ref_value = 0.99
vmin = 0.95
vmax = 1.01
np_min = 5000
elif var == 'PhiDP0':
ref_value = 0.
vmin = -20.
vmax = 20.
np_min = 500000
elif var == 'ZDR_prec':
ref_value = 0.2
vmin = -2.
vmax = 2.
np_min = 5000
elif var == 'ZDR_snow':
ref_value = 0.2
vmin = -2.
vmax = 2.
np_min = 5000
elif var == 'dBZ_bias':
ref_value = 0.
vmin = -30.
vmax = 30.
np_min = 100
fname = plot_monitoring_ts(
date, np_t_vec, cquant_vec, lquant_vec, hquant_vec,
get_fieldname_pyart(var), figfname,
ref_value=ref_value, vmin=vmin, vmax=vmax, np_min=np_min,
labelx='Time UTC', labely=labely, titl=titl)
print('plotted file '+' '.join(fname)) |
sentiment_analysis.py | from textblob import TextBlob
pd.options.mode.chained_assignment = None # ignores the SettingWithCopy Warning
df = pd.read_csv('INPUT.csv', encoding = 'utf8')
df['polarity'] = 0.0
df['subjectivity'] = 0.0
for i in range(0, len(df.index)):
print(i)
blob = TextBlob(str(df['text'][i]))
df['subjectivity'][i] = blob.sentiment.subjectivity
df['polarity'][i] = blob.sentiment.polarity
print(df.head())
df.to_csv('OUTPUT.csv', encoding = 'utf8') | import pandas as pd |
|
es_IC.go | package es_IC
import (
"math"
"strconv"
"time"
"github.com/haiyiyun/validator/locales"
"github.com/haiyiyun/validator/locales/currency"
)
type es_IC struct {
locale string
pluralsCardinal []locales.PluralRule
pluralsOrdinal []locales.PluralRule
pluralsRange []locales.PluralRule
decimal string
group string
minus string
percent string
percentSuffix string
perMille string
timeSeparator string
inifinity string
currencies []string // idx = enum of currency code
currencyPositiveSuffix string
currencyNegativeSuffix string
monthsAbbreviated []string
monthsNarrow []string
monthsWide []string
daysAbbreviated []string
daysNarrow []string
daysShort []string
daysWide []string
periodsAbbreviated []string
periodsNarrow []string
periodsShort []string
periodsWide []string
erasAbbreviated []string
erasNarrow []string
erasWide []string
timezones map[string]string
}
// New returns a new instance of translator for the 'es_IC' locale
func | () locales.Translator {
return &es_IC{
locale: "es_IC",
pluralsCardinal: []locales.PluralRule{2, 6},
pluralsOrdinal: []locales.PluralRule{6},
pluralsRange: []locales.PluralRule{6},
decimal: ",",
group: ".",
minus: "-",
percent: "%",
perMille: "‰",
timeSeparator: ":",
inifinity: "∞",
currencies: []string{"ADP", "AED", "AFA", "AFN", "ALK", "ALL", "AMD", "ANG", "AOA", "AOK", "AON", "AOR", "ARA", "ARL", "ARM", "ARP", "ARS", "ATS", "AUD", "AWG", "AZM", "AZN", "BAD", "BAM", "BAN", "BBD", "BDT", "BEC", "BEF", "BEL", "BGL", "BGM", "BGN", "BGO", "BHD", "BIF", "BMD", "BND", "BOB", "BOL", "BOP", "BOV", "BRB", "BRC", "BRE", "BRL", "BRN", "BRR", "BRZ", "BSD", "BTN", "BUK", "BWP", "BYB", "BYN", "BYR", "BZD", "CAD", "CDF", "CHE", "CHF", "CHW", "CLE", "CLF", "CLP", "CNH", "CNX", "CNY", "COP", "COU", "CRC", "CSD", "CSK", "CUC", "CUP", "CVE", "CYP", "CZK", "DDM", "DEM", "DJF", "DKK", "DOP", "DZD", "ECS", "ECV", "EEK", "EGP", "ERN", "ESA", "ESB", "ESP", "ETB", "EUR", "FIM", "FJD", "FKP", "FRF", "GBP", "GEK", "GEL", "GHC", "GHS", "GIP", "GMD", "GNF", "GNS", "GQE", "GRD", "GTQ", "GWE", "GWP", "GYD", "HKD", "HNL", "HRD", "HRK", "HTG", "HUF", "IDR", "IEP", "ILP", "ILR", "ILS", "INR", "IQD", "IRR", "ISJ", "ISK", "ITL", "JMD", "JOD", "JPY", "KES", "KGS", "KHR", "KMF", "KPW", "KRH", "KRO", "KRW", "KWD", "KYD", "KZT", "LAK", "LBP", "LKR", "LRD", "LSL", "LTL", "LTT", "LUC", "LUF", "LUL", "LVL", "LVR", "LYD", "MAD", "MAF", "MCF", "MDC", "MDL", "MGA", "MGF", "MKD", "MKN", "MLF", "MMK", "MNT", "MOP", "MRO", "MRU", "MTL", "MTP", "MUR", "MVP", "MVR", "MWK", "MXN", "MXP", "MXV", "MYR", "MZE", "MZM", "MZN", "NAD", "NGN", "NIC", "NIO", "NLG", "NOK", "NPR", "NZD", "OMR", "PAB", "PEI", "PEN", "PES", "PGK", "PHP", "PKR", "PLN", "PLZ", "PTE", "PYG", "QAR", "RHD", "ROL", "RON", "RSD", "RUB", "RUR", "RWF", "SAR", "SBD", "SCR", "SDD", "SDG", "SDP", "SEK", "SGD", "SHP", "SIT", "SKK", "SLL", "SOS", "SRD", "SRG", "SSP", "STD", "STN", "SUR", "SVC", "SYP", "SZL", "THB", "TJR", "TJS", "TMM", "TMT", "TND", "TOP", "TPE", "TRL", "TRY", "TTD", "TWD", "TZS", "UAH", "UAK", "UGS", "UGX", "USD", "USN", "USS", "UYI", "UYP", "UYU", "UYW", "UZS", "VEB", "VEF", "VES", "VND", "VNN", "VUV", "WST", "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XEU", "XFO", "XFU", "XOF", "XPD", "XPF", "XPT", "XRE", "XSU", "XTS", "XUA", "XXX", "YDD", "YER", "YUD", "YUM", "YUN", "YUR", "ZAL", "ZAR", "ZMK", "ZMW", "ZRN", "ZRZ", "ZWD", "ZWL", "ZWR"},
percentSuffix: " ",
currencyPositiveSuffix: " ",
currencyNegativeSuffix: " ",
monthsAbbreviated: []string{"", "ene.", "feb.", "mar.", "abr.", "may.", "jun.", "jul.", "ago.", "sept.", "oct.", "nov.", "dic."},
monthsNarrow: []string{"", "E", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"},
monthsWide: []string{"", "enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"},
daysAbbreviated: []string{"dom.", "lun.", "mar.", "mié.", "jue.", "vie.", "sáb."},
daysNarrow: []string{"D", "L", "M", "X", "J", "V", "S"},
daysShort: []string{"DO", "LU", "MA", "MI", "JU", "VI", "SA"},
daysWide: []string{"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"},
periodsAbbreviated: []string{"a.\u00a0m.", "p.\u00a0m."},
periodsNarrow: []string{"a.\u00a0m.", "p.\u00a0m."},
periodsWide: []string{"a.\u00a0m.", "p.\u00a0m."},
erasAbbreviated: []string{"a. C.", "d. C."},
erasNarrow: []string{"", ""},
erasWide: []string{"antes de Cristo", "después de Cristo"},
timezones: map[string]string{"ACDT": "hora de verano de Australia central", "ACST": "hora estándar de Australia central", "ACWDT": "hora de verano de Australia centroccidental", "ACWST": "hora estándar de Australia centroccidental", "ADT": "hora de verano del Atlántico", "AEDT": "hora de verano de Australia oriental", "AEST": "hora estándar de Australia oriental", "AKDT": "hora de verano de Alaska", "AKST": "hora estándar de Alaska", "ARST": "hora de verano de Argentina", "ART": "hora estándar de Argentina", "AST": "hora estándar del Atlántico", "AWDT": "hora de verano de Australia occidental", "AWST": "hora estándar de Australia occidental", "BOT": "hora de Bolivia", "BT": "hora de Bután", "CAT": "hora de África central", "CDT": "hora de verano central", "CHADT": "hora de verano de Chatham", "CHAST": "hora estándar de Chatham", "CLST": "hora de verano de Chile", "CLT": "hora estándar de Chile", "COST": "hora de verano de Colombia", "COT": "hora estándar de Colombia", "CST": "hora estándar central", "ChST": "hora estándar de Chamorro", "EAT": "hora de África oriental", "ECT": "hora de Ecuador", "EDT": "hora de verano oriental", "EST": "hora estándar oriental", "GFT": "hora de la Guayana Francesa", "GMT": "hora del meridiano de Greenwich", "GST": "hora estándar del Golfo", "GYT": "hora de Guyana", "HADT": "hora de verano de Hawái-Aleutianas", "HAST": "hora estándar de Hawái-Aleutianas", "HAT": "hora de verano de Terranova", "HECU": "hora de verano de Cuba", "HEEG": "hora de verano de Groenlandia oriental", "HENOMX": "hora de verano del noroeste de México", "HEOG": "hora de verano de Groenlandia occidental", "HEPM": "hora de verano de San Pedro y Miquelón", "HEPMX": "hora de verano del Pacífico de México", "HKST": "hora de verano de Hong Kong", "HKT": "hora estándar de Hong Kong", "HNCU": "hora estándar de Cuba", "HNEG": "hora estándar de Groenlandia oriental", "HNNOMX": "hora estándar del noroeste de México", "HNOG": "hora estándar de Groenlandia occidental", "HNPM": "hora estándar de San Pedro y Miquelón", "HNPMX": "hora estándar del Pacífico de México", "HNT": "hora estándar de Terranova", "IST": "hora estándar de la India", "JDT": "hora de verano de Japón", "JST": "hora estándar de Japón", "LHDT": "hora de verano de Lord Howe", "LHST": "hora estándar de Lord Howe", "MDT": "hora de verano de las Montañas Rocosas", "MESZ": "hora de verano de Europa central", "MEZ": "hora estándar de Europa central", "MST": "hora estándar de las Montañas Rocosas", "MYT": "hora de Malasia", "NZDT": "hora de verano de Nueva Zelanda", "NZST": "hora estándar de Nueva Zelanda", "OESZ": "hora de verano de Europa oriental", "OEZ": "hora estándar de Europa oriental", "PDT": "hora de verano del Pacífico", "PST": "hora estándar del Pacífico", "SAST": "hora de Sudáfrica", "SGT": "hora de Singapur", "SRT": "hora de Surinam", "TMST": "hora de verano de Turkmenistán", "TMT": "hora estándar de Turkmenistán", "UYST": "hora de verano de Uruguay", "UYT": "hora estándar de Uruguay", "VET": "hora de Venezuela", "WARST": "hora de verano de Argentina occidental", "WART": "hora estándar de Argentina occidental", "WAST": "hora de verano de África occidental", "WAT": "hora estándar de África occidental", "WESZ": "hora de verano de Europa occidental", "WEZ": "hora estándar de Europa occidental", "WIB": "hora de Indonesia occidental", "WIT": "hora de Indonesia oriental", "WITA": "hora de Indonesia central", "∅∅∅": "hora de verano de Brasilia"},
}
}
// Locale returns the current translators string locale
func (es *es_IC) Locale() string {
return es.locale
}
// PluralsCardinal returns the list of cardinal plural rules associated with 'es_IC'
func (es *es_IC) PluralsCardinal() []locales.PluralRule {
return es.pluralsCardinal
}
// PluralsOrdinal returns the list of ordinal plural rules associated with 'es_IC'
func (es *es_IC) PluralsOrdinal() []locales.PluralRule {
return es.pluralsOrdinal
}
// PluralsRange returns the list of range plural rules associated with 'es_IC'
func (es *es_IC) PluralsRange() []locales.PluralRule {
return es.pluralsRange
}
// CardinalPluralRule returns the cardinal PluralRule given 'num' and digits/precision of 'v' for 'es_IC'
func (es *es_IC) CardinalPluralRule(num float64, v uint64) locales.PluralRule {
n := math.Abs(num)
if n == 1 {
return locales.PluralRuleOne
}
return locales.PluralRuleOther
}
// OrdinalPluralRule returns the ordinal PluralRule given 'num' and digits/precision of 'v' for 'es_IC'
func (es *es_IC) OrdinalPluralRule(num float64, v uint64) locales.PluralRule {
return locales.PluralRuleOther
}
// RangePluralRule returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for 'es_IC'
func (es *es_IC) RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) locales.PluralRule {
return locales.PluralRuleOther
}
// MonthAbbreviated returns the locales abbreviated month given the 'month' provided
func (es *es_IC) MonthAbbreviated(month time.Month) string {
return es.monthsAbbreviated[month]
}
// MonthsAbbreviated returns the locales abbreviated months
func (es *es_IC) MonthsAbbreviated() []string {
return es.monthsAbbreviated[1:]
}
// MonthNarrow returns the locales narrow month given the 'month' provided
func (es *es_IC) MonthNarrow(month time.Month) string {
return es.monthsNarrow[month]
}
// MonthsNarrow returns the locales narrow months
func (es *es_IC) MonthsNarrow() []string {
return es.monthsNarrow[1:]
}
// MonthWide returns the locales wide month given the 'month' provided
func (es *es_IC) MonthWide(month time.Month) string {
return es.monthsWide[month]
}
// MonthsWide returns the locales wide months
func (es *es_IC) MonthsWide() []string {
return es.monthsWide[1:]
}
// WeekdayAbbreviated returns the locales abbreviated weekday given the 'weekday' provided
func (es *es_IC) WeekdayAbbreviated(weekday time.Weekday) string {
return es.daysAbbreviated[weekday]
}
// WeekdaysAbbreviated returns the locales abbreviated weekdays
func (es *es_IC) WeekdaysAbbreviated() []string {
return es.daysAbbreviated
}
// WeekdayNarrow returns the locales narrow weekday given the 'weekday' provided
func (es *es_IC) WeekdayNarrow(weekday time.Weekday) string {
return es.daysNarrow[weekday]
}
// WeekdaysNarrow returns the locales narrow weekdays
func (es *es_IC) WeekdaysNarrow() []string {
return es.daysNarrow
}
// WeekdayShort returns the locales short weekday given the 'weekday' provided
func (es *es_IC) WeekdayShort(weekday time.Weekday) string {
return es.daysShort[weekday]
}
// WeekdaysShort returns the locales short weekdays
func (es *es_IC) WeekdaysShort() []string {
return es.daysShort
}
// WeekdayWide returns the locales wide weekday given the 'weekday' provided
func (es *es_IC) WeekdayWide(weekday time.Weekday) string {
return es.daysWide[weekday]
}
// WeekdaysWide returns the locales wide weekdays
func (es *es_IC) WeekdaysWide() []string {
return es.daysWide
}
// Decimal returns the decimal point of number
func (es *es_IC) Decimal() string {
return es.decimal
}
// Group returns the group of number
func (es *es_IC) Group() string {
return es.group
}
// Group returns the minus sign of number
func (es *es_IC) Minus() string {
return es.minus
}
// FmtNumber returns 'num' with digits/precision of 'v' for 'es_IC' and handles both Whole and Real numbers based on 'v'
func (es *es_IC) FmtNumber(num float64, v uint64) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
l := len(s) + 2 + 1*len(s[:len(s)-int(v)-1])/3
count := 0
inWhole := v == 0
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, es.decimal[0])
inWhole = true
continue
}
if inWhole {
if count == 3 {
b = append(b, es.group[0])
count = 1
} else {
count++
}
}
b = append(b, s[i])
}
if num < 0 {
b = append(b, es.minus[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
return string(b)
}
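// Illustrative note (added for clarity, not part of the generated locale data):
// with the es_IC separators configured above (decimal "," and group "."),
// a call such as es.FmtNumber(1234567.891, 2) is expected to produce
// "1.234.567,89" -- whole digits grouped in threes with "." and a "," decimal mark.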
// FmtPercent returns 'num' with digits/precision of 'v' for 'es_IC' and handles both Whole and Real numbers based on 'v'
// NOTE: 'num' passed into FmtPercent is assumed to be in percent already
func (es *es_IC) FmtPercent(num float64, v uint64) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
l := len(s) + 5
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, es.decimal[0])
continue
}
b = append(b, s[i])
}
if num < 0 {
b = append(b, es.minus[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
b = append(b, es.percentSuffix...)
b = append(b, es.percent...)
return string(b)
}
// FmtCurrency returns the currency representation of 'num' with digits/precision of 'v' for 'es_IC'
func (es *es_IC) FmtCurrency(num float64, v uint64, currency currency.Type) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
symbol := es.currencies[currency]
l := len(s) + len(symbol) + 4 + 1*len(s[:len(s)-int(v)-1])/3
count := 0
inWhole := v == 0
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, es.decimal[0])
inWhole = true
continue
}
if inWhole {
if count == 3 {
b = append(b, es.group[0])
count = 1
} else {
count++
}
}
b = append(b, s[i])
}
if num < 0 {
b = append(b, es.minus[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
if int(v) < 2 {
if v == 0 {
b = append(b, es.decimal...)
}
for i := 0; i < 2-int(v); i++ {
b = append(b, '0')
}
}
b = append(b, es.currencyPositiveSuffix...)
b = append(b, symbol...)
return string(b)
}
// FmtAccounting returns the currency representation of 'num' with digits/precision of 'v' for 'es_IC'
// in accounting notation.
func (es *es_IC) FmtAccounting(num float64, v uint64, currency currency.Type) string {
s := strconv.FormatFloat(math.Abs(num), 'f', int(v), 64)
symbol := es.currencies[currency]
l := len(s) + len(symbol) + 4 + 1*len(s[:len(s)-int(v)-1])/3
count := 0
inWhole := v == 0
b := make([]byte, 0, l)
for i := len(s) - 1; i >= 0; i-- {
if s[i] == '.' {
b = append(b, es.decimal[0])
inWhole = true
continue
}
if inWhole {
if count == 3 {
b = append(b, es.group[0])
count = 1
} else {
count++
}
}
b = append(b, s[i])
}
if num < 0 {
b = append(b, es.minus[0])
}
// reverse
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
b[i], b[j] = b[j], b[i]
}
if int(v) < 2 {
if v == 0 {
b = append(b, es.decimal...)
}
for i := 0; i < 2-int(v); i++ {
b = append(b, '0')
}
}
if num < 0 {
b = append(b, es.currencyNegativeSuffix...)
b = append(b, symbol...)
} else {
b = append(b, es.currencyPositiveSuffix...)
b = append(b, symbol...)
}
return string(b)
}
// FmtDateShort returns the short date representation of 't' for 'es_IC'
func (es *es_IC) FmtDateShort(t time.Time) string {
b := make([]byte, 0, 32)
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x2f}...)
b = strconv.AppendInt(b, int64(t.Month()), 10)
b = append(b, []byte{0x2f}...)
if t.Year() > 9 {
b = append(b, strconv.Itoa(t.Year())[2:]...)
} else {
b = append(b, strconv.Itoa(t.Year())[1:]...)
}
return string(b)
}
// FmtDateMedium returns the medium date representation of 't' for 'es_IC'
func (es *es_IC) FmtDateMedium(t time.Time) string {
b := make([]byte, 0, 32)
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x20}...)
b = append(b, es.monthsAbbreviated[t.Month()]...)
b = append(b, []byte{0x20}...)
if t.Year() > 0 {
b = strconv.AppendInt(b, int64(t.Year()), 10)
} else {
b = strconv.AppendInt(b, int64(-t.Year()), 10)
}
return string(b)
}
// FmtDateLong returns the long date representation of 't' for 'es_IC'
func (es *es_IC) FmtDateLong(t time.Time) string {
b := make([]byte, 0, 32)
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x20, 0x64, 0x65}...)
b = append(b, []byte{0x20}...)
b = append(b, es.monthsWide[t.Month()]...)
b = append(b, []byte{0x20, 0x64, 0x65}...)
b = append(b, []byte{0x20}...)
if t.Year() > 0 {
b = strconv.AppendInt(b, int64(t.Year()), 10)
} else {
b = strconv.AppendInt(b, int64(-t.Year()), 10)
}
return string(b)
}
// FmtDateFull returns the full date representation of 't' for 'es_IC'
func (es *es_IC) FmtDateFull(t time.Time) string {
b := make([]byte, 0, 32)
b = append(b, es.daysWide[t.Weekday()]...)
b = append(b, []byte{0x2c, 0x20}...)
b = strconv.AppendInt(b, int64(t.Day()), 10)
b = append(b, []byte{0x20, 0x64, 0x65}...)
b = append(b, []byte{0x20}...)
b = append(b, es.monthsWide[t.Month()]...)
b = append(b, []byte{0x20, 0x64, 0x65}...)
b = append(b, []byte{0x20}...)
if t.Year() > 0 {
b = strconv.AppendInt(b, int64(t.Year()), 10)
} else {
b = strconv.AppendInt(b, int64(-t.Year()), 10)
}
return string(b)
}
// FmtTimeShort returns the short time representation of 't' for 'es_IC'
func (es *es_IC) FmtTimeShort(t time.Time) string {
b := make([]byte, 0, 32)
b = strconv.AppendInt(b, int64(t.Hour()), 10)
b = append(b, es.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
return string(b)
}
// FmtTimeMedium returns the medium time representation of 't' for 'es_IC'
func (es *es_IC) FmtTimeMedium(t time.Time) string {
b := make([]byte, 0, 32)
b = strconv.AppendInt(b, int64(t.Hour()), 10)
b = append(b, es.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
b = append(b, es.timeSeparator...)
if t.Second() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Second()), 10)
return string(b)
}
// FmtTimeLong returns the long time representation of 't' for 'es_IC'
func (es *es_IC) FmtTimeLong(t time.Time) string {
b := make([]byte, 0, 32)
b = strconv.AppendInt(b, int64(t.Hour()), 10)
b = append(b, es.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
b = append(b, es.timeSeparator...)
if t.Second() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Second()), 10)
b = append(b, []byte{0x20}...)
tz, _ := t.Zone()
b = append(b, tz...)
return string(b)
}
// FmtTimeFull returns the full time representation of 't' for 'es_IC'
func (es *es_IC) FmtTimeFull(t time.Time) string {
b := make([]byte, 0, 32)
b = strconv.AppendInt(b, int64(t.Hour()), 10)
b = append(b, es.timeSeparator...)
if t.Minute() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Minute()), 10)
b = append(b, es.timeSeparator...)
if t.Second() < 10 {
b = append(b, '0')
}
b = strconv.AppendInt(b, int64(t.Second()), 10)
b = append(b, []byte{0x20, 0x28}...)
tz, _ := t.Zone()
if btz, ok := es.timezones[tz]; ok {
b = append(b, btz...)
} else {
b = append(b, tz...)
}
b = append(b, []byte{0x29}...)
return string(b)
}
| New |
mode.py | import enum
class Mode(enum.IntEnum):
Normal = 0
ArcSource = 1
ArcTarget = 2
Simulation = 100
ModeStrings = {Mode.Normal: 'Editor: Normal',
Mode.ArcSource: 'Editor: Arc source',
Mode.ArcTarget: 'Editor: Arc target',
Mode.Simulation: 'Simulation',
}
class ModeSwitch:
| def __init__(self, main_window):
from .simulationcontroller import SimulationController # avoid circular dependency
self._mode = Mode.Normal
self.main_window = main_window
self.simulation_controller: SimulationController = main_window.simulation_controller
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, new_mode):
old_mode = self.mode
editor = self.main_window.editor
# cleanup modes
arc_modes = (Mode.ArcSource, Mode.ArcTarget)
if new_mode not in arc_modes and old_mode in arc_modes:
editor.cancel_arc_modes()
# actual change
self._mode = new_mode
self.main_window.mode_label.setText(ModeStrings[self._mode])
if self.mode == Mode.Normal:
self.main_window.item_properties.edits_enabled(True)
self.main_window.sim_buttons_enabled(False)
if self.mode == Mode.Simulation:
self.main_window.item_properties.edits_enabled(False)
self.main_window.sim_buttons_enabled(True)
self.simulation_controller.init_petrinet()
self.simulation_controller.reset()
self.simulation_controller.animate_timer.start(0)
if self.mode != Mode.Simulation and old_mode == Mode.Simulation:
self.simulation_controller.reset()
self.simulation_controller.animate_timer.stop()
self.main_window.editor.update_all_texts() |
|
segmentation.py | """
Created by Michele Bianco, 9 July 2021
"""
import os
import numpy as np, pkg_resources
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras import backend as K
from tensorflow.python.ops import nn_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
def sigmoid_balanced_cross_entropy_with_logits(_sentinel=None, labels=None, logits=None, beta=None, name=None):
nn_ops._ensure_xent_args("sigmoid_cross_entropy_with_logits", _sentinel,labels, logits)
with ops.name_scope(name, "logistic_loss", [logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
try:
labels.get_shape().merge_with(logits.get_shape())
except ValueError:
raise ValueError("logits and labels must have the same shape (%s vs %s)" %(logits.get_shape(), labels.get_shape()))
zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
cond = (logits >= zeros)
relu_logits = array_ops.where(cond, logits, zeros)
neg_abs_logits = array_ops.where(cond, -logits, logits)
balanced_cross_entropy = relu_logits*(1.-beta)-logits*labels*(1.-beta)+math_ops.log1p(math_ops.exp(neg_abs_logits))*((1.-beta)*(1.-labels)+beta*labels)
return tf.reduce_mean(balanced_cross_entropy)
def balanced_cross_entropy(y_true, y_pred):
"""
To decrease the number of false negatives, set beta>1. To decrease the number of false positives, set beta<1.
"""
beta = tf.maximum(tf.reduce_mean(1 - y_true), tf.keras.backend.epsilon())
y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), 1 - tf.keras.backend.epsilon())
y_pred = K.log(y_pred / (1 - y_pred))
return sigmoid_balanced_cross_entropy_with_logits(logits=y_pred, labels=y_true, beta=beta)
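# Illustrative note (added for clarity, not part of the original module): the
# beta computed above is the fraction of ionized pixels (y_true == 0). For a
# batch that is ~90% ionized, beta ~= 0.9, so the rare neutral pixels
# (y_true == 1) are weighted by ~0.9 and the common ionized ones by ~0.1,
# which counteracts the class imbalance in the loss.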
def iou(y_true, y_pred):
"""
Return the Intersection over Union (IoU) for a given label.
Args:
y_true: the expected y values as a one-hot
y_pred: the predicted y values as a one-hot or softmax output
label: the label to return the IoU for
Returns:
the IoU for the given label
"""
intersection = K.sum(K.abs(y_true * y_pred))
#intersection = K.sum(y_true * y_pred)
union = K.sum(y_true) + K.sum(y_pred) - intersection
# avoid divide by zero - if the union is zero, return 1, otherwise, return the intersection over union
return K.switch(K.equal(union, 0), 1.0, intersection / union)
def dice_coef(y_true, y_pred, smooth=1):
"""
Dice = (2*|X & Y|)/ (|X|+ |Y|)
= 2*sum(|A*B|)/(sum(A^2)+sum(B^2))
ref: https://arxiv.org/pdf/1606.04797v1.pdf
"""
intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
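# Illustrative check (added for clarity, not part of the original module): for
# y_true = [1, 1, 0, 0] and y_pred = [1, 0, 0, 0], the intersection is 1, so
# with smooth=1 the coefficient is (2*1 + 1) / (2 + 1 + 1) = 0.75.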
################################################################
class segunet21cm:
def __init__(self, tta=1, verbose=False):
|
def UniqueRows(self, arr):
""" Remove duplicate row array in 2D data
- arr (narray): array with duplicate row
Example:
>> d = np.array([[0,1,2],[0,1,2],[0,0,0],[0,0,2],[0,1,2]])
>> UniqueRows(d)
array([[0, 0, 0],
[0, 0, 2],
[0, 1, 2]])
"""
arr = np.array(arr)
if(arr.ndim == 2):
arr = np.ascontiguousarray(arr)
unique_arr = np.unique(arr.view([('', arr.dtype)]*arr.shape[1]))
new_arr = unique_arr.view(arr.dtype).reshape((unique_arr.shape[0], arr.shape[1]))
elif(arr.ndim == 1):
new_arr = np.array(list(dict.fromkeys(arr)))
return new_arr
def IndependentOperations(self, verbose=False):
''' How many unique manipulations (horizontal and vertical flip, rotation, etc...)
can we operate on a cube?
Each independent operation is considered as an additional representation
of the same coeval data, so that it can be used for error bars with SegU-Net '''
data = np.array(range(3**3)).reshape((3,3,3))
func = [lambda a: a,
np.fliplr,
np.flipud,
lambda a: np.flipud(np.fliplr(a)),
lambda a: np.fliplr(np.flipud(a))]
axis = [0,1,2]
angl_rot = [0,1,2,3]
tot_manipl_data_flat = np.zeros((len(func)*len(axis)*len(angl_rot), data.size))
tot_operations = {'opt%d' %k:[] for k in range(0,len(func)*len(axis)*len(angl_rot))}
i = 0
for f in func:
cube = f(data)
for rotax in axis:
ax_tup = [0,1,2]
ax_tup.remove(rotax)
for rot in angl_rot:
tot_manipl_data_flat[i] = np.rot90(cube, k=rot, axes=ax_tup).flatten()
# function, axis of rotation, angle of rotation
tot_operations['opt%d' %i] = [f, rotax, rot]
i += 1
uniq_manipl_data_flat = self.UniqueRows(tot_manipl_data_flat).astype(int)
uniq_operations = {}
for iumdf, uniq_mdf in enumerate(uniq_manipl_data_flat):
for itmdf, tot_mdf in enumerate(tot_manipl_data_flat):
if(all(uniq_mdf == tot_mdf)):
uniq_operations['opt%d' %iumdf] = tot_operations['opt%d' %itmdf]
break
assert uniq_manipl_data_flat.shape[0] == len(uniq_operations)
if(verbose): print('tot number of (unique) manipulations we can do on a cube: %d' %(len(uniq_operations)))
return uniq_operations
def prediction(self, x):
img_shape = x.shape
if(self.TTA == 2):
X_tta = np.zeros((np.append(3*len(self.MANIP), img_shape)))
elif(self.TTA == 1):
X_tta = np.zeros((np.append(3*len(self.MANIP), img_shape)))
elif(self.TTA == 0):
X_tta = np.zeros((np.append(len(self.MANIP), img_shape)))
if(self.VERBOSE):
loop = tqdm(range(len(self.MANIP)))
else:
loop = range(len(self.MANIP))
for iopt in loop:
opt, rotax, rot = self.MANIP['opt%d' %iopt]
ax_tup = [0,1,2]
ax_tup.remove(rotax)
cube = np.rot90(opt(x), k=rot, axes=ax_tup)
X = cube[np.newaxis, ..., np.newaxis]
for j in range(img_shape[0]):
if(self.TTA == 0):
X_tta[iopt,j,:,:] = self.MODEL_LOADED.predict(X[:,j,:,:,:], verbose=0).squeeze()
else:
X_tta[iopt,j,:,:] = self.MODEL_LOADED.predict(X[:,j,:,:,:], verbose=0).squeeze()
X_tta[iopt+len(self.MANIP),:,j,:] = self.MODEL_LOADED.predict(X[:,:,j,:,:], verbose=0).squeeze()
X_tta[iopt+len(self.MANIP)*2,:,:,j] = self.MODEL_LOADED.predict(X[:,:,:,j,:], verbose=0).squeeze()
for itta in range(X_tta.shape[0]):
opt, rotax, rot = self.MANIP['opt%d' %(itta%len(self.MANIP))]
ax_tup = [0,1,2]
ax_tup.remove(rotax)
X_tta[itta] = opt(np.rot90(X_tta[itta], k=-rot, axes=ax_tup))
X_seg = np.round(np.mean(X_tta, axis=0))
X_err = np.std(X_tta, axis=0)
return X_seg, X_err
| """ SegU-Net: segmentation of 21cm images with U-shape network (Bianco et al. 2021, https://arxiv.org/abs/2102.06713)
- tta (int): default 0 (super-fast, no pixel-error map); implements the error map
with the test-time augmentation technique in the prediction process
- verbose (bool): default False, activate verbosity
Description:
tta = 0 : fast (~7 sec), it tends to be a few percent less accurate (<2%) than the other two cases, no pixel-error map (no TTA manipulation)
tta = 1 : medium (~17 sec), accurate and preferable to tta=0, with pixel-error map (3 samples)
tta = 2 : slow (~10 min), accurate, with pixel-error map (~100 samples)
Returns:
- X_seg (ndarray) : recovered binary field (1 = neutral and 0 = ionized regions)
- X_err (ndarray) : pixel-error map of the recovered binary field
Example:
$ from tools21cm import segmentation
$ seg = segmentation.segunet21cm(tta=1, verbose=True) # load model (need to be done once)
$ Xseg, Xseg_err = seg.prediction(x=dT3)
Print of the Network's Configuration file:
[TRAINING]
BATCH_SIZE = 64
AUGMENT = NOISESMT
IMG_SHAPE = 128, 128
CHAN_SIZE = 256
DROPOUT = 0.05
KERNEL_SIZE = 3
EPOCHS = 100
LOSS = balanced_cross_entropy
METRICS = iou, dice_coef, binary_accuracy, binary_crossentropy
LR = 1e-3
RECOMP = False
GPUS = 2
PATH = /home/michele/Documents/PhD_Sussex/output/ML/dataset/inputs/data2D_128_030920/
[RESUME]
RESUME_PATH = /home/michele/Documents/PhD_Sussex/output/ML/dataset/outputs/new/02-10T23-52-36_128slice/
BEST_EPOCH = 56
RESUME_EPOCH = 66
"""
self.TTA = tta
self.VERBOSE = verbose
if(self.TTA == 2):
# slow
self.MANIP = self.IndependentOperations(verbose=self.VERBOSE)
elif(self.TTA == 1):
# fast
self.MANIP = {'opt0': [lambda a: a, 0, 0]}
elif(self.TTA == 0):
# super-fast
self.MANIP = {'opt0': [lambda a: a, 0, 0]}
self.NR_MANIP = len(self.MANIP)
# load model
MODEL_NAME = pkg_resources.resource_filename('t2c', 'input_data/segunet_02-10T23-52-36_128slice_ep56.h5')
if (os.path.exists(MODEL_NAME)):
pass
else:
if(self.VERBOSE): print(' Download network weights: %s' %MODEL_NAME)
MODEL_EPOCH = 56
METRICS = {'balanced_cross_entropy':balanced_cross_entropy, 'iou':iou, 'dice_coef':dice_coef}
self.MODEL_LOADED = load_model(MODEL_NAME, custom_objects=METRICS)
if(self.VERBOSE): print(' Loaded model: %s' %MODEL_NAME) |
transport.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Abstract Transport
"""
import typing
import abc
from apt.transport.directorylisting import DirectoryListing
class Transport:
"""
Abstract class for retrieving information from repos
The functions 'exists' and 'open_read' are required to be implemented.
"""
@abc.abstractmethod
def exists(self, uri: str) -> bool:
"""
Returns whether a given uri exists.
:param str uri:
:return bool:
:raises URIMismatchError:
"""
@abc.abstractmethod
def | (self, uri: str) -> typing.IO:
"""
Opens a file as an IO-like for reading
:param string uri:
:return IO:
:raises URIMismatchError:
:raises FileNotFoundError:
"""
@abc.abstractmethod
def open_write(self, uri: str) -> typing.IO:
"""
Opens a file as an IO-like for writing
This function is required to handle the operation of creating directories
if the underlying data store has such a concept.
:param string uri:
:return:
:raises NotImplementedError:
:raises URIMismatchError:
"""
@abc.abstractmethod
def list_directory(self, uri: str) -> DirectoryListing:
"""
Returns a list of files and directories in a directory
:param string uri:
:return DirectoryListing:
:raises NotImplementedError:
:raises URIMismatchError:
:raises FileNotFoundError:
"""
| open_read |
props.ts | export type DragNavProps = {
className: string;
onDragBegin: () => void;
onDragEnd: () => void;
}
export type DropNavProps = {
navigate: () => void;
className: string;
navigateTo: string;
}
export type SkillProps = {
skill: string;
level: number;
}
| export type SkillsProps = {
test?: string;
}
export type AboutContainerProps = {
title: string;
text: string;
} | |
isomorphic_strings.go | package string
import (
"reflect"
"sort"
)
// 同构字符串
func isIsomorphic(s string, t string) bool {
sm, tm | := make(map[byte]int), make(map[byte]int)
for i := 0; i < len(s); i++ {
sm[s[i]]++
tm[t[i]]++
}
var data1 []int
for _, v := range sm {
data1 = append(data1, v)
}
var data2 []int
for _, v := range tm {
data2 = append(data2, v)
}
sort.Ints(data1)
sort.Ints(data2)
return reflect.DeepEqual(data1, data2)
}
|
|
wasd.py | #!python
# coding=utf-8
import cfg, cmn, vqw
import cookielib, urllib, urllib2, sys, argparse, re, string
import simplem3u8 as sm3u8
import simplejson as json
from urllib2 import Request, urlopen, URLError
from random import random
cmnHandler = cmn.cmnHandler()
_url_re = re.compile(r"""
http(s)?://(\w+.)?wasd\.tv/
(?:
channel/(?P<channel_id>\d+)
)
(?:
/(?:videos/)?
(?P<video_id>\d+)
)?
""", re.VERBOSE)
class wasdAPIHandler:
def __init__(self):
self.baseurl = 'https://wasd.tv/api'
return None
def getURL(self, url):
request = urllib2.Request(url)
try:
response = urllib2.urlopen(request)
retData = response.read()
response.close()
return retData
except URLError, e:
print e
return None
def getApiURL(self, url):
# Request the anon token and get the cookies
authUrl = "%s/auth/anon-token" % (self.baseurl)
cookies = cookielib.CookieJar()
handlers = [
urllib2.HTTPHandler(),
urllib2.HTTPSHandler(),
urllib2.HTTPCookieProcessor(cookies)
]
opener = urllib2.build_opener(*handlers)
authRequest = urllib2.Request(authUrl)
opener.open(authRequest)
# Request the endpoint
request = urllib2.Request(url)
request.add_header('User-Agent', cmnHandler.spoofAs('CHROME'))
try:
response = opener.open(request)
retData = response.read()
response.close()
return retData
except URLError, e:
print e
return None
def call(self, endpoint, query = None):
url = "%s/%s" % (self.baseurl, endpoint)
if (query):
queryArgs = urllib.urlencode(query)
url = "%s/%s?%s" % (self.baseurl, endpoint, queryArgs)
return self.getApiURL(url)
def getVideoInfoByID(self, videoId):
endpoint = "media-containers/%s" % (videoId)
query = {
"media_container_status": "RUNNING",
"limit": 1,
"offset": 0,
"channel_id": videoId,
"media_container_type": "SINGLE,COOP"
}
responseData = self.call(endpoint, query)
if responseData:
return json.loads(responseData)
return None
def getStreamInfoByID(self, streamId):
endpoint = "media-containers"
query = {
"media_container_status": "RUNNING",
"limit": 1,
"offset": 0,
"channel_id": streamId,
"media_container_type": "SINGLE,COOP"
}
responseData = self.call(endpoint, query)
if responseData:
return json.loads(responseData)
return None
def getTopGames(self, page = 0, limit = 50):
endpoint = "games"
query = {
"limit": limit,
"offset": page
}
responseData = self.call(endpoint, query)
if responseData:
return json.loads(responseData)
return None
def searchByGameTitle(self, title, page = 0, limit = 50):
endpoint = "search/games"
query = {
"search_phrase": title,
"limit": limit,
"offset": page
}
responseData = self.call(endpoint, query)
if responseData:
return json.loads(responseData)
return None
def getTopStreamsByGameID(self, id, page = 0, limit = 50):
endpoint = "media-containers"
query = {
"media_container_status": "RUNNING",
"game_id": id,
"media_container_type": "SINGLE,COOP",
"order_direction": "DESC",
"order_type": "VIEWERS",
"limit": limit,
"offset": page
}
responseData = self.call(endpoint, query)
if responseData:
return json.loads(responseData)
return None
class | :
def parseURL(self, url):
return _url_re.match(url).groupdict()
def getVideoType(self, url):
types = self.parseURL(url)
return {'channel': types['channel_id'], 'video': types['video_id']}
def getPrefferedVideoURL(self, data):
sm3u8Parser = sm3u8.parseHandler()
playlists = sm3u8Parser.parse(data)
for quality in vqw.wasdVQW:
for idx in playlists:
if (playlists[idx]):
streamQuality = playlists[idx]
if (streamQuality['resolution'].find(quality) >= 0):
return playlists[idx]['uri']
sys.exit()
return None
def clearUri(self, uri):
uriSplit = uri.split('#')
return uriSplit[0]
def main(argv):
wasdApi = wasdAPIHandler()
helpers = helpersHandler()
if len(argv) == 0:
print "No arguments given. Use wasd.py -h for more info.\nThe script must be used from the shell."
sys.exit()
# Parse the arguments
argParser = argparse.ArgumentParser(description=cmnHandler.getScriptDescription('wasd.tv'), epilog=cmnHandler.getScriptEpilog(),
formatter_class=argparse.RawDescriptionHelpFormatter)
argParser.add_argument('-u', '--url', action='store', dest='url', help='The video/channel url')
argParser.add_argument('-q', '--quality', action='store', dest='quality', help='Set the preferred video quality. This is optional. If not set, or if it is not available, the default quality weight will be used.')
argParser.add_argument('-tg', '--top-games', action='store_true', default=False, dest='topgames', help='Get a list of the current Top Games with live streams available, based on the number of viewers')
argParser.add_argument('-sg', '--search-game', action='store', dest='searchgame', help='Search for available streams based on game title/id')
argParser.add_argument('-shh', '--silence', action='store_true', default=False, dest='silence', help='If this is set, the script will not output anything, except of errors.')
args = argParser.parse_args()
if (args.silence != True):
cmnHandler.showIntroText()
if (args.url):
videoType = helpers.getVideoType(args.url)
if (args.quality):
vqw.wasdVQW.insert(0, args.quality)
if (args.topgames):
gamesList = wasdApi.getTopGames()
print "%-10s\t %-50s\t %-10s\t %-10s" % ('Game ID', 'Game', 'Viewers', 'Streams')
print "%s" % ('-'*200)
for game in gamesList['result']:
print "%-10s\t %-50s\t %-10d\t %-10d" % (game['game_id'], cmnHandler.uniStrip(game['game_name']), game['viewers_count'], game['stream_count'])
sys.exit()
if (args.searchgame):
gameTitle = args.searchgame
gameId = 0
try:
if int(gameTitle) >= 1:
gameId = gameTitle
except ValueError:
gameData = wasdApi.searchByGameTitle(gameTitle)
if gameData['result']['count'] > 1:
gamesList = gameData['result']['rows']
print "Found more than one game with the title %s. Select the one you want by the Game ID"
print "%-10s\t %-50s\t %-10s\t %-10s" % ('Game ID', 'Game', 'Viewers', 'Streams')
print "%s" % ('-'*200)
for game in gamesList:
print "%-10s\t %-50s\t %-10d\t %-10d" % (game['game_id'], cmnHandler.uniStrip(game['game_name']), game['viewers_count'], game['stream_count'])
else:
gameId = gameData['result']['rows'][0]['game_id']
if gameId > 0:
gameStreams = wasdApi.getTopStreamsByGameID(gameId)
if gameStreams:
print "%-36s\t %-10s\t %s" % ('URL', 'Viewers', 'Title')
print "%s" % ('-'*200)
for stream in gameStreams['result']:
streamUrl = "https://wasd.tv/channel/%s" % (stream['channel_id'])
print "%-36s\t %-10d\t %s" % (streamUrl, stream['media_container_streams'][0]['stream_current_viewers'], cmnHandler.uniStrip(stream['media_container_name']))
else:
print "No streams found for the game: %s" % (gameTitle)
sys.exit()
if (videoType['channel'] or videoType['video']):
if videoType['video']:
streamId = videoType['video']
streams = wasdApi.getVideoInfoByID(streamId)
stream_result = streams['result']
else:
streamId = videoType['channel']
streams = wasdApi.getStreamInfoByID(streamId)
stream_result = streams['result'][0]
stream_media = stream_result['media_container_streams'][0]['stream_media']
if (stream_media):
m3u8Response = wasdApi.getURL(stream_media[0]['media_meta']['media_url'])
if (m3u8Response):
uri = helpers.getPrefferedVideoURL(m3u8Response)
uri = helpers.clearUri(uri)
if uri:
if cfg.verbose and (args.silence != True):
print "%s" % (uri)
if cfg.autoplay:
cmnHandler.videoAutoplay(uri, 'list')
else:
print "Not valid video found"
else:
print "There is no video available!"
sys.exit()
sys.exit()
if __name__ == "__main__":
main(sys.argv[1:]) | helpersHandler |
errors.go | package main
// AMQPError represents an error communicating with the AMQP broker.
type AMQPError struct {
Message string
}
func NewAMQPError(msg string) AMQPError {
return AMQPError{Message: msg}
}
func (e AMQPError) Error() string {
return e.Message
}
// RequestError represents an error due to an invalid request.
type RequestError struct {
Message string
}
func NewRequestError(msg string) RequestError {
return RequestError{Message: msg}
}
func (e RequestError) Error() string {
return e.Message
}
// RecordError represents an error processing a record.
type RecordError struct {
Message string
}
func | (msg string) RecordError {
return RecordError{Message: msg}
}
func (e RecordError) Error() string {
return e.Message
}
| NewRecordError |
weedfs_file_write.go | package mount
import (
"github.com/hanwen/go-fuse/v2/fuse"
"net/http"
"syscall"
)
/**
* Write data
*
* Write should return exactly the number of bytes requested
* except on error. An exception to this is when the file has
* been opened in 'direct_io' mode, in which case the return value
* of the write system call will reflect the return value of this
* operation. | * expected to reset the setuid and setgid bits.
*
* fi->fh will contain the value set by the open method, or will
* be undefined if the open method didn't set any value.
*
* Valid replies:
* fuse_reply_write
* fuse_reply_err
*
* @param req request handle
* @param ino the inode number
* @param buf data to write
* @param size number of bytes to write
* @param off offset to write to
* @param fi file information
*/
func (wfs *WFS) Write(cancel <-chan struct{}, in *fuse.WriteIn, data []byte) (written uint32, code fuse.Status) {
if wfs.IsOverQuota {
return 0, fuse.Status(syscall.ENOSPC)
}
fh := wfs.GetHandle(FileHandleId(in.Fh))
if fh == nil {
return 0, fuse.ENOENT
}
fh.Lock()
defer fh.Unlock()
entry := fh.entry
if entry == nil {
return 0, fuse.OK
}
entry.Content = nil
offset := int64(in.Offset)
entry.Attributes.FileSize = uint64(max(offset+int64(len(data)), int64(entry.Attributes.FileSize)))
// glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data))
fh.dirtyPages.AddPage(offset, data)
written = uint32(len(data))
if offset == 0 {
// detect mime type
fh.contentType = http.DetectContentType(data)
}
fh.dirtyMetadata = true
return written, fuse.OK
} | *
* Unless FUSE_CAP_HANDLE_KILLPRIV is disabled, this method is |
article.go | package schema
import (
"time"
"github.com/hamba/avro"
)
// ArticleSchema avro schema for the article.
const ArticleSchema = `{
"type": "record",
"name": "Article",
"namespace": "wme_poc.general.schema",
"fields": [
{
"name": "name",
"type": "string"
},
{
"name": "identifier",
"type": "int"
},
{
"name": "date_modified",
"type": [
"null",
{
"type": "long",
"logicalType": "timestamp-micros"
}
]
},
{
"name": "version",
"type": [
"null",
"Version"
]
},
{
"name": "url",
"type": "string"
}
]
}`
| for _, schema := range []string{
VersionSchema,
} {
if _, err := avro.Parse(schema); err != nil {
return nil, err
}
}
return avro.Parse(ArticleSchema)
}
// Article schema for wikipedia article.
// Tries to compliant with https://schema.org/Article.
type Article struct {
Name string `json:"name" avro:"name"`
Identifier int `json:"identifier,omitempty" avro:"identifier"`
DateModified *time.Time `json:"date_modified,omitempty" avro:"date_modified"`
Version *Version `json:"version,omitempty" avro:"version"`
URL string `json:"url,omitempty" avro:"url"`
} | // NewArticleSchema create new article avro schema.
func NewArticleSchema() (avro.Schema, error) { |
attrib.go | // Copyright 2014,2015,2016,2017,2018,2019,2020,2021 SeukWon Kang ([email protected])
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package turnaction
// SleepBlockAct sleep condition prevent act
func (cmd TurnAction) SleepBlockAct() bool {
return attrib[cmd].sleepCancel
} | return attrib[cmd].needTurn
}
// TriggerTurn trigger tower/floor turn
func (cmd TurnAction) TriggerTurn() bool {
return attrib[cmd].triggerTurn
}
var attrib = [TurnAction_Count]struct {
sleepCancel bool
triggerTurn bool
needTurn float64
}{
Rebirth: {false, true, 0},
MoveFloor: {false, true, 1},
PassTurn: {false, true, 0},
Meditate: {false, true, 1},
KillSelf: {false, true, 1},
Move: {true, true, 1},
Attack: {true, true, 1.5},
AttackWide: {true, true, 3},
AttackLong: {true, true, 3},
Pickup: {true, true, 1},
Drop: {true, true, 1},
Equip: {true, true, 1},
UnEquip: {true, true, 1},
DrinkPotion: {true, true, 1},
ReadScroll: {true, true, 1},
Recycle: {true, true, 1},
EnterPortal: {true, true, 1},
ActTeleport: {false, true, 1},
} |
// NeedTurn need turn to act
func (cmd TurnAction) NeedTurn() float64 { |
decimator_spec.py | import numpy
import scipy.signal
from generate import *
def generate():
def process(factor, x):
|
vectors = []
x = random_complex64(256)
vectors.append(TestVector([2], [x], process(2, x), "2 Factor, 256 ComplexFloat32 input, 128 ComplexFloat32 output"))
vectors.append(TestVector([3], [x], process(3, x), "3 Factor, 256 ComplexFloat32 input, 85 ComplexFloat32 output"))
vectors.append(TestVector([4], [x], process(4, x), "4 Factor, 256 ComplexFloat32 input, 64 ComplexFloat32 output"))
vectors.append(TestVector([7], [x], process(7, x), "7 Factor, 256 ComplexFloat32 input, 36 ComplexFloat32 output"))
x = random_float32(256)
vectors.append(TestVector([2], [x], process(2, x), "2 Factor, 256 Float32 input, 128 Float32 output"))
vectors.append(TestVector([3], [x], process(3, x), "3 Factor, 256 Float32 input, 85 Float32 output"))
vectors.append(TestVector([4], [x], process(4, x), "4 Factor, 256 Float32 input, 64 Float32 output"))
vectors.append(TestVector([7], [x], process(7, x), "7 Factor, 256 Float32 input, 36 Float32 output"))
return CompositeSpec("DecimatorBlock", vectors, 1e-6)
| out = scipy.signal.decimate(x, factor, n=128 - 1, ftype='fir', zero_phase=False)
return [out.astype(type(x[0]))] |
helper.go | /*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package lockbasedtxmgr
import (
"fmt"
commonledger "github.com/hyperledger/fabric/common/ledger"
ledger "github.com/hyperledger/fabric/core/ledger"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/storageutil"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/txmgr"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version"
"github.com/hyperledger/fabric/core/ledger/util"
"github.com/hyperledger/fabric/extensions/collections/pvtdatahandler"
"github.com/hyperledger/fabric/protos/common"
"github.com/hyperledger/fabric/protos/ledger/queryresult"
"github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset"
"github.com/pkg/errors"
)
const (
queryReadsHashingEnabled = true
maxDegreeQueryReadsHashing = uint32(50)
)
type pvtDataHandler interface {
HandleGetPrivateData(txID, ns string, config *common.StaticCollectionConfig, key string) ([]byte, bool, error)
HandleGetPrivateDataMultipleKeys(txID, ns string, config *common.StaticCollectionConfig, keys []string) ([][]byte, bool, error)
}
type queryHelper struct {
txmgr *LockBasedTxMgr
collNameValidator *collNameValidator
rwsetBuilder *rwsetutil.RWSetBuilder
itrs []*resultsItr
err error
doneInvoked bool
pvtDataHandler pvtDataHandler
}
func newQueryHelper(txmgr *LockBasedTxMgr, rwsetBuilder *rwsetutil.RWSetBuilder, performCollCheck bool) *queryHelper {
helper := &queryHelper{txmgr: txmgr, rwsetBuilder: rwsetBuilder}
validator := newCollNameValidator(txmgr.ledgerid, txmgr.ccInfoProvider, &lockBasedQueryExecutor{helper: helper}, !performCollCheck)
helper.collNameValidator = validator
helper.pvtDataHandler = pvtdatahandler.New(txmgr.ledgerid, txmgr.collDataProvider)
return helper
}
func (h *queryHelper) getState(ns string, key string) ([]byte, []byte, error) {
if err := h.checkDone(); err != nil {
return nil, nil, err
}
versionedValue, err := h.txmgr.db.GetState(ns, key)
if err != nil {
return nil, nil, err
}
val, metadata, ver := decomposeVersionedValue(versionedValue)
if h.rwsetBuilder != nil {
h.rwsetBuilder.AddToReadSet(ns, key, ver)
}
return val, metadata, nil
}
func (h *queryHelper) getStateMultipleKeys(namespace string, keys []string) ([][]byte, error) {
if err := h.checkDone(); err != nil {
return nil, err
}
versionedValues, err := h.txmgr.db.GetStateMultipleKeys(namespace, keys)
if err != nil {
return nil, err
}
values := make([][]byte, len(versionedValues))
for i, versionedValue := range versionedValues {
val, _, ver := decomposeVersionedValue(versionedValue)
if h.rwsetBuilder != nil {
h.rwsetBuilder.AddToReadSet(namespace, keys[i], ver)
}
values[i] = val
}
return values, nil
}
func (h *queryHelper) getStateRangeScanIterator(namespace string, startKey string, endKey string) (ledger.QueryResultsIterator, error) {
if err := h.checkDone(); err != nil {
return nil, err
}
itr, err := newResultsItr(
namespace,
startKey,
endKey,
nil,
h.txmgr.db,
h.rwsetBuilder,
queryReadsHashingEnabled,
maxDegreeQueryReadsHashing,
)
if err != nil {
return nil, err
}
h.itrs = append(h.itrs, itr)
return itr, nil
}
func (h *queryHelper) getStateRangeScanIteratorWithMetadata(namespace string, startKey string, endKey string, metadata map[string]interface{}) (ledger.QueryResultsIterator, error) {
if err := h.checkDone(); err != nil {
return nil, err
}
itr, err := newResultsItr(
namespace,
startKey,
endKey,
metadata,
h.txmgr.db,
h.rwsetBuilder,
queryReadsHashingEnabled,
maxDegreeQueryReadsHashing,
)
if err != nil {
return nil, err
}
h.itrs = append(h.itrs, itr)
return itr, nil
}
func (h *queryHelper) executeQuery(namespace, query string) (commonledger.ResultsIterator, error) {
if err := h.checkDone(); err != nil {
return nil, err
}
dbItr, err := h.txmgr.db.ExecuteQuery(namespace, query)
if err != nil {
return nil, err
}
return &queryResultsItr{DBItr: dbItr, RWSetBuilder: h.rwsetBuilder}, nil
}
func (h *queryHelper) executeQueryWithMetadata(namespace, query string, metadata map[string]interface{}) (ledger.QueryResultsIterator, error) {
if err := h.checkDone(); err != nil {
return nil, err
}
dbItr, err := h.txmgr.db.ExecuteQueryWithMetadata(namespace, query, metadata)
if err != nil {
return nil, err
}
return &queryResultsItr{DBItr: dbItr, RWSetBuilder: h.rwsetBuilder}, nil
}
func (h *queryHelper) getPrivateData(ns, coll, key string) ([]byte, error) {
if err := h.validateCollName(ns, coll); err != nil {
return nil, err
}
if err := h.checkDone(); err != nil {
return nil, err
}
var err error
var hashVersion *version.Height
var versionedValue *statedb.VersionedValue
if versionedValue, err = h.txmgr.db.GetPrivateData(ns, coll, key); err != nil {
return nil, err
}
// metadata is always nil for private data - because, the metadata is part of the hashed key (instead of raw key)
val, _, ver := decomposeVersionedValue(versionedValue)
keyHash := util.ComputeStringHash(key)
if hashVersion, err = h.txmgr.db.GetKeyHashVersion(ns, coll, keyHash); err != nil {
return nil, err
}
if !version.AreSame(hashVersion, ver) {
return nil, &txmgr.ErrPvtdataNotAvailable{Msg: fmt.Sprintf(
"private data matching public hash version is not available. Public hash version = %s, Private data version = %s",
hashVersion, ver)}
}
if h.rwsetBuilder != nil {
h.rwsetBuilder.AddToHashedReadSet(ns, coll, key, ver)
}
return val, nil
}
func (h *queryHelper) getPrivateDataValueHash(ns, coll, key string) (valueHash, metadataBytes []byte, err error) {
if err := h.validateCollName(ns, coll); err != nil {
return nil, nil, err
}
if err := h.checkDone(); err != nil {
return nil, nil, err
}
var versionedValue *statedb.VersionedValue
if versionedValue, err = h.txmgr.db.GetPrivateDataHash(ns, coll, key); err != nil {
return nil, nil, err
}
valHash, metadata, ver := decomposeVersionedValue(versionedValue)
if h.rwsetBuilder != nil {
h.rwsetBuilder.AddToHashedReadSet(ns, coll, key, ver)
}
return valHash, metadata, nil
}
func (h *queryHelper) getPrivateDataMultipleKeys(ns, coll string, keys []string) ([][]byte, error) {
if err := h.validateCollName(ns, coll); err != nil {
return nil, err
}
if err := h.checkDone(); err != nil {
return nil, err
}
versionedValues, err := h.txmgr.db.GetPrivateDataMultipleKeys(ns, coll, keys)
if err != nil {
return nil, err
}
values := make([][]byte, len(versionedValues))
for i, versionedValue := range versionedValues {
val, _, ver := decomposeVersionedValue(versionedValue)
if h.rwsetBuilder != nil {
h.rwsetBuilder.AddToHashedReadSet(ns, coll, keys[i], ver)
}
values[i] = val
}
return values, nil
}
func (h *queryHelper) getPrivateDataRangeScanIterator(namespace, collection, startKey, endKey string) (commonledger.ResultsIterator, error) {
if err := h.validateCollName(namespace, collection); err != nil {
return nil, err
}
if err := h.checkDone(); err != nil {
return nil, err
}
dbItr, err := h.txmgr.db.GetPrivateDataRangeScanIterator(namespace, collection, startKey, endKey)
if err != nil {
return nil, err
}
return &pvtdataResultsItr{namespace, collection, dbItr}, nil
}
func (h *queryHelper) executeQueryOnPrivateData(namespace, collection, query string) (commonledger.ResultsIterator, error) {
if err := h.validateCollName(namespace, collection); err != nil {
return nil, err
}
if err := h.checkDone(); err != nil {
return nil, err
}
dbItr, err := h.txmgr.db.ExecuteQueryOnPrivateData(namespace, collection, query)
if err != nil {
return nil, err
}
return &pvtdataResultsItr{namespace, collection, dbItr}, nil
}
func (h *queryHelper) getStateMetadata(ns string, key string) (map[string][]byte, error) {
if err := h.checkDone(); err != nil {
return nil, err
}
var metadataBytes []byte
var err error
if h.rwsetBuilder == nil {
// reads versions are not getting recorded, retrieve metadata value via optimized path
if metadataBytes, err = h.txmgr.db.GetStateMetadata(ns, key); err != nil {
return nil, err
}
} else {
if _, metadataBytes, err = h.getState(ns, key); err != nil {
return nil, err
}
}
return storageutil.DeserializeMetadata(metadataBytes)
}
func (h *queryHelper) getPrivateDataMetadata(ns, coll, key string) (map[string][]byte, error) {
if h.rwsetBuilder == nil {
// reads versions are not getting recorded, retrieve metadata value via optimized path
return h.getPrivateDataMetadataByHash(ns, coll, util.ComputeStringHash(key))
}
if err := h.validateCollName(ns, coll); err != nil {
return nil, err
}
if err := h.checkDone(); err != nil {
return nil, err
}
_, metadataBytes, err := h.getPrivateDataValueHash(ns, coll, key)
if err != nil {
return nil, err
}
return storageutil.DeserializeMetadata(metadataBytes)
}
func (h *queryHelper) getPrivateDataMetadataByHash(ns, coll string, keyhash []byte) (map[string][]byte, error) {
if err := h.validateCollName(ns, coll); err != nil {
return nil, err
}
if err := h.checkDone(); err != nil {
return nil, err
}
if h.rwsetBuilder != nil {
// this requires improving the rwset builder to accept a keyhash
return nil, errors.New("retrieving private data metadata by keyhash is not supported in simulation. This function is only available for query as yet")
}
metadataBytes, err := h.txmgr.db.GetPrivateDataMetadataByHash(ns, coll, keyhash)
if err != nil {
return nil, err
}
return storageutil.DeserializeMetadata(metadataBytes)
}
func (h *queryHelper) done() {
if h.doneInvoked {
return
}
defer func() {
h.txmgr.commitRWLock.RUnlock()
h.doneInvoked = true
for _, itr := range h.itrs {
itr.Close()
}
}()
}
func (h *queryHelper) addRangeQueryInfo() {
for _, itr := range h.itrs {
if h.rwsetBuilder != nil {
results, hash, err := itr.rangeQueryResultsHelper.Done()
if err != nil {
h.err = err
return
}
if results != nil {
rwsetutil.SetRawReads(itr.rangeQueryInfo, results)
}
if hash != nil {
rwsetutil.SetMerkelSummary(itr.rangeQueryInfo, hash)
}
h.rwsetBuilder.AddToRangeQuerySet(itr.ns, itr.rangeQueryInfo)
}
}
}
func (h *queryHelper) checkDone() error {
if h.doneInvoked {
return errors.New("this instance should not be used after calling Done()")
}
return nil
}
func (h *queryHelper) validateCollName(ns, coll string) error {
return h.collNameValidator.validateCollName(ns, coll)
}
// resultsItr implements interface ledger.ResultsIterator
// this wraps the actual db iterator and intercept the calls
// to build rangeQueryInfo in the ReadWriteSet that is used
// for performing phantom read validation during commit
type resultsItr struct {
ns string
endKey string
dbItr statedb.ResultsIterator
rwSetBuilder *rwsetutil.RWSetBuilder
rangeQueryInfo *kvrwset.RangeQueryInfo
rangeQueryResultsHelper *rwsetutil.RangeQueryResultsHelper
}
func newResultsItr(ns string, startKey string, endKey string, metadata map[string]interface{},
db statedb.VersionedDB, rwsetBuilder *rwsetutil.RWSetBuilder, enableHashing bool, maxDegree uint32) (*resultsItr, error) {
var err error
var dbItr statedb.ResultsIterator
if metadata == nil {
dbItr, err = db.GetStateRangeScanIterator(ns, startKey, endKey)
} else {
dbItr, err = db.GetStateRangeScanIteratorWithMetadata(ns, startKey, endKey, metadata)
}
if err != nil {
return nil, err
}
itr := &resultsItr{ns: ns, dbItr: dbItr}
// it's a simulation request so, enable capture of range query info
if rwsetBuilder != nil {
itr.rwSetBuilder = rwsetBuilder
itr.endKey = endKey
// just set the StartKey... set the EndKey later below in the Next() method.
itr.rangeQueryInfo = &kvrwset.RangeQueryInfo{StartKey: startKey}
resultsHelper, err := rwsetutil.NewRangeQueryResultsHelper(enableHashing, maxDegree)
if err != nil {
return nil, err
}
itr.rangeQueryResultsHelper = resultsHelper
}
return itr, nil
}
// Next implements method in interface ledger.ResultsIterator
// Before returning the next result, update the EndKey and ItrExhausted in rangeQueryInfo
// If we set the EndKey in the constructor (as we do for the StartKey) to what is
// supplied in the original query, we may be capturing the unnecessary longer range if the
// caller decides to stop iterating at some intermediate point. Alternatively, we could have
// set the EndKey and ItrExhausted in the Close() function but it may not be desirable to change
// transactional behaviour based on whether the Close() was invoked or not
func (itr *resultsItr) Next() (commonledger.QueryResult, error) {
queryResult, err := itr.dbItr.Next()
if err != nil {
return nil, err
}
itr.updateRangeQueryInfo(queryResult)
if queryResult == nil {
return nil, nil
}
versionedKV := queryResult.(*statedb.VersionedKV)
return &queryresult.KV{Namespace: versionedKV.Namespace, Key: versionedKV.Key, Value: versionedKV.Value}, nil
}
// GetBookmarkAndClose implements method in interface ledger.ResultsIterator
func (itr *resultsItr) GetBookmarkAndClose() string {
returnBookmark := ""
if queryResultIterator, ok := itr.dbItr.(statedb.QueryResultsIterator); ok {
returnBookmark = queryResultIterator.GetBookmarkAndClose()
}
return returnBookmark
}
// updateRangeQueryInfo updates two attributes of the rangeQueryInfo
// 1) The EndKey - set to either a) latest key that is to be returned to the caller (if the iterator is not exhausted)
// because, we do not know if the caller is again going to invoke Next() or not.
// or b) the last key that was supplied in the original query (if the iterator is exhausted)
// 2) The ItrExhausted - set to true if the iterator is going to return nil as a result of the Next() call
func (itr *resultsItr) updateRangeQueryInfo(queryResult statedb.QueryResult) {
if itr.rwSetBuilder == nil {
return
}
if queryResult == nil {
// caller scanned till the iterator got exhausted.
// So, set the endKey to the actual endKey supplied in the query
itr.rangeQueryInfo.ItrExhausted = true
itr.rangeQueryInfo.EndKey = itr.endKey
return
}
versionedKV := queryResult.(*statedb.VersionedKV)
itr.rangeQueryResultsHelper.AddResult(rwsetutil.NewKVRead(versionedKV.Key, versionedKV.Version))
// Set the end key to the latest key retrieved by the caller.
// Because, the caller may actually not invoke the Next() function again
itr.rangeQueryInfo.EndKey = versionedKV.Key
}
// Close implements method in interface ledger.ResultsIterator
func (itr *resultsItr) Close() {
itr.dbItr.Close()
}
type queryResultsItr struct {
DBItr statedb.ResultsIterator
RWSetBuilder *rwsetutil.RWSetBuilder
}
// Next implements method in interface ledger.ResultsIterator
func (itr *queryResultsItr) Next() (commonledger.QueryResult, error) {
queryResult, err := itr.DBItr.Next()
if err != nil {
return nil, err
}
if queryResult == nil {
return nil, nil
}
versionedQueryRecord := queryResult.(*statedb.VersionedKV)
logger.Debugf("queryResultsItr.Next() returned a record:%s", string(versionedQueryRecord.Value))
if itr.RWSetBuilder != nil {
itr.RWSetBuilder.AddToReadSet(versionedQueryRecord.Namespace, versionedQueryRecord.Key, versionedQueryRecord.Version)
}
return &queryresult.KV{Namespace: versionedQueryRecord.Namespace, Key: versionedQueryRecord.Key, Value: versionedQueryRecord.Value}, nil
}
// Close implements method in interface ledger.ResultsIterator
func (itr *queryResultsItr) Close() {
itr.DBItr.Close()
}
func (itr *queryResultsItr) GetBookmarkAndClose() string {
returnBookmark := ""
if queryResultIterator, ok := itr.DBItr.(statedb.QueryResultsIterator); ok {
returnBookmark = queryResultIterator.GetBookmarkAndClose()
}
return returnBookmark
}
func | (versionedValue *statedb.VersionedValue) ([]byte, []byte, *version.Height) {
var value []byte
var metadata []byte
var ver *version.Height
if versionedValue != nil {
value = versionedValue.Value
ver = versionedValue.Version
metadata = versionedValue.Metadata
}
return value, metadata, ver
}
// pvtdataResultsItr iterates over results of a query on pvt data
type pvtdataResultsItr struct {
ns string
coll string
dbItr statedb.ResultsIterator
}
// Next implements method in interface ledger.ResultsIterator
func (itr *pvtdataResultsItr) Next() (commonledger.QueryResult, error) {
queryResult, err := itr.dbItr.Next()
if err != nil {
return nil, err
}
if queryResult == nil {
return nil, nil
}
versionedQueryRecord := queryResult.(*statedb.VersionedKV)
return &queryresult.KV{
Namespace: itr.ns,
Key: versionedQueryRecord.Key,
Value: versionedQueryRecord.Value,
}, nil
}
// Close implements method in interface ledger.ResultsIterator
func (itr *pvtdataResultsItr) Close() {
itr.dbItr.Close()
}
| decomposeVersionedValue |
flask_exmaple.py | # coding=utf-8
"""
@author: magician
@date: 2018/9/14
"""
import datetime
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, jsonify, request
from sqlalchemy.exc import IntegrityError
from marshmallow import Schema, fields, ValidationError, pre_load
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
# MODELS
class | (db.Model):
id = db.Column(db.Integer, primary_key=True)
first = db.Column(db.String(80))
last = db.Column(db.String(80))
class Quote(db.Model):
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.String, nullable=False)
author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
author = db.relationship(
'Author',
backref=db.backref('quotes', lazy='dynamic'),
)
posted_at = db.Column(db.DateTime)
# SCHEMAS
class AuthorSchema(Schema):
id = fields.Int(dump_only=True)
first = fields.Str()
last = fields.Str()
formatted_name = fields.Method('format_name', dump_only=True)
def format_name(self, author):
return '{}, {}'.format(author.last, author.first)
# Custom validator
def must_not_be_blank(data):
if not data:
raise ValidationError('Data not provided.')
class QuoteSchema(Schema):
id = fields.Int(dump_only=True)
author = fields.Nested(AuthorSchema, validate=must_not_be_blank)
content = fields.Str(required=True, validate=must_not_be_blank)
posted_at = fields.DateTime(dump_only=True)
# Allow client to pass author's full name in request body
    # e.g. {"author": "Tim Peters"} rather than {"first": "Tim", "last": "Peters"}
@pre_load
    def process_author(self, data, **kwargs):
author_name = data.get('author')
if author_name:
first, last = author_name.split(' ')
author_dict = dict(first=first, last=last)
else:
author_dict = {}
data['author'] = author_dict
return data
author_schema = AuthorSchema()
authors_schema = AuthorSchema(many=True)
quote_schema = QuoteSchema()
quotes_schema = QuoteSchema(many=True, only=('id', 'content'))
# API
@app.route('/authors')
def get_authors():
authors = Author.query.all()
# Serialize the queryset
result = authors_schema.dump(authors)
return jsonify({'authors': result})
@app.route('/authors/<int:pk>')
def get_author(pk):
try:
author = Author.query.get(pk)
except IntegrityError:
return jsonify({'message': 'Author could not be found.'}), 400
author_result = author_schema.dump(author)
quotes_result = quotes_schema.dump(author.quotes.all())
return jsonify({'author': author_result, 'quotes': quotes_result})
@app.route('/quotes/', methods=['GET'])
def get_quotes():
quotes = Quote.query.all()
result = quotes_schema.dump(quotes, many=True)
return jsonify({'quotes': result})
@app.route('/quotes/<int:pk>')
def get_quote(pk):
try:
quote = Quote.query.get(pk)
except IntegrityError:
return jsonify({'message': 'Quote could not be found.'}), 400
result = quote_schema.dump(quote)
return jsonify({'quote': result})
@app.route('/quotes/', methods=['POST'])
def new_quote():
json_data = request.get_json()
if not json_data:
return jsonify({'message': 'No input data provided'}), 400
# Validate and deserialize input
try:
data = quote_schema.load(json_data)
except ValidationError as err:
return jsonify(err.messages), 422
first, last = data['author']['first'], data['author']['last']
author = Author.query.filter_by(first=first, last=last).first()
if author is None:
# Create a new author
author = Author(first=first, last=last)
db.session.add(author)
# Create new quote
quote = Quote(
content=data['content'],
author=author,
posted_at=datetime.datetime.utcnow(),
)
db.session.add(quote)
db.session.commit()
result = quote_schema.dump(Quote.query.get(quote.id))
return jsonify({
'message': 'Created new quote.',
'quote': result,
})
if __name__ == '__main__':
db.create_all()
app.run(debug=True, port=5000)
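# Illustrative client call (a hedged sketch, not part of the original example;
# it assumes the app above is running locally on port 5000):
#
#   import requests
#   requests.post("http://localhost:5000/quotes/",
#                 json={"content": "Beautiful is better than ugly.", "author": "Tim Peters"})
#
# The @pre_load hook on QuoteSchema splits "Tim Peters" into first/last before
# validation, so new_quote() can reuse or create the nested Author.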
| Author |
class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.js | var class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config =
[
[ "AllowNull", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a1d17d5f7998e83a052e5180b5d0715bc", null ],
[ "Calculate", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#aef7273065605c355701efa14fbb2e7be", null ],
[ "Convert", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#aedae12a8c2260b2784f24ca9ebc1d1f6", null ],
[ "Critical", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a66e458f5a94ceb1214cd40a252048c09", null ],
[ "CustomSortType", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a300a628608f8e33680e7caefd872d398", null ],
[ "DateFormat", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a883ceddd854e766fcd4763960e083ddb", null ],
[ "DateReadFormat", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a2d2660b5481c3b2cd8bbe1f056ab8e32", null ],
[ "DateWriteFormat", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#ac3c3bcc231cf42c1363455ae049909c6", null ],
[ "DefaultValue", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a88c815f4e0f3ebb2fd9ca731f81bfcb9", null ],
[ "Depends", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a193f52d5e255945788da5a3e49096a01", null ],
[ "Fields", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a156ffabcffdddb5c2634a190d11514c4", null ],
[ "HtmlEncode", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a50edd177edf6f41bf5c829bef19b49bf", null ],
[ "IsComplex", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a7b3535108eb120b1765e50d20f47b7fe", null ],
[ "Mapping", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a8c31b312b6d1a89bac7011547c00b79d", null ],
[ "MappingFunction", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a315f1595f8074175b88b8dafccf3b9f1", null ], | [ "Name", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a6db2901aa326e80e71c962bd9f546f5d", null ],
[ "NullConvert", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a1d1bdcd8cf243c2f8be3378f91da5a47", null ],
[ "Persist", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a8204a0f982c97fe3eb305a96e89e1bd5", null ],
[ "Reference", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a2e620f60cd12aed4efc0d8a5d8b75d4b", null ],
[ "ReferenceString", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a45a096b1639b77456caac0e5a75a56af", null ],
[ "Serialize", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a312d732a427bfaa36e78ecf428703c50", null ],
[ "ServerMapping", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a3d5af54585ae3f6e2d26e963b449f6e6", null ],
[ "SortDir", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a9e9b312e2208a75c8b2374cd7500a800", null ],
[ "SortType", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#aea4b8d45d57a14f7c9fe3dee2f19bab9", null ],
[ "SubmitEmptyValue", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a6a9d4617c7283c2a98c86275d72642e3", null ],
[ "Type", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a37278e983d33416dd62dc936d2796e41", null ],
[ "Unique", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a08403f2dc74b4d7d1e5d7d57983eaf1f", null ],
[ "VersionProperty", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a44adce1391f97c294c03658905009ce0", null ]
]; | [ "Model", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#aa7f5948710c104a63d08e835543d5f64", null ],
[ "ModelName", "d5/d73/class_ext_1_1_net_1_1_mobile_1_1_model_field_1_1_config.html#a19334ebb31dd84e408b77c3c6dc1afb9", null ], |
solar-system.component.ts | import { Component, OnInit } from '@angular/core';
@Component({
selector: 'app-solar-system',
templateUrl: './solar-system.component.html',
styleUrls: ['./solar-system.component.css']
})
export class | implements OnInit {
constructor() { }
ngOnInit() {
}
}
| SolarSystemComponent |
__init__.py | """
The Cibin package.
"""
|
from .cibin import * | __version__ = "0.0.1" |
ngram.rs | use std::collections::VecDeque;
pub struct NGramIter<T: Iterator> {
n: usize,
tokn: T,
prev: VecDeque<T::Item>,
init: bool
}
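// Sliding-window n-gram iterator over any token iterator: `n` is the gram size,
// `tokn` the underlying token stream, `prev` holds the most recent (at most n)
// tokens, and `init` records whether `next()` has already pre-consumed the first
// n-1 tokens so that only complete n-grams are yielded.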
impl<T: Iterator> NGramIter<T>
where <T as Iterator>::Item: Clone {
pub fn from(s: T, n: usize) -> NGramIter<T> {
NGramIter { n: n, tokn: s, prev: VecDeque::new(), init: false }
}
pub fn incomplete_next(&mut self) -> Option<Vec<T::Item>> {
self.tokn.next().map(|s| {
self.prev.push_back(s);
while self.prev.len() > self.n {
self.prev.pop_front();
}
let l = self.prev.len();
let s = if l < self.n { 0 } else { l - self.n };
let mut v = Vec::with_capacity(self.n);
for i in s .. l {
v.push(self.prev[i].clone());
}
v
})
}
}
impl<T: Iterator> Iterator for NGramIter<T>
where <T as Iterator>::Item: Clone {
type Item = Vec<T::Item>;
fn next(&mut self) -> Option<Self::Item> |
}
pub trait NGram<T: Iterator>
where <T as Iterator>::Item: Clone {
fn ngram(self, n: usize) -> NGramIter<T>;
}
impl<T: Iterator> NGram<T> for T
where <T as Iterator>::Item: Clone {
fn ngram(self, n: usize) -> NGramIter<T> {
NGramIter::from(self, n)
}
}
#[test]
fn test_char_bigram() {
let s = "abc";
let mut ngram = s.chars().ngram(2);
assert_eq!(Some(vec!['a', 'b']), ngram.next());
assert_eq!(Some(vec!['b', 'c']), ngram.next());
assert_eq!(None, ngram.next());
}
#[test]
fn test_char_trigram() {
let s = "abcd";
let mut ngram = s.chars().ngram(3);
assert_eq!(Some(vec!['a', 'b', 'c']), ngram.next());
assert_eq!(Some(vec!['b', 'c', 'd']), ngram.next());
assert_eq!(None, ngram.next());
}
#[test]
fn test_word_bigram() {
use super::tokenizer::Words;
let s = "abc, d e.";
let mut ngram = s.words().ngram(2);
assert_eq!(Some(vec!["abc", "d"]), ngram.next());
assert_eq!(Some(vec!["d", "e"]), ngram.next());
assert_eq!(None, ngram.next());
}
#[test]
fn test_word_trigram() {
use super::tokenizer::Words;
let s = "abc, d e abc.";
let mut ngram = s.words().ngram(3);
assert_eq!(Some(vec!["abc", "d", "e"]), ngram.next());
assert_eq!(Some(vec!["d", "e", "abc"]), ngram.next());
assert_eq!(None, ngram.next());
}
| {
if !self.init {
for _ in 0 .. self.n - 1 {
self.incomplete_next();
}
self.init = true
}
self.incomplete_next()
} |
app.py | import gzip
import io
import os
import uuid
import nbformat
from dagster import __version__ as dagster_version
from dagster import check
from dagster.cli.workspace import Workspace
from dagster.cli.workspace.context import WorkspaceProcessContext
from dagster.core.debug import DebugRunPayload
from dagster.core.execution.compute_logs import warn_if_compute_logs_disabled
from dagster.core.instance import DagsterInstance
from dagster.core.storage.compute_log_manager import ComputeIOType
from dagster.core.telemetry import log_workspace_stats
from dagster_graphql.schema import create_schema
from dagster_graphql.version import __version__ as dagster_graphql_version
from flask import Blueprint, Flask, jsonify, redirect, render_template_string, request, send_file
from flask_cors import CORS
from flask_graphql import GraphQLView
from flask_sockets import Sockets
from graphql.execution.executors.gevent import GeventExecutor as Executor
from nbconvert import HTMLExporter
from .format_error import format_error_with_stack_trace
from .subscription_server import DagsterSubscriptionServer
from .templates.playground import TEMPLATE as PLAYGROUND_TEMPLATE
from .version import __version__
MISSING_SCHEDULER_WARNING = (
"You have defined ScheduleDefinitions for this repository, but have "
"not defined a scheduler on the instance"
)
class DagsterGraphQLView(GraphQLView):
def __init__(self, context, **kwargs):
super(DagsterGraphQLView, self).__init__(**kwargs)
self.context = check.inst_param(context, "context", WorkspaceProcessContext)
def get_context(self):
return self.context.create_request_context()
format_error = staticmethod(format_error_with_stack_trace)
def dagster_graphql_subscription_view(subscription_server, context):
context = check.inst_param(context, "context", WorkspaceProcessContext)
def view(ws):
# Even though this argument is named as the "request_context", we are passing it
# a `WorkspaceProcessContext`. This is a naming restriction from the underlying
        # `GeventSubscriptionServer` which we rely on. If you view the implementation
# for the DagsterSubscriptionServer, you will see that we create a request context
# for every GraphQL request in the `on_start` method.
subscription_server.handle(ws, request_context=context)
return []
return view
def info_view():
return (
jsonify(
dagit_version=__version__,
dagster_graphql_version=dagster_graphql_version,
dagster_version=dagster_version,
),
200,
)
def notebook_view(request_args):
check.dict_param(request_args, "request_args")
# This currently provides open access to your file system - the very least we can
# do is limit it to notebook files until we create a more permanent solution.
path = request_args["path"]
if not path.endswith(".ipynb"):
return "Invalid Path", 400
with open(os.path.abspath(path)) as f:
read_data = f.read()
notebook = nbformat.reads(read_data, as_version=4)
html_exporter = HTMLExporter()
html_exporter.template_file = "basic"
(body, resources) = html_exporter.from_notebook_node(notebook)
return "<style>" + resources["inlining"]["css"][0] + "</style>" + body, 200
def download_log_view(context):
context = check.inst_param(context, "context", WorkspaceProcessContext)
def view(run_id, step_key, file_type):
run_id = str(uuid.UUID(run_id)) # raises if not valid run_id
step_key = step_key.split("/")[-1] # make sure we're not diving deep into
out_name = f"{run_id}_{step_key}.{file_type}"
manager = context.instance.compute_log_manager
try:
io_type = ComputeIOType(file_type)
result = manager.get_local_path(run_id, step_key, io_type)
if not os.path.exists(result):
result = io.BytesIO()
timeout = None if manager.is_watch_completed(run_id, step_key) else 0
except ValueError:
result = io.BytesIO()
timeout = 0
if not result:
result = io.BytesIO()
return send_file(
result, as_attachment=True, attachment_filename=out_name, cache_timeout=timeout
)
return view
def download_dump_view(context):
context = check.inst_param(context, "context", WorkspaceProcessContext)
def view(run_id):
run = context.instance.get_run_by_id(run_id)
debug_payload = DebugRunPayload.build(context.instance, run)
check.invariant(run is not None)
out_name = f"{run_id}.gzip"
result = io.BytesIO()
with gzip.GzipFile(fileobj=result, mode="wb") as file:
debug_payload.write(file)
result.seek(0) # be kind, please rewind
return send_file(result, as_attachment=True, attachment_filename=out_name)
return view
def instantiate_app_with_views(
context, schema, app_path_prefix, target_dir=os.path.dirname(__file__)
):
|
def create_app_from_workspace(
workspace: Workspace, instance: DagsterInstance, path_prefix: str = ""
):
check.inst_param(workspace, "workspace", Workspace)
check.inst_param(instance, "instance", DagsterInstance)
check.str_param(path_prefix, "path_prefix")
if path_prefix:
if not path_prefix.startswith("/"):
raise Exception(f'The path prefix should begin with a leading "/": got {path_prefix}')
if path_prefix.endswith("/"):
raise Exception(f'The path prefix should not include a trailing "/": got {path_prefix}')
warn_if_compute_logs_disabled()
print("Loading repository...") # pylint: disable=print-call
context = WorkspaceProcessContext(instance=instance, workspace=workspace, version=__version__)
log_workspace_stats(instance, context)
schema = create_schema()
return instantiate_app_with_views(context, schema, path_prefix)
| app = Flask(
"dagster-ui",
static_url_path=app_path_prefix,
static_folder=os.path.join(target_dir, "./webapp/build"),
)
subscription_server = DagsterSubscriptionServer(schema=schema)
# Websocket routes
sockets = Sockets(app)
sockets.add_url_rule(
f"{app_path_prefix}/graphql",
"graphql",
dagster_graphql_subscription_view(subscription_server, context),
)
# HTTP routes
bp = Blueprint("routes", __name__, url_prefix=app_path_prefix)
bp.add_url_rule("/graphiql", "graphiql", lambda: redirect(f"{app_path_prefix}/graphql", 301))
bp.add_url_rule(
"/graphql",
"graphql",
DagsterGraphQLView.as_view(
"graphql",
schema=schema,
graphiql=True,
graphiql_template=PLAYGROUND_TEMPLATE,
executor=Executor(),
context=context,
),
)
bp.add_url_rule(
# should match the `build_local_download_url`
"/download/<string:run_id>/<string:step_key>/<string:file_type>",
"download_view",
download_log_view(context),
)
bp.add_url_rule(
"/download_debug/<string:run_id>",
"download_dump_view",
download_dump_view(context),
)
# these routes are specifically for the Dagit UI and are not part of the graphql
# API that we want other people to consume, so they're separate for now.
# Also grabbing the magic global request args dict so that notebook_view is testable
bp.add_url_rule("/dagit/notebook", "notebook", lambda: notebook_view(request.args))
bp.add_url_rule("/dagit_info", "sanity_view", info_view)
index_path = os.path.join(target_dir, "./webapp/build/index.html")
def index_view():
try:
with open(index_path) as f:
rendered_template = render_template_string(f.read())
return rendered_template.replace(
'src="/static', f'src="{app_path_prefix}/static'
).replace('href="/static', f'href="{app_path_prefix}/static')
except FileNotFoundError:
raise Exception(
"""Can't find webapp files. Probably webapp isn't built. If you are using
dagit, then probably it's a corrupted installation or a bug. However, if you are
developing dagit locally, your problem can be fixed as follows:
cd ./python_modules/
make rebuild_dagit"""
)
def error_redirect(_path):
return index_view()
bp.add_url_rule("/", "index_view", index_view)
bp.context_processor(lambda: {"app_path_prefix": app_path_prefix})
app.app_protocol = lambda environ_path_info: "graphql-ws"
app.register_blueprint(bp)
app.register_error_handler(404, error_redirect)
# if the user asked for a path prefix, handle the naked domain just in case they are not
# filtering inbound traffic elsewhere and redirect to the path prefix.
if app_path_prefix:
app.add_url_rule("/", "force-path-prefix", lambda: redirect(app_path_prefix, 301))
CORS(app)
return app |
table_borders.rs | use std::io::Read;
use std::str::FromStr;
use xml::attribute::OwnedAttribute;
use xml::reader::{EventReader, XmlEvent};
use super::*;
use crate::types::*;
impl ElementReader for TableBorders {
fn read<R: Read>(r: &mut EventReader<R>, _: &[OwnedAttribute]) -> Result<Self, ReaderError> {
let mut borders = TableBorders::with_empty();
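        // Consume child border elements (top/right/bottom/left/insideH/insideV),
        // folding each parsed border into `borders`, until the closing
        // tblBorders end element is reached.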
loop {
let e = r.next();
match e {
Ok(XmlEvent::StartElement {
attributes, name, ..
}) => {
let e = XMLElement::from_str(&name.local_name).unwrap();
match e {
XMLElement::Top => {
let attr = read_border(&attributes)?;
let mut border = TableBorder::new(TableBorderPosition::Top)
.border_type(attr.border_type)
.color(attr.color);
if let Some(size) = attr.size {
border = border.size(size as usize);
};
borders = borders.set(border);
continue;
}
XMLElement::Right => {
let attr = read_border(&attributes)?;
let mut border = TableBorder::new(TableBorderPosition::Right)
.border_type(attr.border_type)
.color(attr.color);
if let Some(size) = attr.size {
border = border.size(size as usize);
};
borders = borders.set(border);
continue;
}
XMLElement::Bottom => {
let attr = read_border(&attributes)?;
let mut border = TableBorder::new(TableBorderPosition::Bottom)
.border_type(attr.border_type)
.color(attr.color);
if let Some(size) = attr.size {
border = border.size(size as usize);
};
borders = borders.set(border);
continue;
}
XMLElement::Left => {
let attr = read_border(&attributes)?;
let mut border = TableBorder::new(TableBorderPosition::Left)
.border_type(attr.border_type)
.color(attr.color);
if let Some(size) = attr.size {
border = border.size(size as usize);
};
borders = borders.set(border);
continue;
}
XMLElement::InsideH => {
let attr = read_border(&attributes)?; | let mut border = TableBorder::new(TableBorderPosition::InsideH)
.border_type(attr.border_type)
.color(attr.color);
if let Some(size) = attr.size {
border = border.size(size as usize);
};
borders = borders.set(border);
continue;
}
XMLElement::InsideV => {
let attr = read_border(&attributes)?;
let mut border = TableBorder::new(TableBorderPosition::InsideV)
.border_type(attr.border_type)
.color(attr.color);
if let Some(size) = attr.size {
border = border.size(size as usize);
};
borders = borders.set(border);
continue;
}
_ => {}
}
}
Ok(XmlEvent::EndElement { name, .. }) => {
let e = XMLElement::from_str(&name.local_name).unwrap();
if e == XMLElement::TableBorders {
return Ok(borders);
}
}
Err(_) => return Err(ReaderError::XMLReadError),
_ => {}
}
}
}
} | |
ReportLayoutItem.js | "use strict";
var __extends = (this && this.__extends) || (function () {
var extendStatics = function (d, b) {
extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
return extendStatics(d, b);
};
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
Object.defineProperty(exports, "__esModule", { value: true });
exports.ReportLayoutItem = exports.ReportLayoutItemField = exports.createReportLayoutItem = void 0;
var core_1 = require("@sap-cloud-sdk/core");
/**
* @deprecated Since v1.6.0. Use [[ReportLayoutItem.build]] instead.
*/
function createReportLayoutItem(json) {
return ReportLayoutItem.build(json);
}
exports.createReportLayoutItem = createReportLayoutItem;
/**
* ReportLayoutItemField
* @typeparam EntityT - Type of the entity the complex type field belongs to.
*/
var ReportLayoutItemField = /** @class */ (function (_super) {
__extends(ReportLayoutItemField, _super);
/**
* Creates an instance of ReportLayoutItemField.
*
* @param fieldName - Actual name of the field as used in the OData request.
* @param fieldOf - Either the parent entity constructor of the parent complex type this field belongs to.
*/
function ReportLayoutItemField(fieldName, fieldOf) {
var _this = _super.call(this, fieldName, fieldOf, ReportLayoutItem) || this;
/**
* Representation of the [[ReportLayoutItem.fieldIdentifier]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.fieldIdentifier = new core_1.ComplexTypeStringPropertyField('FieldIdentifier', _this, 'Edm.String');
/**
* Representation of the [[ReportLayoutItem.parentType]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.parentType = new core_1.ComplexTypeNumberPropertyField('ParentType', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.type]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.type = new core_1.ComplexTypeEnumPropertyField('Type', _this);
/**
* Representation of the [[ReportLayoutItem.visible]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.visible = new core_1.ComplexTypeEnumPropertyField('Visible', _this);
/**
* Representation of the [[ReportLayoutItem.suppressZeros]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.suppressZeros = new core_1.ComplexTypeEnumPropertyField('SuppressZeros', _this);
/**
* Representation of the [[ReportLayoutItem.left]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.left = new core_1.ComplexTypeNumberPropertyField('Left', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.top]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.top = new core_1.ComplexTypeNumberPropertyField('Top', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.width]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.width = new core_1.ComplexTypeNumberPropertyField('Width', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.height]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.height = new core_1.ComplexTypeNumberPropertyField('Height', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.leftMargin]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.leftMargin = new core_1.ComplexTypeNumberPropertyField('LeftMargin', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.rightMargin]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.rightMargin = new core_1.ComplexTypeNumberPropertyField('RightMargin', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.topMargin]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.topMargin = new core_1.ComplexTypeNumberPropertyField('TopMargin', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.bottomMargin]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.bottomMargin = new core_1.ComplexTypeNumberPropertyField('BottomMargin', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.leftBorderLineThickness]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.leftBorderLineThickness = new core_1.ComplexTypeNumberPropertyField('LeftBorderLineThickness', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.rightBorderLineThickness]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.rightBorderLineThickness = new core_1.ComplexTypeNumberPropertyField('RightBorderLineThickness', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.topBorderLineThickness]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.topBorderLineThickness = new core_1.ComplexTypeNumberPropertyField('TopBorderLineThickness', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.bottomBorderLineThickness]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.bottomBorderLineThickness = new core_1.ComplexTypeNumberPropertyField('BottomBorderLineThickness', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.shadowThickness]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.shadowThickness = new core_1.ComplexTypeNumberPropertyField('ShadowThickness', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.backgroundRed]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.backgroundRed = new core_1.ComplexTypeNumberPropertyField('BackgroundRed', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.backgroundGreen]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.backgroundGreen = new core_1.ComplexTypeNumberPropertyField('BackgroundGreen', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.backgroundBlue]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.backgroundBlue = new core_1.ComplexTypeNumberPropertyField('BackgroundBlue', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.textRed]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.textRed = new core_1.ComplexTypeNumberPropertyField('TextRed', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.textGreen]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.textGreen = new core_1.ComplexTypeNumberPropertyField('TextGreen', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.textBlue]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.textBlue = new core_1.ComplexTypeNumberPropertyField('TextBlue', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.highlightRed]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.highlightRed = new core_1.ComplexTypeNumberPropertyField('HighlightRed', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.highlightGreen]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.highlightGreen = new core_1.ComplexTypeNumberPropertyField('HighlightGreen', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.highlightBlue]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.highlightBlue = new core_1.ComplexTypeNumberPropertyField('HighlightBlue', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.borderRed]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.borderRed = new core_1.ComplexTypeNumberPropertyField('BorderRed', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.borderGreen]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.borderGreen = new core_1.ComplexTypeNumberPropertyField('BorderGreen', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.borderBlue]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.borderBlue = new core_1.ComplexTypeNumberPropertyField('BorderBlue', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.groupNumber]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.groupNumber = new core_1.ComplexTypeNumberPropertyField('GroupNumber', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.fontName]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.fontName = new core_1.ComplexTypeStringPropertyField('FontName', _this, 'Edm.String');
/**
* Representation of the [[ReportLayoutItem.fontSize]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.fontSize = new core_1.ComplexTypeNumberPropertyField('FontSize', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.textStyle]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.textStyle = new core_1.ComplexTypeNumberPropertyField('TextStyle', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.horizontalAlignment]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.horizontalAlignment = new core_1.ComplexTypeEnumPropertyField('HorizontalAlignment', _this);
/**
* Representation of the [[ReportLayoutItem.lineBreak]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.lineBreak = new core_1.ComplexTypeEnumPropertyField('LineBreak', _this);
/**
* Representation of the [[ReportLayoutItem.pictureSize]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.pictureSize = new core_1.ComplexTypeEnumPropertyField('PictureSize', _this);
/**
* Representation of the [[ReportLayoutItem.dataSource]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.dataSource = new core_1.ComplexTypeEnumPropertyField('DataSource', _this);
/**
* Representation of the [[ReportLayoutItem.string]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.string = new core_1.ComplexTypeStringPropertyField('String', _this, 'Edm.String');
/**
* Representation of the [[ReportLayoutItem.variableNumber]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.variableNumber = new core_1.ComplexTypeNumberPropertyField('VariableNumber', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.tableName]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.tableName = new core_1.ComplexTypeStringPropertyField('TableName', _this, 'Edm.String');
/**
* Representation of the [[ReportLayoutItem.fieldName]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.fieldName = new core_1.ComplexTypeStringPropertyField('FieldName', _this, 'Edm.String');
/**
* Representation of the [[ReportLayoutItem.displayDescription]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.displayDescription = new core_1.ComplexTypeEnumPropertyField('DisplayDescription', _this);
/**
* Representation of the [[ReportLayoutItem.editable]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.editable = new core_1.ComplexTypeNumberPropertyField('Editable', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.itemNumber]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.itemNumber = new core_1.ComplexTypeNumberPropertyField('ItemNumber', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.verticalAlignment]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.verticalAlignment = new core_1.ComplexTypeEnumPropertyField('VerticalAlignment', _this);
/**
* Representation of the [[ReportLayoutItem.sortLevel]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.sortLevel = new core_1.ComplexTypeNumberPropertyField('SortLevel', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.reverseSort]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.reverseSort = new core_1.ComplexTypeEnumPropertyField('ReverseSort', _this);
/**
* Representation of the [[ReportLayoutItem.sortType]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.sortType = new core_1.ComplexTypeEnumPropertyField('SortType', _this);
/**
* Representation of the [[ReportLayoutItem.unique]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.unique = new core_1.ComplexTypeEnumPropertyField('Unique', _this);
/**
* Representation of the [[ReportLayoutItem.setAsGroup]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.setAsGroup = new core_1.ComplexTypeEnumPropertyField('SetAsGroup', _this);
/**
* Representation of the [[ReportLayoutItem.newPage]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.newPage = new core_1.ComplexTypeEnumPropertyField('NewPage', _this);
/**
* Representation of the [[ReportLayoutItem.printAsBarCode]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.printAsBarCode = new core_1.ComplexTypeEnumPropertyField('PrintAsBarCode', _this);
/**
* Representation of the [[ReportLayoutItem.linkToField]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.linkToField = new core_1.ComplexTypeStringPropertyField('LinkToField', _this, 'Edm.String');
/**
* Representation of the [[ReportLayoutItem.barCodeStandard]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.barCodeStandard = new core_1.ComplexTypeEnumPropertyField('BarCodeStandard', _this);
/**
* Representation of the [[ReportLayoutItem.displayTotalAsAWord]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.displayTotalAsAWord = new core_1.ComplexTypeEnumPropertyField('DisplayTotalAsAWord', _this);
/**
* Representation of the [[ReportLayoutItem.blockFontChange]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.blockFontChange = new core_1.ComplexTypeEnumPropertyField('BlockFontChange', _this);
/**
* Representation of the [[ReportLayoutItem.parentIndex]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.parentIndex = new core_1.ComplexTypeNumberPropertyField('ParentIndex', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.itemIndex]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.itemIndex = new core_1.ComplexTypeNumberPropertyField('ItemIndex', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.stringLength]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.stringLength = new core_1.ComplexTypeNumberPropertyField('StringLength', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.stringFiller]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.stringFiller = new core_1.ComplexTypeStringPropertyField('StringFiller', _this, 'Edm.String');
/**
* Representation of the [[ReportLayoutItem.relateToField]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.relateToField = new core_1.ComplexTypeStringPropertyField('RelateToField', _this, 'Edm.String');
/**
* Representation of the [[ReportLayoutItem.nextSegmentItemNumber]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.nextSegmentItemNumber = new core_1.ComplexTypeStringPropertyField('NextSegmentItemNumber', _this, 'Edm.String');
/**
* Representation of the [[ReportLayoutItem.heightAdjustments]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.heightAdjustments = new core_1.ComplexTypeEnumPropertyField('HeightAdjustments', _this);
/**
* Representation of the [[ReportLayoutItem.duplicateRepetitiveArea]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.duplicateRepetitiveArea = new core_1.ComplexTypeEnumPropertyField('DuplicateRepetitiveArea', _this);
/**
* Representation of the [[ReportLayoutItem.numberOfLinesInRepetitiveArea]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.numberOfLinesInRepetitiveArea = new core_1.ComplexTypeNumberPropertyField('NumberOfLinesInRepetitiveArea', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.distanceToRepetitiveDuplicate]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.distanceToRepetitiveDuplicate = new core_1.ComplexTypeNumberPropertyField('DistanceToRepetitiveDuplicate', _this, 'Edm.Int32');
/**
* Representation of the [[ReportLayoutItem.hideRepetitiveAreaIfEmpty]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.hideRepetitiveAreaIfEmpty = new core_1.ComplexTypeEnumPropertyField('HideRepetitiveAreaIfEmpty', _this);
/**
* Representation of the [[ReportLayoutItem.displayRepetitiveAreaFooterOnAllPages]] property for query construction.
* Use to reference this property in query operations such as 'filter' in the fluent request API.
*/
_this.displayRepetitiveAreaFooterOnAllPages = new core_1.ComplexTypeEnumPropertyField('DisplayRepetitiveAreaFooterOnAllPages', _this);
return _this;
}
return ReportLayoutItemField;
}(core_1.ComplexTypeField));
exports.ReportLayoutItemField = ReportLayoutItemField;
var ReportLayoutItem;
(function (ReportLayoutItem) {
/**
* Metadata information on all properties of the `ReportLayoutItem` complex type.
*/
ReportLayoutItem._propertyMetadata = [{
originalName: 'FieldIdentifier',
name: 'fieldIdentifier',
type: 'Edm.String',
isCollection: false
}, {
originalName: 'ParentType',
name: 'parentType',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'Type',
name: 'type',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'Visible',
name: 'visible',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'SuppressZeros',
name: 'suppressZeros',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'Left',
name: 'left',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'Top',
name: 'top',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'Width',
name: 'width',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'Height',
name: 'height',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'LeftMargin',
name: 'leftMargin',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'RightMargin',
name: 'rightMargin',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'TopMargin',
name: 'topMargin',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'BottomMargin',
name: 'bottomMargin',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'LeftBorderLineThickness',
name: 'leftBorderLineThickness',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'RightBorderLineThickness',
name: 'rightBorderLineThickness',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'TopBorderLineThickness',
name: 'topBorderLineThickness',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'BottomBorderLineThickness',
name: 'bottomBorderLineThickness',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'ShadowThickness',
name: 'shadowThickness',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'BackgroundRed',
name: 'backgroundRed',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'BackgroundGreen',
name: 'backgroundGreen',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'BackgroundBlue',
name: 'backgroundBlue',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'TextRed',
name: 'textRed',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'TextGreen',
name: 'textGreen',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'TextBlue',
name: 'textBlue',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'HighlightRed',
name: 'highlightRed',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'HighlightGreen',
name: 'highlightGreen',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'HighlightBlue',
name: 'highlightBlue',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'BorderRed',
name: 'borderRed',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'BorderGreen',
name: 'borderGreen',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'BorderBlue',
name: 'borderBlue',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'GroupNumber',
name: 'groupNumber',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'FontName',
name: 'fontName',
type: 'Edm.String',
isCollection: false
}, {
originalName: 'FontSize',
name: 'fontSize',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'TextStyle',
name: 'textStyle',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'HorizontalAlignment',
name: 'horizontalAlignment',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'LineBreak',
name: 'lineBreak',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'PictureSize',
name: 'pictureSize',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'DataSource',
name: 'dataSource',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'String',
name: 'string',
type: 'Edm.String',
isCollection: false
}, {
originalName: 'VariableNumber',
name: 'variableNumber',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'TableName',
name: 'tableName',
type: 'Edm.String',
isCollection: false
}, {
originalName: 'FieldName',
name: 'fieldName',
type: 'Edm.String',
isCollection: false
}, {
originalName: 'DisplayDescription',
name: 'displayDescription',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'Editable',
name: 'editable',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'ItemNumber',
name: 'itemNumber',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'VerticalAlignment',
name: 'verticalAlignment',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'SortLevel',
name: 'sortLevel',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'ReverseSort',
name: 'reverseSort',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'SortType',
name: 'sortType',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'Unique',
name: 'unique',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'SetAsGroup',
name: 'setAsGroup',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'NewPage',
name: 'newPage',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'PrintAsBarCode',
name: 'printAsBarCode',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'LinkToField',
name: 'linkToField',
type: 'Edm.String',
isCollection: false
}, {
originalName: 'BarCodeStandard',
name: 'barCodeStandard',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'DisplayTotalAsAWord',
name: 'displayTotalAsAWord',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'BlockFontChange',
name: 'blockFontChange',
type: 'Edm.Enum', | isCollection: false
}, {
originalName: 'ParentIndex',
name: 'parentIndex',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'ItemIndex',
name: 'itemIndex',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'StringLength',
name: 'stringLength',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'StringFiller',
name: 'stringFiller',
type: 'Edm.String',
isCollection: false
}, {
originalName: 'RelateToField',
name: 'relateToField',
type: 'Edm.String',
isCollection: false
}, {
originalName: 'NextSegmentItemNumber',
name: 'nextSegmentItemNumber',
type: 'Edm.String',
isCollection: false
}, {
originalName: 'HeightAdjustments',
name: 'heightAdjustments',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'DuplicateRepetitiveArea',
name: 'duplicateRepetitiveArea',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'NumberOfLinesInRepetitiveArea',
name: 'numberOfLinesInRepetitiveArea',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'DistanceToRepetitiveDuplicate',
name: 'distanceToRepetitiveDuplicate',
type: 'Edm.Int32',
isCollection: false
}, {
originalName: 'HideRepetitiveAreaIfEmpty',
name: 'hideRepetitiveAreaIfEmpty',
type: 'Edm.Enum',
isCollection: false
}, {
originalName: 'DisplayRepetitiveAreaFooterOnAllPages',
name: 'displayRepetitiveAreaFooterOnAllPages',
type: 'Edm.Enum',
isCollection: false
}];
/**
* @deprecated Since v1.25.0. Use `deserializeComplexTypeV2` or `deserializeComplexTypeV4` of the `@sap-cloud-sdk/core` package instead.
*/
function build(json) {
return core_1.deserializeComplexTypeV4(json, ReportLayoutItem);
}
ReportLayoutItem.build = build;
})(ReportLayoutItem = exports.ReportLayoutItem || (exports.ReportLayoutItem = {}));
//# sourceMappingURL=ReportLayoutItem.js.map | |
helpers.py | from typing import Union
import math
AbstractText = Union[int, bytes]
def | (i: int) -> int:
"""Returns the minimal amount of bytes needed to represent unsigned integer `i`."""
    # we compute log(i + 1) to correct for the fact that a single byte can only hold values up to 255, not 256:
    # e.g. math.log(0x100, 0x100) = 1, but 0x100 needs 2 bytes
return math.ceil(math.log(i + 1, 0x100))
def bit_length(i: int) -> int:
"""Returns the minimal amount of bits needed to represent unsigned integer `i`."""
return math.ceil(math.log(i + 1, 2))
def int_to_bytes(i: int, length: int=-1) -> bytes:
    """Converts integer to a MSB-first byte sequence, using the least number of bytes possible unless `length` is given."""
return i.to_bytes(byte_length(i) if length == -1 else length, "big")
def bytes_to_int(b: bytes) -> int:
"""Converts MSB-first byte sequence to an integer"""
return int.from_bytes(b, "big")
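# Illustrative sanity checks for the helpers above (a minimal sketch; the test
# values are assumptions chosen to avoid floating-point edge cases in math.log):
if __name__ == "__main__":
    assert byte_length(255) == 1 and byte_length(256) == 2
    assert bit_length(5) == 3 and bit_length(200) == 8
    assert int_to_bytes(0x1234) == b"\x12\x34"
    assert bytes_to_int(b"\x12\x34") == 0x1234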
| byte_length |
compare.py | # compare contents of two files in binary form
import sys | with open(srcFile,"rb") as src:
srcData = src.read()
with open(destFile,"rb") as dest:
destData = dest.read()
checked = False
if(len(srcData)!=len(destData)):
        print("Files are unequal between ", srcFile, destFile, ": the file sizes are different")
checked = True
for i in range(min(len(srcData),len(destData))):
if(srcData[i] != destData[i]):
            print("unequal index:%d, srcData:%d, destData:%d " % (i, srcData[i], destData[i]))
checked = True
if checked:
print('Check Result: unequal')
else:
print('Check Result: equal')
def main():
if(len(sys.argv) !=3 ):
        print('Wrong parameters, need two files')
return
compareFile(sys.argv[1],sys.argv[2])
if __name__ == '__main__':
main() |
def compareFile(srcFile,destFile): |
loan_estimator.py | # This file is to get a rough estimation of how much you need to pay or how many months you need to pay for a loan
import pandas as pd
import numpy as np
from IPython.display import display
def group(number):
    """show money in lakhs and crores (Indian way of presenting money)"""
s = '%d' % number
groups = []
groups.append(s[-3:])
s = s[:-3]
while s and s[-1].isdigit():
groups.append(s[-2:])
s = s[:-2]
return s + ','.join(reversed(groups))
class loan:
def __init__(self, R=8.1, principal=30, years=5):
"""R is yearly interest
principal is principal amount in lakhs
years = number of years
"""
self.R = R * 0.01
self.r = R * 0.01 * (1 / 12)
self.principal = principal * 100000
self.years = years
self.num_months = self.years * 12
self.months = {"Jan": 31, "Feb": 28, "Mar": 31, "Apr": 30, "May": 31, "June": 30, "Jul": 31, "Aug": 31,
"Sep": 30, "Oct": 31, "Nov": 30, "Dec": 31}
    def find_monthly_emi_flat(self, print_=True):
        """ find how much EMI needs to be paid given some principal, interest, and number of months when the interest scheme is flat"""
total = self.principal * (1 + self.R * (self.num_months / 12))
if print_:
print("------------- flat interest -------------------")
print("total amount you are paying over full period:", total)
print("monthly installment/emi : {}".format(total / self.num_months))
return total, total / self.num_months
    def num_months_emi_diminishing(self, emi, principal=0, interest=0, print_=True):
        """find the number of months you need to pay for, if you are paying emi every month"""
        """emi is in rupees; principal defaults to self.principal (in rupees); interest defaults to the monthly rate self.r"""
"""n = np.log((E/r)/(E/r -P))/np.log(1+r) """
if not principal:
principal = self.principal
if not interest:
interest = self.r
num_months = np.log((emi / interest) / (emi / interest - principal)) / np.log(1 + interest)
if print_:
print("------------- diminishing interest -------------------")
print("you need to pay {} monthly, for {} months".format(emi, num_months))
return num_months
    def find_monthly_emi_diminishing(self, num_months=0, principal=0, print_=True):
        """ find how much EMI needs to be paid given some principal (in lakhs), interest, and number of months when the interest scheme is diminishing"""
        """EMI = P*r*(1 + 1/(np.power(1+r, num_months)-1))"""
if not num_months:
num_months = self.num_months
if not principal:
principal = self.principal
else:
principal *= 100000
monthly_emi = principal * self.r * (1 + 1 / (np.power(1 + self.r, num_months) - 1))
if print_:
print("------------- diminishing interest -------------------")
print(" you need to pay {} monthly, for {} months".format(monthly_emi, num_months))
print("total amount you will pay over full period is roughly {}".format(monthly_emi * num_months))
return monthly_emi
def confirm_diminishing(self, emi, print_=False):
""" function to confirm if the interest scheme is dimishing"""
principal = self.principal
i = 1
while principal > 0:
principal += ((self.r) * principal - emi)
if print_:
print(i, principal)
i += 1
if abs(principal / self.principal) < 0.001:
print("final net amount is {} after {} months".format(principal, i - 1))
return principal, i
## Usage
R = 10.5  # 10.5% yearly interest rate
principal = 30 # principal is 30 lakhs
years = 4.5 # loan term period is 4.5 years
loan1 = loan(R,principal,years) # initialize a loan instance
loan1.find_monthly_emi_flat()
loan1.num_months_emi_diminishing(35000)
loan1.find_monthly_emi_diminishing()
#-----------output-----------------------
# ------------- flat interest -------------------
# total amount you are paying over full period: 4417500.0
# monthly installment/emi : 81805.55555555556
# ------------- diminishing interest -------------------
# you need to pay 35000 monthly, for 159.1257820098328 months
# ------------- diminishing interest -------------------
# you need to pay 69948.58010333449 monthly, for 54.0 months
# total amount you will pay over full period is roughly 3777223.3255800623
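# Illustrative cross-check (a sketch, assuming the loan class above): the closed-form
# diminishing-EMI formula with P = 30 lakhs, yearly rate 10.5% and 54 months should
# reproduce the ~69948.58 monthly figure shown in the sample output above.
_P, _r, _n = 30 * 100000, 0.105 / 12, 54
_check_emi = _P * _r * (1 + 1 / (np.power(1 + _r, _n) - 1))
# print(round(_check_emi, 2)) # ~69948.58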
def get_df():
# make a table to find how much emi to be paid for different principals over different tenure/periods
loan1 = loan(10.5,principal = 30, years =5)
# print(loan1.find_monthly_emi_diminishing())
years = [2,3,4,5]
amounts = [15,20,25]
yearss = [str(x)+'y' for x in years]
df = pd.DataFrame(columns=yearss)
total = pd.DataFrame(columns = yearss)
for amount in amounts:
arr=[]
arr1 = []
for year in years:
temp = loan1.find_monthly_emi_diminishing(num_months=year*12, principal=amount,print_ = False)
arr.append(group(round(int(temp),-2))) # rounding to closest hundred
arr1.append(group(round(int(temp*year*12),-2)))
df.loc[str(amount)+'Lks']=arr
total.loc[str(amount)+'Lks']=arr1
print("--------------------- emi ------------------")
display(df)
print("---------------------- total ---------------------") | display(total)
# get_df() |
|
shop_feature.rs | use crate::{
features::{GeoTile, GeoTileProperties, GeoTilesDataStructure, Geometry, ShopType},
operations::{line_string_operations::draw_line, address_from_properties, property_to_option_string},
};
use osm_geo_mapper_macros::{ extract_type_from_string, geotile_from_properties };
use paste::paste; // Required for the extract_type_from_string macro.
use geo_types as gt;
use log::warn;
use std::sync::Arc;
pub fn get_shop_geo_tile(props: &GeoTileProperties, geometry: Geometry) -> GeoTile |
pub fn draw_shop_line_string(
geo_tile: Arc<GeoTile>,
data_structure: GeoTilesDataStructure,
_shop_type: ShopType,
line_string: gt::LineString<f64>,
) {
let points = line_string.into_points();
let mut first_iteration = true;
let mut last_point = points[0];
for point in points {
if first_iteration {
first_iteration = false;
continue;
}
draw_line(&last_point, &point, 1, geo_tile.clone(), data_structure.clone());
last_point = point;
}
}
| {
let shop_type_str = props["shop"].as_str().unwrap();
let shop_type = extract_type_from_string!(shop_type_str<props> => ShopType [Agrarian, Alcohol, Anime, Antiques, Appliance, Art, Atv, BabyGoods, Bag, Bakery, BathroomFurnishing, Beauty, Bed, Beverages, Bicycle, Boat, Bookmaker, Books, Boutique, BrewingSupplies, Butcher, Camera, Candles, Cannabis, Car, Caravan, CarParts, Carpet, CarRepair, Charity, Cheese, Chemist, Chocolate, Clothes, Coffee, Collector, Computer, Confectionery, Convenience, Copyshop, Cosmetics, Craft, Curtain, Dairy, Deli, DepartmentStore, Doityourself, Doors, Drugstore, DryCleaning, ECigarette, Electrical, Electronics, Energy, Erotic, Fabric, Farm, Fashion, FashionAccessories, Fireplace, Fishing, Flooring, Florist, Frame, FrozenFood, Fuel, FuneralDirectors, Furniture, Games, GardenCentre, GardenFurniture, Gas, General, Gift, Glaziery, Golf, Greengrocer, Groundskeeping, Hairdresser, HairdresserSupply, Hardware, HealthFood, HearingAids, Herbalist, Hifi, HouseholdLinen, Houseware, Hunting, IceCream, InteriorDecoration, Jetski, Jewelry, Kiosk, Kitchen, Lamps, Laundry, Leather, Lighting, Locksmith, Lottery, Mall, Massage, MedicalSupply, MilitarySurplus, MobilePhone, Model, MoneyLender, Motorcycle, Music, MusicalInstrument, Newsagent, NutritionSupplements, Optician, Organic, Outdoor, Outpost, Paint, Party, Pasta, Pastry, Pawnbroker, Perfumery, PestControl, Pet, PetGrooming, Photo, Pyrotechnics, Radiotechnics, Religion, ScubaDiving, Seafood, SecondHand, Security, Sewing, Shoes, Ski, Snowmobile, Spices, Sports, Stationery, StorageRental, Supermarket, SwimmingPool, Tailor, Tattoo, Tea, Ticket, Tiles, Tobacco, Toys, Trade, Trailer, TravelAgency, Trophy, Tyres, Unclassified, User, Vacant, VacuumCleaner, VarietyStore, Video, VideoGames, Watches, Water, Weapons, Wholesale, WindowBlind, Windows, Wine, Wool]);
geotile_from_properties!(geometry<props> => Shop<shop_type> [agrarian, alcohol, authorization, bakehouse, beauty, books, branch, brand, brewery, bulk_purchase, butcher, cash_withdrawal, clothes, coffee, collector, cuisine, delivery, denomination, description, diet, distillery, drink, dry_cleaning, email, fair_trade, female, fuel, furniture, ice_cream, industrial, laundry_service, lgbtq, licensed, lottery, male, massage, medical_supply, membership, min_age, music, music_genre, musical_instrument, name, opening_hours, operator, organic, origin, oven, ownership, parts, payment, pet, phone, produce, product, religion, rental, repair, reservation, sales, salt, second_hand, self_service, service, shoes, stamps, tobacco, trade, unisex, vending, video_games, website, wheelchair, wholesale, winery]);
} |
testing.py | import bagel
import numpy as np
from sklearn.metrics import precision_recall_curve
from typing import Sequence, Tuple, Dict, Optional
def _adjust_scores(labels: np.ndarray,
scores: np.ndarray,
delay: Optional[int] = None,
inplace: bool = False) -> np.ndarray:
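# Delay-tolerant score adjustment (point-adjust style): within each contiguous
# anomalous segment, scores in the first `delay + 1` points are replaced by their
# maximum, and the rest of the segment is raised to at least that value, so a
# detection made within the allowed delay counts for the whole segment.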
if np.shape(scores) != np.shape(labels):
raise ValueError('`labels` and `scores` must have same shape')
if delay is None:
delay = len(scores)
splits = np.where(labels[1:] != labels[:-1])[0] + 1
is_anomaly = labels[0] == 1
adjusted_scores = np.copy(scores) if not inplace else scores
pos = 0
for part in splits:
if is_anomaly:
ptr = min(pos + delay + 1, part)
adjusted_scores[pos: ptr] = np.max(adjusted_scores[pos: ptr])
adjusted_scores[ptr: part] = np.maximum(adjusted_scores[ptr: part], adjusted_scores[pos])
is_anomaly = not is_anomaly
pos = part
part = len(labels)
if is_anomaly:
ptr = min(pos + delay + 1, part)
adjusted_scores[pos: part] = np.max(adjusted_scores[pos: ptr])
return adjusted_scores
def | (series_list: Sequence, missing: np.ndarray) -> Tuple[np.ndarray, ...]:
ret = []
for series in series_list:
series = np.copy(series)
ret.append(series[missing != 1])
return tuple(ret)
def _best_f1score(labels: np.ndarray, scores: np.ndarray) -> Tuple[float, float, float, float]:
precision, recall, thresholds = precision_recall_curve(y_true=labels, probas_pred=scores)
f1score = 2 * precision * recall / np.clip(precision + recall, a_min=1e-8, a_max=None)
best_threshold = thresholds[np.argmax(f1score)]
best_precision = precision[np.argmax(f1score)]
best_recall = recall[np.argmax(f1score)]
return best_threshold, best_precision, best_recall, np.max(f1score)
def get_test_results(labels: np.ndarray,
scores: np.ndarray,
missing: np.ndarray,
window_size: int,
delay: Optional[int] = None) -> Dict:
labels = labels[window_size - 1:]
scores = scores[window_size - 1:]
missing = missing[window_size - 1:]
adjusted_scores = _adjust_scores(labels=labels, scores=scores, delay=delay)
adjusted_labels, adjusted_scores = _ignore_missing([labels, adjusted_scores], missing=missing)
threshold, precision, recall, f1score = _best_f1score(labels=adjusted_labels, scores=adjusted_scores)
return {'threshold': threshold,
'precision': precision,
'recall': recall,
'f1score': f1score}
class KPIStats:
def __init__(self, kpi: bagel.data.KPI):
self.num_points = len(kpi.values)
self.num_missing = len(kpi.missing[kpi.missing == 1])
self.num_anomaly = len(kpi.labels[kpi.labels == 1])
self.missing_rate = self.num_missing / self.num_points
self.anomaly_rate = self.num_anomaly / self.num_points
def get_kpi_stats(*kpis: bagel.data.KPI) -> Tuple[KPIStats, ...]:
ret = []
for kpi in kpis:
ret.append(KPIStats(kpi))
return tuple(ret)
| _ignore_missing |
seedGenerator.go | package tryte_cipher
import (
"crypto/rand"
"encoding/base64"
"errors"
"math/big"
)
const letters = "9ABCDEFGHIJKLMNOPQRSTUVWXYZ" //pool of letters to generate IOTA seed
func RandomPassphraseGenerator(n int) (string, error) {
if n < 8 {
// a longer passphrase is more secure; enforce a minimum length
return "", errors.New("number of bytes cannot be less than 8")
}
b, err := GenerateRandomBytes(n)
return base64.URLEncoding.EncodeToString(b), err
}
// GenerateRandomBytes returns securely generated random bytes.
// It will return an error if the system's secure random
// number generator fails to function correctly, in which
// case the caller should not continue.
func | (n int) ([]byte, error) {
b := make([]byte, n)
_, err := rand.Read(b)
// Note that err == nil only if we read len(b) bytes.
if err != nil {
return nil, err
}
return b, nil
}
// GenerateRandomSeed returns a securely generated string.
// It will return an error if a secure random int generator
// fails to function correctly
func GenerateRandomSeed() (string, error) {
ints, err := generateRandomInts(81)
if err != nil {
return "", err
}
token := make([]byte, 81)
for i, x := range ints {
token[i] = intToCharByte(x)
}
return string(token), nil
}
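// generateRandomInts draws n uniformly distributed values in [0, 27), one index per
// character of the 27-letter seed alphabet defined above.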
func generateRandomInts(n int) ([]int64, error) {
ints := make([]int64, n)
for i := range ints {
randomInt, err := rand.Int(rand.Reader, big.NewInt(27))
if err != nil {
return nil, err
}
ints[i] = randomInt.Int64()
}
return ints, nil
}
func intToCharByte(i int64) byte {
return letters[i]
}
| GenerateRandomBytes |
mdreplace_test.go | package main
import (
"strings"
"testing"
)
type exp struct {
name string
in string
ot *string
err error
}
// in these tests, the expected output is the same as the input
// if output is not specified (helps to test idempotency)
var tests = []exp{
{
name: "simple test",
in: "This is a test",
},
{
name: "code fence simple test",
in: `This is a test
` + "```json" + `
contains a code block
` + "```" + `
Something`,
},
{
name: "__TEMPLATE block simple",
in: `This is a test
<!-- __TEMPLATE: echo -n hello world
{{.}}
-->
hello world
<!-- END -->
Something`,
},
{
name: "__TEMPLATE block that references $DOLLAR",
in: `This is a test
<!-- __TEMPLATE: echo -n hello world $DOLLAR
{{.}}
-->
hello world $
<!-- END -->
Something`,
},
{
name: "__TEMPLATE block that contains a code fence block",
in: `This is a test
<!-- __TEMPLATE: echo -n hello world
` + "```" + `
{{.}}
` + "```" + `
-->
` + "```" + `
hello world
` + "```" + `
<!-- END -->
Something`,
},
{
name: "__TEMPLATE block quoted args",
in: `This is a test
<!-- __TEMPLATE: echo -en "hello world"
{{.}}
-->
hello world
<!-- END -->
Something`,
},
{
name: "__TEMPLATE block using lines func",
in: `This is a test
<!-- __TEMPLATE: echo -en "hello\nworld"
{{ range (lines .) -}}
{{.}}
{{end -}}
-->
hello
world
<!-- END -->
Something`,
},
{
name: "__JSON block simple",
in: `This is a test
<!-- __JSON: go list -json .
{{.ImportPath}}
-->
myitcv.io/cmd/mdreplace
<!-- END -->
Something`,
},
{
name: "__JSON block with bad original contents",
in: `This is a test
<!-- __JSON: go list -json .
{{.ImportPath}}
-->
rubbish
<!-- END -->
Something`,
ot: strVal(`This is a test
<!-- __JSON: go list -json .
{{.ImportPath}}
-->
myitcv.io/cmd/mdreplace
<!-- END -->
Something`),
},
{
name: "__TEMPLATE nested quoted string",
in: `<!-- __TEMPLATE: sh -c "BANANA=fruit; echo -n \"${DOLLAR}BANANA\""
{{.}}
-->
fruit
<!-- END -->
`,
},
}
func strVal(s string) *string |
func TestSimple(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
debugf(">>>>>>>>>\n")
in := strings.NewReader(test.in)
out := new(strings.Builder)
err := run(in, out)
if (test.err == nil) != (err == nil) {
t.Fatalf("unexpected error; wanted [%v]; got [%v]", test.err, err)
}
if test.err != nil && err != nil && test.err.Error() != err.Error() {
t.Fatalf("incorrect error; wanted [%v]; got [%v]", test.err, err)
}
expOut := test.in
if test.ot != nil {
expOut = *(test.ot)
}
if v := out.String(); v != expOut {
t.Fatalf("incorrect output; wanted:\n\n%q\n\ngot:\n\n%q\n", expOut, v)
}
})
}
}
| {
return &s
} |
index.js | "use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var TRANSACTION_TYPE;
(function (TRANSACTION_TYPE) {
TRANSACTION_TYPE[TRANSACTION_TYPE["GENESIS"] = 1] = "GENESIS";
TRANSACTION_TYPE[TRANSACTION_TYPE["PAYMENT"] = 2] = "PAYMENT";
TRANSACTION_TYPE[TRANSACTION_TYPE["ISSUE"] = 3] = "ISSUE";
TRANSACTION_TYPE[TRANSACTION_TYPE["TRANSFER"] = 4] = "TRANSFER";
TRANSACTION_TYPE[TRANSACTION_TYPE["REISSUE"] = 5] = "REISSUE"; | TRANSACTION_TYPE[TRANSACTION_TYPE["CANCEL_LEASE"] = 9] = "CANCEL_LEASE";
TRANSACTION_TYPE[TRANSACTION_TYPE["ALIAS"] = 10] = "ALIAS";
TRANSACTION_TYPE[TRANSACTION_TYPE["MASS_TRANSFER"] = 11] = "MASS_TRANSFER";
TRANSACTION_TYPE[TRANSACTION_TYPE["DATA"] = 12] = "DATA";
TRANSACTION_TYPE[TRANSACTION_TYPE["SET_SCRIPT"] = 13] = "SET_SCRIPT";
TRANSACTION_TYPE[TRANSACTION_TYPE["SPONSORSHIP"] = 14] = "SPONSORSHIP";
})(TRANSACTION_TYPE = exports.TRANSACTION_TYPE || (exports.TRANSACTION_TYPE = {}));
var DATA_FIELD_TYPE;
(function (DATA_FIELD_TYPE) {
DATA_FIELD_TYPE["INTEGER"] = "integer";
DATA_FIELD_TYPE["BOOLEAN"] = "boolean";
DATA_FIELD_TYPE["STRING"] = "string";
DATA_FIELD_TYPE["BINARY"] = "binary";
})(DATA_FIELD_TYPE = exports.DATA_FIELD_TYPE || (exports.DATA_FIELD_TYPE = {})); | TRANSACTION_TYPE[TRANSACTION_TYPE["BURN"] = 6] = "BURN";
TRANSACTION_TYPE[TRANSACTION_TYPE["EXCHANGE"] = 7] = "EXCHANGE";
TRANSACTION_TYPE[TRANSACTION_TYPE["LEASE"] = 8] = "LEASE"; |
safe-html.pipe.ts | import { Pipe, PipeTransform } from '@angular/core';
import {DomSanitizer} from "@angular/platform-browser";
@Pipe({
name: 'safeHtml'
})
export class | implements PipeTransform {
constructor(private sanitizer:DomSanitizer){}
// transform(value: any, ...args: any[]): any {
// return null;
// }
transform(html) {
return this.sanitizer.bypassSecurityTrustHtml(html);
}
}
| SafeHtmlPipe |
services.rs | use std::sync::Arc;
use crate::config::Config;
use crate::database::Database;
use crate::modules::auth::AuthService;
use crate::modules::post::{PostRepository, PostService};
use crate::modules::user::{UserRepository, UserService};
pub struct | {
pub auth: Arc<AuthService>,
pub post: Arc<PostService>,
pub user: Arc<UserService>,
}
impl Services {
pub fn new(config: &Config, database: Database) -> Self {
let database = Arc::new(database);
let user_repository = Arc::new(UserRepository::new(Arc::clone(&database)));
let user_service = Arc::new(UserService::new(Arc::clone(&user_repository)));
let post_repository = Arc::new(PostRepository::new(Arc::clone(&database)));
let post_service = Arc::new(PostService::new(post_repository));
let auth_service = Arc::new(AuthService::new(config, Arc::clone(&user_service)));
Self {
auth: auth_service,
post: post_service,
user: user_service,
}
}
}
| Services |
generational.rs | #![allow(unused)]
use super::naive_copying::GcState as Space;
use rt::Universe;
#[repr(C)]
pub struct GcState { | impl GcState {
pub unsafe fn new(major_size: usize) -> Self {
GcState {
minor: Space::new(30000), // 64 KB L1 d-cache
major: Space::new(major_size),
}
}
pub fn set_universe(&mut self, u: &Universe) {
self.minor.set_universe(u);
self.major.set_universe(u);
}
} | minor: Space,
major: Space,
}
|
hook-pygame.py | """
Binaries hook for pygame; this seems to be required for pygame 2.0 on Windows,
otherwise some essential DLLs will not be transferred to the exe.
It also adds hooks for the data resources that pygame uses ("datas"), so they
work correctly with PyInstaller.
"""
import os
import platform
from pygame import __file__ as pygame_main_file
# Get pygame's folder
pygame_folder = os.path.dirname(os.path.abspath(pygame_main_file))
# datas is the variable that pyinstaller looks for while processing hooks
datas = []
# exclude some unneeded binaries
exclude_bin = ('libFLAC-8', 'libfreetype-6', 'libjpeg-9', 'libmodplug-1', 'libmpg123-0', 'libogg-0', 'libopus-0',
'libopusfile-0', 'libpng16-16', 'libtiff-5', 'libvorbis-0', 'libvorbisfile-3', 'libwebp-7', 'portmidi',
'SDL2_image', 'SDL2_mixer', 'SDL2_ttf')
# A helper to append the relative path of a resource to hook variable - datas
def | (file_path):
global datas
res_path = os.path.join(pygame_folder, file_path)
if os.path.exists(res_path):
datas.append((res_path, "pygame"))
# First append the font file, then based on the OS, append pygame icon file
_append_to_datas("freesansbold.ttf")
if platform.system() == "Darwin":
_append_to_datas("pygame_icon.tiff")
else:
_append_to_datas("pygame_icon.bmp")
if platform.system() == "Windows":
from PyInstaller.utils.hooks import collect_dynamic_libs
pre_binaries = collect_dynamic_libs('pygame')
binaries = []
for b in pre_binaries:
binary, location = b
filename = os.path.split(binary)[-1]
if filename.removesuffix('.dll') in exclude_bin:
print('Custom pygame hook excluding binary:', filename)
continue
# settles all the DLLs into the top level folder, which prevents duplication
# with the DLLs already being put there.
binaries.append((binary, "."))
| _append_to_datas |
Final.min.py | import cv2
import numpy as np
import os
subjects = ["","Mama","Samin","Delwar"]
def detect_faces(colored_img, scaleFactor=1.06):
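# Detects faces in a BGR image and returns the grayscale region of the first detected
# face together with its bounding box, or (None, None) when no face is found.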
img_copy = colored_img.copy()
gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
f_cascade = cv2.CascadeClassifier('data/lbpcascade_frontalface.xml')
faces = f_cascade.detectMultiScale(gray, scaleFactor=scaleFactor, minNeighbors=5)
if len(faces) == 0:
return None, None
(x, y, w, h) = faces[0]
return gray[y:y+w, x:x+h], faces[0]
def prepare_training_data(data_folder_path):
dirs = os.listdir(data_folder_path)
faces = []
labels = []
| for dir_name in dirs:
if not dir_name.startswith("s"):
continue
label = int(dir_name.replace("s", ""))
subject_dir_path = data_folder_path + "/" + dir_name
subject_images_names = os.listdir(subject_dir_path)
for image_name in subject_images_names:
if image_name.startswith("."):
continue
image_path = subject_dir_path + "/" + image_name
image = cv2.imread(image_path)
cv2.imshow("Training on image...", cv2.resize(image, (400, 500)))
cv2.waitKey(10)
face, rect = detect_faces(image)
if face is not None:
faces.append(face)
labels.append(label)
cv2.destroyAllWindows()
cv2.waitKey(1)
cv2.destroyAllWindows()
print("Total faces: ", len(faces))
print("Total labels: ", len(labels))
return faces, labels
def trainData(trainingDataPath, output_path):
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
faces, labels = prepare_training_data(trainingDataPath)
face_recognizer.train(faces, np.array(labels))
face_recognizer.write(output_path)
def loadTrainedData(path):
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read(path)
return recognizer
def predictStaticImage(test_img,trainer_file):
img = test_img.copy()
face, rect = detect_faces(img)
lt = loadTrainedData(trainer_file)
label, confidence = lt.predict(face)
label_text = subjects[label]
(x, y, w, h) = rect
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(img, label_text, (rect[0], rect[1] - 5), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
print("Confidence =",confidence)
return img
def showImage(image):
cv2.imshow('Frame', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def camToFile(framesToCapture,output_dir):
cam = cv2.VideoCapture(1)
detector = cv2.CascadeClassifier('data/haarcascade_frontalface_alt.xml')
sampleNum = 0
while True:
ret, img = cam.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
face = detector.detectMultiScale(gray, 1.5, 5)
for (x, y, w, h) in face:
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
sampleNum = sampleNum + 1
if sampleNum%(100/framesToCapture) == 0:
print("Frames Captured:", int(sampleNum/(100/framesToCapture)))
cv2.imwrite(output_dir+"/"+ str(int(sampleNum/(100/framesToCapture))) + ".jpg", gray[y:y + h, x:x + w])
cv2.imshow('frame', img)
if cv2.waitKey(100) & 0xFF == ord('q'):
break
elif sampleNum >= 100:
break
def detectFace(trainer_file):
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read(trainer_file)
faceCascade = cv2.CascadeClassifier("data/haarcascade_frontalface_alt.xml")
cam = cv2.VideoCapture(1)
font = cv2.FONT_HERSHEY_DUPLEX
while True:
ret, im = cam.read()
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, minSize=(100, 100),
flags=cv2.CASCADE_SCALE_IMAGE)
for (x, y, w, h) in faces:
nbr_predicted, conf = recognizer.predict(gray[y:y + h, x:x + w])
cv2.rectangle(im, (x - 50, y - 50), (x + w + 50, y + h + 50), (0, 225, 0), 2)
nbr_predicted = subjects[nbr_predicted]
cv2.putText(im, str(nbr_predicted), (x + 30, y + h + 30), font, 1, (0, 0, 225)) # Draw the text
cv2.imshow('FaceDetector', im)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cam.release()
cv2.destroyAllWindows()
#trainData('training-data','test.yml')
detectFace('test.yml')
#showImage(predictStaticImage(cv2.imread("test-data/4.jpg"),'test3.yml'))
#camToFile(20,'training-data/s7') | |
log_test.go | // Copyright © 2019 The Things Network Foundation, The Things Industries B.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package middleware
import (
"errors"
"net/http"
"net/http/httptest"
"testing"
echo "github.com/labstack/echo/v4"
"github.com/smartystreets/assertions"
"go.thethings.network/lorawan-stack/v3/pkg/log"
"go.thethings.network/lorawan-stack/v3/pkg/util/test/assertions/should"
)
func errorHandler(c echo.Context) error {
return c.String(http.StatusInternalServerError, "500")
}
func r | c echo.Context) error {
return c.Redirect(http.StatusMovedPermanently, "/other")
}
func forwardMiddleware(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
c.Request().Header.Set("X-Forwarded-For", "/other")
return next(c)
}
}
func noopHandler(c echo.Context) error { return nil }
func invalidHandler(c echo.Context) error {
return errors.New("This handler throws an error")
}
func TestLogging(t *testing.T) {
logger, _ := log.NewLogger(log.WithHandler(log.NoopHandler))
messages := []log.Entry{}
// collect is a middleware that collects the messages
collect := log.MiddlewareFunc(func(next log.Handler) log.Handler {
return log.HandlerFunc(func(entry log.Entry) error {
messages = append(messages, entry)
return next.HandleLog(entry)
})
})
logger.Use(collect)
a := assertions.New(t)
e := echo.New()
// Test Logging middleware
{
handler := Log(logger)(handler)
{
req := httptest.NewRequest("GET", "/", nil)
rec := httptest.NewRecorder()
c := e.NewContext(req, rec)
err := handler(c)
a.So(err, should.BeNil)
}
fields := messages[0].Fields().Fields()
a.So(len(messages), should.Equal, 1)
a.So(messages[0].Message(), should.Equal, "Request handled")
a.So(messages[0].Level(), should.Equal, log.InfoLevel)
a.So(fields["method"], should.Equal, "GET")
a.So(fields["url"], should.Equal, "/")
a.So(fields["response_size"], should.Equal, 3)
a.So(fields["status"], should.Equal, 200)
a.So(fields, should.ContainKey, "duration")
a.So(fields, should.ContainKey, "remote_addr")
a.So(fields, should.ContainKey, "request_id")
a.So(fields, should.ContainKey, "response_size")
a.So(fields, should.NotContainKey, "redirect")
}
// Reset messages
messages = nil
// Test Logging middleware on error
{
handler := Log(logger)(errorHandler)
req := httptest.NewRequest("GET", "/", nil)
rec := httptest.NewRecorder()
c := e.NewContext(req, rec)
err := handler(c)
a.So(err, should.BeNil)
fields := messages[0].Fields().Fields()
a.So(len(messages), should.Equal, 1)
a.So(messages[0].Message(), should.Equal, "Request error")
a.So(messages[0].Level(), should.Equal, log.ErrorLevel)
a.So(fields["status"], should.Equal, 500)
}
// Reset messages
messages = nil
// Test Logging middleware on redirect
{
handler := Log(logger)(redirectHandler)
req := httptest.NewRequest("GET", "/", nil)
rec := httptest.NewRecorder()
c := e.NewContext(req, rec)
err := handler(c)
a.So(err, should.BeNil)
fields := messages[0].Fields().Fields()
a.So(len(messages), should.Equal, 1)
a.So(messages[0].Message(), should.Equal, "Request handled")
a.So(messages[0].Level(), should.Equal, log.InfoLevel)
a.So(fields, should.ContainKey, "location")
}
// Reset messages
messages = nil
// Test Logging middleware on forward
{
handler := forwardMiddleware(Log(logger)(noopHandler))
req := httptest.NewRequest("GET", "/", nil)
rec := httptest.NewRecorder()
c := e.NewContext(req, rec)
err := handler(c)
a.So(err, should.BeNil)
fields := messages[0].Fields().Fields()
a.So(len(messages), should.Equal, 1)
a.So(fields, should.ContainKey, "forwarded_for")
}
// Reset messages
messages = nil
// Test Logging middleware with invalid handler
{
handler := Log(logger)(invalidHandler)
req := httptest.NewRequest("GET", "/", nil)
rec := httptest.NewRecorder()
c := e.NewContext(req, rec)
err := handler(c)
a.So(err, should.NotBeNil)
fields := messages[0].Fields().Fields()
a.So(len(messages), should.Equal, 1)
a.So(fields, should.ContainKey, "error")
}
}
| edirectHandler( |
storage_test.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"net"
"strings"
"testing"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/storage"
etcdtesting "k8s.io/apiserver/pkg/storage/etcd/testing"
"k8s.io/apiserver/pkg/storage/storagebackend/factory"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/core/service/allocator"
allocatorstore "k8s.io/kubernetes/pkg/registry/core/service/allocator/storage"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
"k8s.io/kubernetes/pkg/registry/registrytest"
"golang.org/x/net/context"
)
func newStorage(t *testing.T) (*etcdtesting.EtcdTestServer, ipallocator.Interface, allocator.Interface, storage.Interface, factory.DestroyFunc) {
etcdStorage, server := registrytest.NewEtcdStorage(t, "")
_, cidr, err := net.ParseCIDR("192.168.1.0/24")
if err != nil {
t.Fatal(err)
}
var backing allocator.Interface
storage := ipallocator.NewAllocatorCIDRRange(cidr, func(max int, rangeSpec string) allocator.Interface {
mem := allocator.NewAllocationMap(max, rangeSpec)
backing = mem
etcd := allocatorstore.NewEtcd(mem, "/ranges/serviceips", api.Resource("serviceipallocations"), etcdStorage)
return etcd
})
s, d := generic.NewRawStorage(etcdStorage)
destroyFunc := func() {
d()
server.Terminate(t)
}
return server, storage, backing, s, destroyFunc
}
func validNewRangeAllocation() *api.RangeAllocation {
_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
return &api.RangeAllocation{
Range: cidr.String(),
}
}
func key() string {
return "/ranges/serviceips"
}
func TestEmpty(t *testing.T) |
func TestErrors(t *testing.T) {
_, storage, _, _, destroyFunc := newStorage(t)
defer destroyFunc()
err := storage.Allocate(net.ParseIP("192.168.0.0"))
if _, ok := err.(*ipallocator.ErrNotInRange); !ok {
t.Fatal(err)
}
}
func TestStore(t *testing.T) {
_, storage, backing, si, destroyFunc := newStorage(t)
defer destroyFunc()
if err := si.Create(context.TODO(), key(), validNewRangeAllocation(), nil, 0); err != nil {
t.Fatalf("unexpected error: %v", err)
}
if err := storage.Allocate(net.ParseIP("192.168.1.2")); err != nil {
t.Fatal(err)
}
ok, err := backing.Allocate(1)
if err != nil {
t.Fatal(err)
}
if ok {
t.Fatal("Expected allocation to fail")
}
if err := storage.Allocate(net.ParseIP("192.168.1.2")); err != ipallocator.ErrAllocated {
t.Fatal(err)
}
}
| {
_, storage, _, _, destroyFunc := newStorage(t)
defer destroyFunc()
if err := storage.Allocate(net.ParseIP("192.168.1.2")); !strings.Contains(err.Error(), "cannot allocate resources of type serviceipallocations at this time") {
t.Fatal(err)
}
} |
walk_test.go | package git
import (
"os"
"testing"
"gopkg.in/src-d/go-billy.v4"
"gopkg.in/src-d/go-billy.v4/memfs"
)
func TestWalkTree(t *testing.T) {
mockFs, countFiles := setupPopulatedFilesystem()
var discoveredFiles int
WalkTree(mockFs, func(path string, info os.FileInfo, err error) error {
if !info.IsDir() {
discoveredFiles++
}
return nil
})
if discoveredFiles != countFiles {
t.Errorf("discovered files count %d does not equal expected files count %d", discoveredFiles, countFiles)
}
}
func TestWalkTreeEmptyTree(t *testing.T) {
mockFs := memfs.New()
err := WalkTree(mockFs, func(path string, info os.FileInfo, err error) error {
return err
})
if err == nil {
t.Error(err)
}
}
func | () (fs billy.Filesystem, countFiles int) {
fs = memfs.New()
fs.MkdirAll(".git/hooks", os.ModeDir)
fs.MkdirAll("dirA/dirB/dirC", os.ModeDir)
fs.MkdirAll("dirA/dirE/dirF", os.ModeDir)
fs.MkdirAll("dirA/dirE/dirG", os.ModeDir)
fs.MkdirAll("dirH/dirI", os.ModeDir)
fs.MkdirAll("dirJ", os.ModeDir)
fs.Create("dog.txt")
countFiles++
fs.Create("dirA/dog.txt")
countFiles++
fs.Create("dirA/dirB/dog.txt")
countFiles++
fs.Create("dirA/dirB/dirC/dog.txt")
countFiles++
fs.Create("dirA/dirB/dirC/cat.txt")
countFiles++
fs.Create("dirA/dirE/dog.txt")
countFiles++
fs.Create("dirA/dirE/dirF/dog.txt")
countFiles++
fs.Create("dirA/dirE/dirG/dog.txt")
countFiles++
fs.Create("dirH/dog.txt")
countFiles++
fs.Create("dirH/dirI/dog.txt")
countFiles++
return fs, countFiles
}
| setupPopulatedFilesystem |
connections.py | import socket
from six.moves.urllib.parse import urlparse
from frappe import get_conf
REDIS_KEYS = ("redis_cache", "redis_queue", "redis_socketio")
def is_open(ip, port, timeout=10):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(timeout)
try:
s.connect((ip, int(port)))
s.shutdown(socket.SHUT_RDWR)
return True
except socket.error:
return False
finally:
s.close()
def check_database():
config = get_conf()
db_type = config.get("db_type", "mariadb")
db_host = config.get("db_host", "localhost")
db_port = config.get("db_port", 3306 if db_type == "mariadb" else 5432)
return {db_type: is_open(db_host, db_port)}
def | (redis_services=None):
config = get_conf()
services = redis_services or REDIS_KEYS
status = {}
for conn in services:
redis_url = urlparse(config.get(conn)).netloc
redis_host, redis_port = redis_url.split(":")
status[conn] = is_open(redis_host, redis_port)
return status
def check_connection(redis_services=None):
service_status = {}
service_status.update(check_database())
service_status.update(check_redis(redis_services))
return service_status
| check_redis |
test_modeling_flax_vit.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from .test_configuration_common import ConfigTester
from .test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=13,
image_size=30,
patch_size=2,
num_channels=3,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
type_sequence_label_size=10,
initializer_range=0.02,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = ViTConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
is_decoder=False,
initializer_range=self.initializer_range,
)
return config, pixel_values
def create_and_check_model(self, config, pixel_values, labels):
model = FlaxViTModel(config=config)
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = (self.image_size, self.image_size)
patch_size = (self.patch_size, self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
pixel_values,
) = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def setUp(self) -> None:
self.model_tester = FlaxViTModelTester(self)
self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
# We need to override this test because in ViT, the seq_len equals the number of patches + 1
# we compute that here
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
num_patches = (config.image_size // config.patch_size) ** 2
seq_length = num_patches + 1
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_length, seq_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_length, seq_length],
)
# We need to override this test because ViT's forward signature is different from that of text models.
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
# We need to override this test because ViT expects pixel_values instead of input_ids
@slow
def test_jit_compilation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@jax.jit
def | (pixel_values, **kwargs):
return model(pixel_values=pixel_values, **kwargs)
with self.subTest("JIT Enabled"):
jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
# We need to override this test because in ViT, the seq_len equals the number of patches + 1
# we compute that here
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
num_patches = (config.image_size // config.patch_size) ** 2
seq_length = num_patches + 1 # we add 1 for the [CLS] token
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("google/vit-base-patch16-224")
outputs = model(np.ones((1, 3, 224, 224)))
self.assertIsNotNone(outputs)
| model_jitted |
summary.rs | use {
super::*,
crate::*,
termimad::minimad::OwningTemplateExpander,
};
static SUMMARY_MD: &str = r#"
${hits} hits and ${bytes} from **${start}** to **${end}**
${filterings
Filtering by ${field} on pattern `${pattern}` removed **${removed_percent}** of total lines
}
${filtered-stats | }
"#;
pub fn print_summary(base: &LogBase, printer: &Printer) {
let mut expander = OwningTemplateExpander::new();
let total_bytes = base.unfiltered_histogram.total_bytes_sent();
expander
.set_md("hits", printer.md_hits(base.unfiltered_count as usize))
.set_md("bytes", printer.md_bytes(total_bytes))
.set("start", base.start_time())
.set("end", base.end_time());
if base.filterer.has_filters() {
let total_hits = base.unfiltered_count as f32;
for filtering in &base.filterer.filterings {
let removed = filtering.removed_count as f32;
let percent = format!("{:.2}%", 100f32 * removed / total_hits);
expander.sub("filterings")
.set("field", filtering.filter.field_name())
.set("pattern", &filtering.pattern)
.set("removed_percent", percent);
}
let filtered_bytes = base.filtered_histogram.total_bytes_sent();
expander.sub("filtered-stats")
.set_md("hits", printer.md_hits(base.filtered_count as usize))
.set_md("bytes", printer.md_bytes(filtered_bytes));
}
printer.print(expander, SUMMARY_MD);
} | **==>** hits: ${hits}, bytes sent: ${bytes} |
admin.py | from .models import Profile, Group
from django.contrib import admin | admin.site.register(Profile)
admin.site.register(Group) | |
conf.py | # -*- coding: utf-8 -*-
#
# VPP test framework documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 13 08:45:03 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import subprocess
from datetime import date
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
autodoc_mock_imports = ['objgraph',
'parameterized',
'pexpect',
'psutil',
'pympler',
'scapy',
'syslog_rfc5424_parser',
'vpp_papi']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'VPP test framework'
copyright = f'{date.today().year}, FD.io VPP team'
author = u'FD.io VPP team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
output = subprocess.run(['../../src/scripts/version'], stdout=subprocess.PIPE)
version = f'{output.stdout.decode("utf-8")}'
# The full version, including alpha/beta/rc tags.
release = f'{output.stdout.decode("utf-8")}'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'VPP test framework v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
| # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'VPPtestframeworkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'VPPtestframework.tex', u'VPP test framework Documentation',
u'VPP team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'vpptestframework', u'VPP test framework Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'VPPtestframework', u'VPP test framework Documentation',
author, 'VPPtestframework', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False | # If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
|
VColorPickerSwatches.js | "use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = void 0;
require("../../../src/components/VColorPicker/VColorPickerSwatches.sass");
var _VIcon = _interopRequireDefault(require("../VIcon"));
var _colors = _interopRequireDefault(require("../../util/colors"));
var _util = require("./util");
var _helpers = require("../../util/helpers");
var _mixins = _interopRequireDefault(require("../../util/mixins"));
var _themeable = _interopRequireDefault(require("../../mixins/themeable"));
var _colorUtils = require("../../util/colorUtils");
function | (obj) { return obj && obj.__esModule ? obj : { default: obj }; }
// Styles
// Components
// Helpers
function parseDefaultColors(colors) {
return Object.keys(colors).map(function (key) {
var color = colors[key];
return color.base ? [color.base, color.darken4, color.darken3, color.darken2, color.darken1, color.lighten1, color.lighten2, color.lighten3, color.lighten4, color.lighten5] : [color.black, color.white, color.transparent];
});
}
var white = (0, _util.fromHex)('#FFFFFF').rgba;
var black = (0, _util.fromHex)('#000000').rgba;
var _default2 = (0, _mixins.default)(_themeable.default).extend({
name: 'v-color-picker-swatches',
props: {
swatches: {
type: Array,
default: function _default() {
return parseDefaultColors(_colors.default);
}
},
color: Object,
maxWidth: [Number, String],
maxHeight: [Number, String]
},
methods: {
genColor: function genColor(color) {
var _this = this;
var content = this.$createElement('div', {
style: {
background: color
}
}, [(0, _helpers.deepEqual)(this.color, (0, _util.parseColor)(color, null)) && this.$createElement(_VIcon.default, {
props: {
small: true,
dark: (0, _colorUtils.contrastRatio)(this.color.rgba, white) > 2 && this.color.alpha > 0.5,
light: (0, _colorUtils.contrastRatio)(this.color.rgba, black) > 2 && this.color.alpha > 0.5
}
}, '$success')]);
return this.$createElement('div', {
staticClass: 'v-color-picker__color',
on: {
// TODO: Less hacky way of catching transparent
click: function click() {
return _this.$emit('update:color', (0, _util.fromHex)(color === 'transparent' ? '#00000000' : color));
}
}
}, [content]);
},
genSwatches: function genSwatches() {
var _this2 = this;
return this.swatches.map(function (swatch) {
var colors = swatch.map(_this2.genColor);
return _this2.$createElement('div', {
staticClass: 'v-color-picker__swatch'
}, colors);
});
}
},
render: function render(h) {
return h('div', {
staticClass: 'v-color-picker__swatches',
style: {
maxWidth: (0, _helpers.convertToUnit)(this.maxWidth),
maxHeight: (0, _helpers.convertToUnit)(this.maxHeight)
}
}, [this.$createElement('div', this.genSwatches())]);
}
});
exports.default = _default2;
//# sourceMappingURL=VColorPickerSwatches.js.map | _interopRequireDefault |
main.go | package main
import (
"fmt"
"io"
"os"
)
func main() | {
var length int
buffer := make([]byte, 16000000)
if len(os.Args) == 1 {
for {
size, errorObject := os.Stdin.Read(buffer)
length += size
if errorObject == io.EOF {
break
} else if errorObject != nil {
fmt.Println(errorObject)
os.Exit(1)
}
}
} else {
os.Args = os.Args[1:]
for _, argument := range os.Args {
file, errorObject := os.Open(argument)
if errorObject != nil {
fmt.Println(errorObject)
os.Exit(1)
}
for {
size, errorObject := file.Read(buffer)
length += size
if errorObject == io.EOF {
break
} else if errorObject != nil {
fmt.Println(errorObject)
os.Exit(1)
}
}
file.Close()
}
}
fmt.Println(length)
} |
|
stateHistoryPlugin.ts | import { pairwise } from 'rxjs/operators';
import { toBoolean } from '../../toBoolean';
import { AkitaPlugin, Queries } from '../plugin';
import { EntityParam } from '../entityCollectionPlugin';
import { logAction } from '../../actions';
export interface StateHistoryParams {
maxAge?: number;
comparator?: (prevState, currentState) => boolean;
}
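// `maxAge` caps how many past states are kept (defaults to 10 in the constructor);
// `comparator` decides whether a state transition is recorded (return true to record).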
export class StateHistoryPlugin<E = any, S = any> extends AkitaPlugin<E, S> {
/** Allow skipping an update from outside */
private skip = false;
private history = {
past: [],
present: null,
future: []
};
/** Skip the update when redo/undo */
private skipUpdate = false;
private subscription;
constructor(protected query: Queries<E, S>, private params: StateHistoryParams = {}, private _entityId?: EntityParam) {
super(query, {
resetFn: () => this.clear()
});
params.maxAge = toBoolean(params.maxAge) ? params.maxAge : 10;
params.comparator = params.comparator || ((prev, current) => true);
this.activate();
}
get hasPast() {
return this.history.past.length > 0;
}
get hasFuture() {
return this.history.future.length > 0;
}
activate() {
this.history.present = this.getSource(this._entityId);
this.subscription = this.selectSource(this._entityId)
.pipe(pairwise())
.subscribe(([past, present]) => {
if (this.skip) {
this.skip = false;
return;
}
/**
* comparator: (prev, current) => isEqual(prev, current) === false
*/
const shouldUpdate = this.params.comparator(past, present);
if (!this.skipUpdate && shouldUpdate) {
if (this.history.past.length === this.params.maxAge) {
this.history.past = this.history.past.slice(1);
}
this.history.past = [...this.history.past, past];
this.history.present = present;
}
});
}
undo() {
if (this.history.past.length > 0) {
const { past, present } = this.history;
const previous = past[past.length - 1];
this.history.past = past.slice(0, past.length - 1);
this.history.present = previous;
this.history.future = [present, ...this.history.future];
this.update();
}
}
redo() {
if (this.history.future.length > 0) {
const { past, present } = this.history;
const next = this.history.future[0];
const newFuture = this.history.future.slice(1);
this.history.past = [...past, present];
this.history.present = next;
this.history.future = newFuture;
this.update('Redo');
}
}
jumpToPast(index: number) {
if (index < 0 || index >= this.history.past.length) return;
const { past, future } = this.history;
/**
*
* const past = [1, 2, 3, 4, 5];
*
* newPast = past.slice(0, 2) = [1, 2];
* present = past[index] = 3;
* [...past.slice(2 + 1), ...future] = [4, 5];
*
*/
const newPast = past.slice(0, index);
const newFuture = [...past.slice(index + 1), ...future];
const newPresent = past[index];
this.history.past = newPast;
this.history.present = newPresent;
this.history.future = newFuture;
this.update();
}
jumpToFuture(index: number) {
if (index < 0 || index >= this.history.future.length) return;
const { past, future } = this.history;
const newPast = [...past, ...future.slice(0, index)];
const newPresent = future[index];
const newFuture = future.slice(index + 1);
this.history.past = newPast; | }
clear() {
this.history = {
past: [],
present: null,
future: []
};
}
destroy(clearHistory = false) {
if (clearHistory) {
this.clear();
}
this.subscription.unsubscribe();
}
ignoreNext() {
this.skip = true;
}
private update(action = 'Undo') {
this.skipUpdate = true;
logAction(`@StateHistory - ${action}`);
this.updateStore(this.history.present, this._entityId);
this.skipUpdate = false;
}
} | this.history.present = newPresent;
this.history.future = newFuture;
this.update('Redo'); |
gradients.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Jacobian ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gradients_impl as gradient_ops
from tensorflow.python.ops.parallel_for import control_flow_ops
from tensorflow.python.util import nest
def jacobian(output, inputs, use_pfor=True, parallel_iterations=None):
"""Computes jacobian of `output` w.r.t. `inputs`.
Args:
output: A tensor.
inputs: A tensor or a nested structure of tensor objects.
use_pfor: If true, uses pfor for computing the jacobian. Else uses
tf.while_loop.
    parallel_iterations: A knob to control how many iterations are vectorized
      and dispatched in parallel. This knob can be used to control the total
      memory usage.
Returns:
A tensor or a nested structure of tensors with the same structure as
    `inputs`. Each entry is the jacobian of `output` w.r.t. the corresponding
value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has
shape [x_1, ..., x_m], the corresponding jacobian has shape
[y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is
sparse (IndexedSlices), jacobian function currently makes it dense and
returns a Tensor instead. This may change in the future.
"""
flat_inputs = nest.flatten(inputs)
output_tensor_shape = output.shape
output_shape = array_ops.shape(output)
output = array_ops.reshape(output, [-1])
def loop_fn(i):
y = array_ops.gather(output, i)
return gradient_ops.gradients(y, flat_inputs)
try:
output_size = int(output.shape[0])
except TypeError:
output_size = array_ops.shape(output)[0]
if use_pfor:
pfor_outputs = control_flow_ops.pfor(
loop_fn, output_size, parallel_iterations=parallel_iterations)
else:
pfor_outputs = control_flow_ops.for_loop(
loop_fn,
        [output.dtype] * len(flat_inputs),
        output_size,
        parallel_iterations=parallel_iterations)
for i, out in enumerate(pfor_outputs):
if isinstance(out, ops.Tensor):
new_shape = array_ops.concat(
[output_shape, array_ops.shape(out)[1:]], axis=0)
out = array_ops.reshape(out, new_shape)
out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape))
pfor_outputs[i] = out
return nest.pack_sequence_as(inputs, pfor_outputs)
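# Editor's sketch (not part of the original module): a minimal graph-mode usage
# example of `jacobian`, assuming this file is importable as
# tensorflow.python.ops.parallel_for.gradients and that a TF1-style Session is
# available. With `output` of shape [2] and `inputs` of shape [2] the result
# has shape [2, 2], as described in the docstring above.
#
#   import tensorflow.compat.v1 as tf
#   from tensorflow.python.ops.parallel_for import gradients as pfor_gradients
#
#   tf.disable_eager_execution()
#   x = tf.constant([1.0, 2.0])
#   y = x * x
#   jac = pfor_gradients.jacobian(y, x, use_pfor=True)
#   with tf.Session() as sess:
#       print(sess.run(jac))  # expected: [[2., 0.], [0., 4.]]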
def batch_jacobian(output, inp, use_pfor=True, parallel_iterations=None):
"""Computes and stacks jacobians of `output[i,...]` w.r.t. `input[i,...]`.
e.g.
x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
y = x * x
jacobian = batch_jacobian(y, x)
# => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]
Args:
    output: A tensor with shape [b, y_1, ..., y_n]. `output[i,...]` should
      only depend on `inp[i,...]`.
    inp: A tensor with shape [b, x_1, ..., x_m].
use_pfor: If true, uses pfor for computing the Jacobian. Else uses a
tf.while_loop.
parallel_iterations: A knob to control how many iterations are vectorized
and dispatched in parallel. The default value of None, when use_pfor is
true, corresponds to vectorizing all the iterations. When use_pfor is
false, the default value of None corresponds to parallel_iterations=10.
This knob can be used to control the total memory usage.
Returns:
    A tensor `t` with shape [b, y_1, ..., y_n, x_1, ..., x_m] where `t[i, ...]`
is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked
per-example jacobians.
Raises:
ValueError: if first dimension of `output` and `inp` do not match.
"""
output_shape = output.shape
if not output_shape[0].is_compatible_with(inp.shape[0]):
raise ValueError(f"Need first dimension of `output` shape ({output.shape}) "
f"and `inp` shape ({inp.shape}) to match.")
if output_shape.is_fully_defined():
batch_size = int(output_shape[0])
output_row_size = output_shape.num_elements() // batch_size
else:
output_shape = array_ops.shape(output)
batch_size = output_shape[0]
output_row_size = array_ops.size(output) // batch_size
inp_shape = array_ops.shape(inp)
# Flatten output to 2-D.
with ops.control_dependencies(
[check_ops.assert_equal(batch_size, inp_shape[0])]):
output = array_ops.reshape(output, [batch_size, output_row_size])
def loop_fn(i):
y = array_ops.gather(output, i, axis=1)
return gradient_ops.gradients(y, inp)[0]
if use_pfor:
pfor_output = control_flow_ops.pfor(loop_fn, output_row_size,
parallel_iterations=parallel_iterations)
else:
pfor_output = control_flow_ops.for_loop(
loop_fn, output.dtype,
output_row_size,
parallel_iterations=parallel_iterations)
if pfor_output is None:
return None
pfor_output = array_ops.reshape(pfor_output,
[output_row_size, batch_size, -1])
output = array_ops.transpose(pfor_output, [1, 0, 2])
new_shape = array_ops.concat([output_shape, inp_shape[1:]], axis=0)
  return array_ops.reshape(output, new_shape)
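# Editor's sketch (not part of the original module): the same setup as the
# `jacobian` example above, applied to `batch_jacobian`. It reproduces the
# docstring example: per-example Jacobians of y = x * x are stacked along the
# batch dimension, giving a result of shape [2, 2, 2].
#
#   import tensorflow.compat.v1 as tf
#   from tensorflow.python.ops.parallel_for import gradients as pfor_gradients
#
#   tf.disable_eager_execution()
#   x = tf.constant([[1., 2.], [3., 4.]])
#   y = x * x
#   jac = pfor_gradients.batch_jacobian(y, x, use_pfor=True)
#   with tf.Session() as sess:
#       print(sess.run(jac))  # expected: [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]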
test_operator_gaussian_blur.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.types as types
import nvidia.dali.fn as fn
import numpy as np
import cv2
from scipy.ndimage import convolve1d
import os
from nose.tools import raises
from nose.plugins.attrib import attr
from test_utils import get_dali_extra_path, check_batch, compare_pipelines, RandomlyShapedDataIterator, dali_type
data_root = get_dali_extra_path()
images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
test_iters = 4
shape_layout_axes_cases = [((20, 20, 30, 3), "DHWC", 3), ((20, 20, 30), "", 3),
((20, 30, 3), "HWC", 2), ((20, 30), "HW", 2),
((3, 30, 20), "CWH", 2), ((5, 20, 30, 3), "FHWC", 2),
((5, 10, 10, 7, 3), "FDHWC", 3), ((5, 3, 20, 30), "FCHW", 2),
((3, 5, 10, 10, 7), "CFDHW", 3)]
def to_batch(tl, batch_size):
return [np.array(tl[i]) for i in range(batch_size)]
def to_cv_sigma(sigma, axes=2):
if sigma is None:
return (0,) * axes
elif isinstance(sigma, (int, float)):
return (sigma,) * axes
elif (isinstance(sigma, np.ndarray) and len(sigma.shape) == 0):
return (float(sigma),) * axes
elif len(sigma) == 1:
return (sigma[0],) * axes
return tuple(reversed(sigma))
def to_cv_win_size(window_size, axes=2, sigma=None):
if window_size is None:
# when using cv2.getGaussianKernel we need to always provide window size
if sigma is not None:
sigma = to_cv_sigma(sigma, axes)
return tuple([int(3 * s + 0.5) * 2 + 1 for s in sigma])
return (0,) * axes
elif isinstance(window_size, int):
return (int(window_size),) * axes
elif (isinstance(window_size, np.ndarray) and len(window_size.shape) == 0):
return (int(window_size),) * axes
elif len(window_size) == 1:
return (int(window_size[0]),) * axes
# OpenCV shape is the other way round: (width, height)
return tuple(int(x) for x in reversed(window_size))
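# Editor's note (hedged): when only `sigma` is given, the window size derived
# above is int(3 * sigma + 0.5) * 2 + 1, i.e. roughly three standard deviations
# on each side of the center tap. For example:
#
#   to_cv_win_size(None, axes=2, sigma=1.0)   # -> (7, 7)
#   to_cv_win_size(None, axes=2, sigma=2.0)   # -> (13, 13)
#   to_cv_win_size([5, 9], axes=2)            # -> (9, 5), reversed for OpenCV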
def gaussian_cv(image, sigma, window_size):
sigma_x, sigma_y = to_cv_sigma(sigma)
window_size_cv = to_cv_win_size(window_size)
    # compute on floats and round like a sane person (in a mathematically compliant way)
blurred = cv2.GaussianBlur(np.float32(image), window_size_cv, sigmaX=sigma_x, sigmaY=sigma_y)
return np.uint8(blurred + 0.5)
def gaussian_baseline(image, sigma, window_size, axes=2, skip_axes=0, dtype=np.uint8):
sigma_xyz = to_cv_sigma(sigma, axes)
win_xyz = to_cv_win_size(window_size, axes, sigma)
filters = [cv2.getGaussianKernel(win_xyz[i], sigma_xyz[i]) for i in range(axes)]
filters = [np.float32(f).squeeze() for f in filters]
filters.reverse()
for i in reversed(range(axes)):
axis = i + skip_axes
image = convolve1d(np.float32(image), filters[i], axis, mode="mirror")
if dtype == np.float32:
return image
else:
return dtype(image + 0.5)
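# Editor's sketch (hedged, not part of the original tests): the separable
# baseline above is expected to agree with the OpenCV reference within the
# rounding tolerance these tests use (max absolute error of 1), e.g.:
#
#   img = np.random.randint(0, 256, size=(20, 30, 3), dtype=np.uint8)
#   ref = gaussian_cv(img, sigma=1.5, window_size=7)
#   base = gaussian_baseline(img, sigma=1.5, window_size=7, axes=2, skip_axes=0)
#   assert np.max(np.abs(ref.astype(int) - base.astype(int))) <= 1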
def get_gaussian_pipe(batch_size, sigma, window_size, op_type):
pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
with pipe:
input, _ = fn.file_reader(file_root=images_dir, shard_id=0, num_shards=1)
decoded = fn.image_decoder(input, device="cpu", output_type=types.RGB)
if op_type == "gpu":
decoded = decoded.gpu()
blurred = fn.gaussian_blur(decoded, device=op_type, sigma=sigma, window_size=window_size)
pipe.set_outputs(blurred, decoded)
return pipe
def check_gaussian_blur(batch_size, sigma, window_size, op_type="cpu"):
pipe = get_gaussian_pipe(batch_size, sigma, window_size, op_type)
pipe.build()
for _ in range(test_iters):
result, input = pipe.run()
if op_type == "gpu":
result = result.as_cpu()
input = input.as_cpu()
input = to_batch(input, batch_size)
baseline_cv = [gaussian_cv(img, sigma, window_size) for img in input]
check_batch(result, baseline_cv, batch_size, max_allowed_error=1, expected_layout="HWC")
def test_image_gaussian_blur():
for dev in ["cpu", "gpu"]:
for sigma in [1.0]:
for window_size in [3, 5, None]:
if sigma is None and window_size is None:
continue
yield check_gaussian_blur, 10, sigma, window_size, dev
        # OpenCV uses fixed values for small windows that differ from the Gaussian function
yield check_gaussian_blur, 10, None, 11, dev
@attr('slow')
def test_image_gaussian_blur_slow():
for dev in ["cpu", "gpu"]:
for sigma in [1.0, [1.0, 2.0]]:
for window_size in [3, 5, [7, 5], [5, 9], None]:
if sigma is None and window_size is None:
continue
yield check_gaussian_blur, 10, sigma, window_size, dev
        # OpenCV uses fixed values for small windows that differ from the Gaussian function
for window_size in [15, [17, 31]]:
yield check_gaussian_blur, 10, None, window_size, dev
def check_gaussian_blur_cpu_gpu(batch_size, sigma, window_size):
cpu_pipe = get_gaussian_pipe(batch_size, sigma, window_size, "cpu")
gpu_pipe = get_gaussian_pipe(batch_size, sigma, window_size, "gpu")
compare_pipelines(cpu_pipe, gpu_pipe, batch_size, 16, max_allowed_error=1)
def test_gaussian_blur_cpu_gpu():
for window_size in [5, [7, 13]]:
yield check_gaussian_blur_cpu_gpu, 10, None, window_size
@attr('slow')
def test_gaussian_blur_cpu_gpu_slow():
for sigma in [1.0, [1.0, 2.0], None]:
for window_size in [3, 5, [7, 5], [5, 9], 11, 15, 31, None]:
if sigma is None and window_size is None:
continue
yield check_gaussian_blur_cpu_gpu, 10, sigma, window_size
def count_skip_axes(layout):
if layout.startswith("FC") or layout.startswith("CF"):
return 2
elif layout.startswith("F") or layout.startswith("C"):
return 1
else:
return 0
def check_generic_gaussian_blur(
batch_size, sigma, window_size, shape, layout, axes, op_type="cpu", in_dtype=np.uint8,
out_dtype=types.NO_TYPE):
pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
data = RandomlyShapedDataIterator(batch_size, max_shape=shape, dtype=in_dtype)
    # Extract the numpy type from the DALI type; the result can be float32 or the same type as the input
if out_dtype == types.NO_TYPE:
result_type = in_dtype
elif dali_type(in_dtype) == out_dtype:
result_type = in_dtype
    else:
        result_type = np.float32
    with pipe:
input = fn.external_source(data, layout=layout)
if op_type == "gpu":
input = input.gpu()
blurred = fn.gaussian_blur(input, device=op_type, sigma=sigma,
window_size=window_size, dtype=out_dtype)
pipe.set_outputs(blurred, input)
pipe.build()
for _ in range(test_iters):
result, input = pipe.run()
if op_type == "gpu":
result = result.as_cpu()
input = input.as_cpu()
input = to_batch(input, batch_size)
skip_axes = count_skip_axes(layout)
baseline = [
gaussian_baseline(img, sigma, window_size, axes, skip_axes, dtype=result_type)
for img in input]
max_error = 1 if result_type != np.float32 else 1e-04
check_batch(result, baseline, batch_size, max_allowed_error=max_error, expected_layout=layout)
# Generate tests for single or per-axis sigma and window_size arguments
def generate_generic_cases(dev, t_in, t_out):
for shape, layout, axes in shape_layout_axes_cases:
for sigma in [1.0, [1.0, 2.0, 3.0]]:
for window_size in [3, 5, [7, 5, 9], [3, 5, 9], None]:
if isinstance(sigma, list):
sigma = sigma[0:axes]
if isinstance(window_size, list):
window_size = window_size[0:axes]
yield check_generic_gaussian_blur, 10, sigma, window_size, shape, layout, axes, dev, t_in, t_out
for window_size in [11, 15]:
yield check_generic_gaussian_blur, 10, None, window_size, shape, layout, axes, dev, t_in, t_out
def test_generic_gaussian_blur():
for dev in ["cpu", "gpu"]:
for (t_in, t_out) in [(np.uint8, types.NO_TYPE), (np.float32, types.FLOAT), (np.uint8, types.FLOAT)]:
yield from generate_generic_cases(dev, t_in, t_out)
@attr('slow')
def test_generic_gaussian_blur_slow():
for dev in ["cpu", "gpu"]:
for t_in in [np.uint8, np.int32, np.float32]:
for t_out in [types.NO_TYPE, types.FLOAT, dali_type(t_in)]:
yield from generate_generic_cases(dev, t_in, t_out)
def check_per_sample_gaussian_blur(
batch_size, sigma_dim, window_size_dim, shape, layout, axes, op_type="cpu"):
pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
data = RandomlyShapedDataIterator(batch_size, max_shape=shape)
with pipe:
if sigma_dim is not None:
sigma = fn.random.uniform(range=[0.5, 3], shape=[sigma_dim])
sigma_arg = sigma
else:
# placeholder, so we can return something
sigma = fn.coin_flip(probability=0)
sigma_arg = None
if window_size_dim is not None:
window_radius = fn.random.uniform(range=[5, 10], shape=[window_size_dim])
window_size = fn.cast(window_radius, dtype=types.INT32) * 2 + 1
window_arg = window_size
else:
window_size = fn.coin_flip(probability=0)
window_arg = None
input = fn.external_source(data, layout=layout)
if op_type == "gpu":
input = input.gpu()
blurred = fn.gaussian_blur(input, device=op_type, sigma=sigma_arg, window_size=window_arg)
pipe.set_outputs(blurred, input, sigma, window_size)
pipe.build()
for _ in range(test_iters):
result, input, sigma, window_size = pipe.run()
if op_type == "gpu":
result = result.as_cpu()
input = input.as_cpu()
input = to_batch(input, batch_size)
sigma = to_batch(sigma, batch_size)
window_size = to_batch(window_size, batch_size)
baseline = []
for i in range(batch_size):
sigma_arg = sigma[i] if sigma is not None else None
window_arg = window_size[i] if window_size_dim is not None else None
skip_axes = count_skip_axes(layout)
baseline.append(gaussian_baseline(input[i], sigma_arg, window_arg, axes, skip_axes))
check_batch(result, baseline, batch_size, max_allowed_error=1, expected_layout=layout)
# TODO(klecki): consider checking mixed ArgumentInput/Scalar value cases
def test_per_sample_gaussian_blur():
for dev in ["cpu", "gpu"]:
for shape, layout, axes in shape_layout_axes_cases:
for sigma_dim in [None, 1, axes]:
for window_size_dim in [None, 1, axes]:
if sigma_dim is None and window_size_dim is None:
continue
yield check_per_sample_gaussian_blur, 10, sigma_dim, window_size_dim, shape, layout, axes, dev
@raises(RuntimeError)
def check_fail_gaussian_blur(batch_size, sigma, window_size, shape, layout, axes, op_type, in_dtype=np.uint8, out_dtype=types.NO_TYPE):
check_generic_gaussian_blur(batch_size, sigma, window_size, shape, layout, axes, op_type, in_dtype, out_dtype)
def test_fail_gaussian_blur():
for dev in ["cpu", "gpu"]:
# Check layout and channel placement errors
for shape, layout, axes in [((20, 20, 30, 3), "DHCW", 3), ((5, 20, 30, 3), "HFWC", 2),
((5, 10, 10, 10, 7, 3), "FWXYZC", 4),
((5, 3, 20, 3, 30), "FCHCW", 2),
((5, 3, 20, 3, 30), "FCCHW", 2)]:
yield check_fail_gaussian_blur, 10, 1.0, 11, shape, layout, axes, dev
# Negative, disallowed or both unspecified values of sigma and window size
yield check_fail_gaussian_blur, 10, 0.0, 0, (100, 20, 3), "HWC", 3, dev
yield check_fail_gaussian_blur, 10, -1.0, 0, (100, 20, 3), "HWC", 3, dev
yield check_fail_gaussian_blur, 10, 0.0, -11, (100, 20, 3), "HWC", 3, dev
        yield check_fail_gaussian_blur, 10, 0.0, 2, (100, 20, 3), "HWC", 3, dev