wallet_import_with_label.py
#!/usr/bin/env python3
# Copyright (c) 2018 The Refnet Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the behavior of RPC importprivkey on set and unset labels of addresses.

It tests different cases in which an address is imported with importaddress
with or without a label and then its private key is imported with importprivkey
with and without a label.
"""

from test_framework.test_framework import RefnetTestFramework
from test_framework.wallet_util import test_address


class ImportWithLabel(RefnetTestFramework):
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        """Main test logic"""

        self.log.info(
            "Test importaddress with label and importprivkey without label."
        )
        self.log.info("Import a watch-only address with a label.")
        address = self.nodes[0].getnewaddress()
        label = "Test Label"
        self.nodes[1].importaddress(address, label)
        test_address(self.nodes[1],
                     address,
                     iswatchonly=True,
                     ismine=False,
                     label=label)

        self.log.info(
            "Import the watch-only address's private key without a "
            "label and the address should keep its label."
        )
        priv_key = self.nodes[0].dumpprivkey(address)
        self.nodes[1].importprivkey(priv_key)
        test_address(self.nodes[1], address, label=label)

        self.log.info(
            "Test importaddress without label and importprivkey with label."
        )
        self.log.info("Import a watch-only address without a label.")
        address2 = self.nodes[0].getnewaddress()
        self.nodes[1].importaddress(address2)
        test_address(self.nodes[1],
                     address2,
                     iswatchonly=True,
                     ismine=False,
                     label="")

        self.log.info(
            "Import the watch-only address's private key with a "
            "label and the address should have its label updated."
        )
        priv_key2 = self.nodes[0].dumpprivkey(address2)
        label2 = "Test Label 2"
        self.nodes[1].importprivkey(priv_key2, label2)
        test_address(self.nodes[1], address2, label=label2)

        self.log.info("Test importaddress with label and importprivkey with label.")
        self.log.info("Import a watch-only address with a label.")
        address3 = self.nodes[0].getnewaddress()
        label3_addr = "Test Label 3 for importaddress"
        self.nodes[1].importaddress(address3, label3_addr)
        test_address(self.nodes[1],
                     address3,
                     iswatchonly=True,
                     ismine=False,
                     label=label3_addr)

        self.log.info(
            "Import the watch-only address's private key with a "
            "label and the address should have its label updated."
        )
        priv_key3 = self.nodes[0].dumpprivkey(address3)
        label3_priv = "Test Label 3 for importprivkey"
        self.nodes[1].importprivkey(priv_key3, label3_priv)
        test_address(self.nodes[1], address3, label=label3_priv)

        self.log.info(
            "Test importprivkey won't label new dests with the same "
            "label as other labeled dests for the same key."
        )
        self.log.info("Import a watch-only legacy address with a label.")
        address4 = self.nodes[0].getnewaddress()
        label4_addr = "Test Label 4 for importaddress"
        self.nodes[1].importaddress(address4, label4_addr)
        test_address(self.nodes[1],
                     address4,
                     iswatchonly=True,
                     ismine=False,
                     label=label4_addr,
                     embedded=None)

        self.log.info(
            "Import the watch-only address's private key without a "
            "label and new destinations for the key should have an "
            "empty label while the 'old' destination should keep "
            "its label."
        )
        priv_key4 = self.nodes[0].dumpprivkey(address4)
        self.nodes[1].importprivkey(priv_key4)
        embedded_addr = self.nodes[1].getaddressinfo(address4)['embedded']['address']
        test_address(self.nodes[1], embedded_addr, label="")
        test_address(self.nodes[1], address4, label=label4_addr)

        self.stop_nodes()


if __name__ == "__main__":
    ImportWithLabel().main()
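
# For reference, a minimal sketch of what the `test_address` helper asserts
# (assumption: the real helper in test_framework/wallet_util.py compares
# getaddressinfo fields against the expected keyword arguments and may treat
# labels slightly differently; this is illustrative only).
def _test_address_sketch(node, address, **expected):
    """Assert selected getaddressinfo fields match the expected values."""
    info = node.getaddressinfo(address)
    for field, value in expected.items():
        assert info.get(field) == value, (
            "getaddressinfo[{!r}] = {!r}, expected {!r}".format(
                field, info.get(field), value))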
constants.rs
use std::collections::HashMap;
use std::sync::RwLock;

use lazy_static::lazy_static;
use storage_proofs::hasher::Hasher;
use storage_proofs::util::NODE_SIZE;
use storage_proofs::MAX_LEGACY_POREP_REGISTERED_PROOF_ID;
use typenum::{U0, U2, U8};

use crate::types::UnpaddedBytesAmount;

pub const SECTOR_SIZE_2_KIB: u64 = 1 << 11;
pub const SECTOR_SIZE_4_KIB: u64 = 1 << 12;
pub const SECTOR_SIZE_16_KIB: u64 = 1 << 14;
pub const SECTOR_SIZE_32_KIB: u64 = 1 << 15;
pub const SECTOR_SIZE_8_MIB: u64 = 1 << 23;
pub const SECTOR_SIZE_16_MIB: u64 = 1 << 24;
pub const SECTOR_SIZE_512_MIB: u64 = 1 << 29;
pub const SECTOR_SIZE_1_GIB: u64 = 1 << 30;
pub const SECTOR_SIZE_32_GIB: u64 = 1 << 35;
pub const SECTOR_SIZE_64_GIB: u64 = 1 << 36;

pub const WINNING_POST_CHALLENGE_COUNT: usize = 66;
pub const WINNING_POST_SECTOR_COUNT: usize = 1;

pub const WINDOW_POST_CHALLENGE_COUNT: usize = 10;

pub const DRG_DEGREE: usize = storage_proofs::drgraph::BASE_DEGREE;
pub const EXP_DEGREE: usize = storage_proofs::porep::stacked::EXP_DEGREE;

pub const MAX_LEGACY_REGISTERED_SEAL_PROOF_ID: u64 = MAX_LEGACY_POREP_REGISTERED_PROOF_ID;

/// Sector sizes for which parameters have been published.
pub const PUBLISHED_SECTOR_SIZES: [u64; 10] = [
    SECTOR_SIZE_2_KIB,
    SECTOR_SIZE_4_KIB,
    SECTOR_SIZE_16_KIB,
    SECTOR_SIZE_32_KIB,
    SECTOR_SIZE_8_MIB,
    SECTOR_SIZE_16_MIB,
    SECTOR_SIZE_512_MIB,
    SECTOR_SIZE_1_GIB,
    SECTOR_SIZE_32_GIB,
    SECTOR_SIZE_64_GIB,
];

lazy_static! {
    pub static ref POREP_MINIMUM_CHALLENGES: RwLock<HashMap<u64, u64>> = RwLock::new(
        [
            (SECTOR_SIZE_2_KIB, 2),
            (SECTOR_SIZE_4_KIB, 2),
            (SECTOR_SIZE_16_KIB, 2),
            (SECTOR_SIZE_32_KIB, 2),
            (SECTOR_SIZE_8_MIB, 2),
            (SECTOR_SIZE_16_MIB, 2),
            (SECTOR_SIZE_512_MIB, 2),
            (SECTOR_SIZE_1_GIB, 2),
            (SECTOR_SIZE_32_GIB, 176),
            (SECTOR_SIZE_64_GIB, 176),
        ]
        .iter()
        .copied()
        .collect()
    );
    pub static ref POREP_PARTITIONS: RwLock<HashMap<u64, u8>> = RwLock::new(
        [
            (SECTOR_SIZE_2_KIB, 1),
            (SECTOR_SIZE_4_KIB, 1),
            (SECTOR_SIZE_16_KIB, 1),
            (SECTOR_SIZE_32_KIB, 1),
            (SECTOR_SIZE_8_MIB, 1),
            (SECTOR_SIZE_16_MIB, 1),
            (SECTOR_SIZE_512_MIB, 1),
            (SECTOR_SIZE_1_GIB, 1),
            (SECTOR_SIZE_32_GIB, 10),
            (SECTOR_SIZE_64_GIB, 10),
        ]
        .iter()
        .copied()
        .collect()
    );
    pub static ref LAYERS: RwLock<HashMap<u64, usize>> = RwLock::new(
        [
            (SECTOR_SIZE_2_KIB, 2),
            (SECTOR_SIZE_4_KIB, 2),
            (SECTOR_SIZE_16_KIB, 2),
            (SECTOR_SIZE_32_KIB, 2),
            (SECTOR_SIZE_8_MIB, 2),
            (SECTOR_SIZE_16_MIB, 2),
            (SECTOR_SIZE_512_MIB, 2),
            (SECTOR_SIZE_1_GIB, 2),
            (SECTOR_SIZE_32_GIB, 11),
            (SECTOR_SIZE_64_GIB, 11),
        ]
        .iter()
        .copied()
        .collect()
    );
    // These numbers must match those used for Window PoSt scheduling in the miner actor.
    // Please coordinate changes with actor code.
    // https://github.com/filecoin-project/specs-actors/blob/master/actors/abi/sector.go
    pub static ref WINDOW_POST_SECTOR_COUNT: RwLock<HashMap<u64, usize>> = RwLock::new(
        [
            (SECTOR_SIZE_2_KIB, 2),
            (SECTOR_SIZE_4_KIB, 2),
            (SECTOR_SIZE_16_KIB, 2),
            (SECTOR_SIZE_32_KIB, 2),
            (SECTOR_SIZE_8_MIB, 2),
            (SECTOR_SIZE_16_MIB, 2),
            (SECTOR_SIZE_512_MIB, 2),
            (SECTOR_SIZE_1_GIB, 2),
            (SECTOR_SIZE_32_GIB, 2349), // this gives 125,279,217 constraints, fitting in a single partition
            (SECTOR_SIZE_64_GIB, 2300), // this gives 129,887,900 constraints, fitting in a single partition
        ]
        .iter()
        .copied()
        .collect()
    );
}

/// The size of a single snark proof.
pub const SINGLE_PARTITION_PROOF_LEN: usize = 192;

pub const MINIMUM_RESERVED_LEAVES_FOR_PIECE_IN_SECTOR: u64 = 4;

// Bit padding causes bytes to only be aligned at every 127 bytes (for 31.75 bytes).
pub const MINIMUM_RESERVED_BYTES_FOR_PIECE_IN_FULLY_ALIGNED_SECTOR: u64 =
    (MINIMUM_RESERVED_LEAVES_FOR_PIECE_IN_SECTOR * NODE_SIZE as u64) - 1;

/// The minimum size a single piece must have before padding.
pub const MIN_PIECE_SIZE: UnpaddedBytesAmount = UnpaddedBytesAmount(127);

/// The hasher used for creating comm_d.
pub type DefaultPieceHasher = storage_proofs::hasher::Sha256Hasher;
pub type DefaultPieceDomain = <DefaultPieceHasher as Hasher>::Domain;

/// The default hasher for merkle trees currently in use.
pub type DefaultTreeHasher = storage_proofs::hasher::PoseidonHasher;
pub type DefaultTreeDomain = <DefaultTreeHasher as Hasher>::Domain;

pub type DefaultBinaryTree = storage_proofs::merkle::BinaryMerkleTree<DefaultTreeHasher>;
pub type DefaultOctTree = storage_proofs::merkle::OctMerkleTree<DefaultTreeHasher>;
pub type DefaultOctLCTree = storage_proofs::merkle::OctLCMerkleTree<DefaultTreeHasher>;

// Generic shapes
pub type SectorShapeBase = LCTree<DefaultTreeHasher, U8, U0, U0>;
pub type SectorShapeSub2 = LCTree<DefaultTreeHasher, U8, U2, U0>;
pub type SectorShapeSub8 = LCTree<DefaultTreeHasher, U8, U8, U0>;
pub type SectorShapeTop2 = LCTree<DefaultTreeHasher, U8, U8, U2>;

// Specific size constants by shape
pub type SectorShape2KiB = SectorShapeBase;
pub type SectorShape8MiB = SectorShapeBase;
pub type SectorShape512MiB = SectorShapeBase;

pub type SectorShape4KiB = SectorShapeSub2;
pub type SectorShape16MiB = SectorShapeSub2;
pub type SectorShape1GiB = SectorShapeSub2;

pub type SectorShape16KiB = SectorShapeSub8;
pub type SectorShape32GiB = SectorShapeSub8;

pub type SectorShape32KiB = SectorShapeTop2;
pub type SectorShape64GiB = SectorShapeTop2;

pub fn is_sector_shape_base(sector_size: u64) -> bool {
    match sector_size {
        SECTOR_SIZE_2_KIB | SECTOR_SIZE_8_MIB | SECTOR_SIZE_512_MIB => true,
        _ => false,
    }
}

pub fn is_sector_shape_sub2(sector_size: u64) -> bool {
    match sector_size {
        SECTOR_SIZE_4_KIB | SECTOR_SIZE_16_MIB | SECTOR_SIZE_1_GIB => true,
        _ => false,
    }
}

pub fn is_sector_shape_sub8(sector_size: u64) -> bool {
    match sector_size {
        SECTOR_SIZE_16_KIB | SECTOR_SIZE_32_GIB => true,
        _ => false,
    }
}

pub fn is_sector_shape_top2(sector_size: u64) -> bool {
    match sector_size {
        SECTOR_SIZE_32_KIB | SECTOR_SIZE_64_GIB => true,
        _ => false,
    }
}

pub use storage_proofs::merkle::{DiskTree, LCTree};
pub use storage_proofs::parameter_cache::{
    get_parameter_data, get_parameter_data_from_id, get_verifying_key_data,
};

/// Calls a function with the type hint of the sector shape matching the provided sector.
/// Panics if provided with an unknown sector size.
#[macro_export]
macro_rules! with_shape {
    ($size:expr, $f:ident) => {
        with_shape!($size, $f,)
    };
    ($size:expr, $f:ident, $($args:expr,)*) => {
        match $size {
            _x if $size == $crate::constants::SECTOR_SIZE_2_KIB => {
                $f::<$crate::constants::SectorShape2KiB>($($args),*)
            },
            _x if $size == $crate::constants::SECTOR_SIZE_4_KIB => {
                $f::<$crate::constants::SectorShape4KiB>($($args),*)
            },
            _x if $size == $crate::constants::SECTOR_SIZE_16_KIB => {
                $f::<$crate::constants::SectorShape16KiB>($($args),*)
            },
            _x if $size == $crate::constants::SECTOR_SIZE_32_KIB => {
                $f::<$crate::constants::SectorShape32KiB>($($args),*)
            },
            _x if $size == $crate::constants::SECTOR_SIZE_8_MIB => {
                $f::<$crate::constants::SectorShape8MiB>($($args),*)
            },
            _x if $size == $crate::constants::SECTOR_SIZE_16_MIB => {
                $f::<$crate::constants::SectorShape16MiB>($($args),*)
            },
            _x if $size == $crate::constants::SECTOR_SIZE_512_MIB => {
                $f::<$crate::constants::SectorShape512MiB>($($args),*)
            },
            _x if $size == $crate::constants::SECTOR_SIZE_1_GIB => {
                $f::<$crate::constants::SectorShape1GiB>($($args),*)
            },
            _x if $size == $crate::constants::SECTOR_SIZE_32_GIB => {
                $f::<$crate::constants::SectorShape32GiB>($($args),*)
            },
            _x if $size == $crate::constants::SECTOR_SIZE_64_GIB => {
                $f::<$crate::constants::SectorShape64GiB>($($args),*)
            },
            _ => panic!("unsupported sector size: {}", $size),
        }
    };
    ($size:expr, $f:ident, $($args:expr),*) => {
        with_shape!($size, $f, $($args,)*)
    };
}

#[cfg(test)]
mod tests {
    use super::*;

    use generic_array::typenum::Unsigned;
    use storage_proofs::merkle::MerkleTreeTrait;

    fn canonical_shape(sector_size: u64) -> (usize, usize, usize) {
        // This could perhaps be cleaned up, but I think it expresses the intended constraints
        // and is consistent with our current hard-coded size->shape mappings.
        assert_eq!(sector_size.count_ones(), 1);
        let log_byte_size = sector_size.trailing_zeros();
        let log_nodes = log_byte_size - 5; // 2^5 = 32-byte nodes

        let max_tree_log = 3; // Largest allowable arity. The optimal shape.
        let log_max_base = 27; // 4 GiB worth of nodes

        let log_base = max_tree_log; // Base must be oct trees.
        let log_in_base = u32::min(log_max_base, (log_nodes / log_base) * log_base); // How many nodes in base?
        let log_upper = log_nodes - log_in_base; // Nodes in sub and upper combined.
        let log_rem = log_upper % max_tree_log; // Remainder after filling optimal trees.

        let (log_sub, log_top) = {
            // Are the upper trees empty?
            if log_upper > 0 {
                // Do we need a remainder tree?
                if log_rem == 0 {
                    (Some(max_tree_log), None) // No remainder tree, fill the sub tree optimally.
                } else {
                    // Need a remainder tree.
                    // Do we have room for another max tree?
                    if log_upper > max_tree_log {
                        // There is room. Use the sub tree for as much overflow as we can fit optimally.
                        // And put the rest in the top tree.
                        (Some(max_tree_log), Some(log_rem))
                    } else {
                        // Can't fit another max tree.
                        // Just put the remainder in the sub tree.
                        (Some(log_rem), None)
                    }
                }
            } else {
                // Upper trees are empty.
                (None, None)
            }
        };

        let base = 1 << log_base;
        let sub = if let Some(l) = log_sub { 1 << l } else { 0 };
        let top = if let Some(l) = log_top { 1 << l } else { 0 };

        (base, sub, top)
    }

    fn arities_to_usize<Tree: MerkleTreeTrait>() -> (usize, usize, usize) {
        (
            Tree::Arity::to_usize(),
            Tree::SubTreeArity::to_usize(),
            Tree::TopTreeArity::to_usize(),
        )
    }

    #[test]
    fn test_with_shape_macro() {
        test_with_shape_macro_aux(SECTOR_SIZE_2_KIB);
        test_with_shape_macro_aux(SECTOR_SIZE_4_KIB);
        test_with_shape_macro_aux(SECTOR_SIZE_8_MIB);
        test_with_shape_macro_aux(SECTOR_SIZE_16_MIB);
        test_with_shape_macro_aux(SECTOR_SIZE_512_MIB);
        test_with_shape_macro_aux(SECTOR_SIZE_1_GIB);
        test_with_shape_macro_aux(SECTOR_SIZE_32_GIB);
        test_with_shape_macro_aux(SECTOR_SIZE_64_GIB);
    }

    fn test_with_shape_macro_aux(sector_size: u64) {
        let expected = canonical_shape(sector_size);
        let arities = with_shape!(sector_size, arities_to_usize);
        assert_eq!(
            arities, expected,
            "Wrong shape for sector size {}: have {:?} but need {:?}.",
            sector_size, arities, expected
        );
    }
}
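
// Usage sketch: how a caller dispatches a tree-generic function through
// `with_shape!`. `count_leaves` is a hypothetical helper introduced here only
// for illustration; it is not part of this crate's API.
#[cfg(test)]
mod with_shape_usage_sketch {
    use super::*;
    use storage_proofs::merkle::MerkleTreeTrait;

    /// Hypothetical tree-generic callee: number of 32-byte leaves in a sector.
    fn count_leaves<Tree: MerkleTreeTrait>(sector_size: u64) -> u64 {
        sector_size / NODE_SIZE as u64
    }

    #[test]
    fn dispatches_on_runtime_sector_size() {
        // The macro matches the runtime size and instantiates
        // `count_leaves::<SectorShape2KiB>` at compile time.
        let leaves = with_shape!(SECTOR_SIZE_2_KIB, count_leaves, SECTOR_SIZE_2_KIB);
        assert_eq!(leaves, SECTOR_SIZE_2_KIB / NODE_SIZE as u64);
    }
}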
dagrun.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from typing import Optional, cast

from sqlalchemy import (
    Boolean, Column, DateTime, Index, Integer, PickleType, String,
    UniqueConstraint, and_, func, or_,
)
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import synonym
from sqlalchemy.orm.session import Session

from airflow.exceptions import AirflowException
from airflow.models.base import ID_LEN, Base
from airflow.stats import Stats
from airflow.ti_deps.dep_context import SCHEDULEABLE_STATES, DepContext
from airflow.utils import timezone
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import UtcDateTime
from airflow.utils.state import State


class DagRun(Base, LoggingMixin):
    """
    DagRun describes an instance of a Dag. It can be created
    by the scheduler (for regular runs) or by an external trigger
    """
    __tablename__ = "dag_run"

    ID_PREFIX = 'scheduled__'
    ID_FORMAT_PREFIX = ID_PREFIX + '{0}'

    id = Column(Integer, primary_key=True)
    dag_id = Column(String(ID_LEN))
    execution_date = Column(UtcDateTime, default=timezone.utcnow)
    start_date = Column(UtcDateTime, default=timezone.utcnow)
    end_date = Column(UtcDateTime)
    _state = Column('state', String(50), default=State.RUNNING)
    run_id = Column(String(ID_LEN))
    external_trigger = Column(Boolean, default=True)
    conf = Column(PickleType)

    dag = None

    __table_args__ = (
        Index('dag_id_state', dag_id, _state),
        UniqueConstraint('dag_id', 'execution_date'),
        UniqueConstraint('dag_id', 'run_id'),
    )

    def __init__(self, dag_id=None, run_id=None, execution_date=None,
                 start_date=None, external_trigger=None, conf=None, state=None):
        self.dag_id = dag_id
        self.run_id = run_id
        self.execution_date = execution_date
        self.start_date = start_date
        self.external_trigger = external_trigger
        self.conf = conf
        self.state = state
        super().__init__()

    def __repr__(self):
        return (
            '<DagRun {dag_id} @ {execution_date}: {run_id}, '
            'externally triggered: {external_trigger}>'
        ).format(
            dag_id=self.dag_id,
            execution_date=self.execution_date,
            run_id=self.run_id,
            external_trigger=self.external_trigger)

    def get_state(self):
        return self._state

    def set_state(self, state):
        if self._state != state:
            self._state = state
            self.end_date = timezone.utcnow() if self._state in State.finished() else None

    @declared_attr
    def state(self):
        return synonym('_state', descriptor=property(self.get_state, self.set_state))

    @classmethod
    def id_for_date(cls, date, prefix=ID_FORMAT_PREFIX):
        return prefix.format(date.isoformat()[:19])

    @provide_session
    def refresh_from_db(self, session=None):
        """
        Reloads the current dagrun from the database

        :param session: database session
        """
        DR = DagRun

        exec_date = func.cast(self.execution_date, DateTime)

        dr = session.query(DR).filter(
            DR.dag_id == self.dag_id,
            func.cast(DR.execution_date, DateTime) == exec_date,
            DR.run_id == self.run_id
        ).one()

        self.id = dr.id
        self.state = dr.state

    @staticmethod
    @provide_session
    def find(dag_id=None, run_id=None, execution_date=None,
             state=None, external_trigger=None, no_backfills=False,
             session=None):
        """
        Returns a set of dag runs for the given search criteria.

        :param dag_id: the dag_id to find dag runs for
        :type dag_id: int, list
        :param run_id: defines the run id for this dag run
        :type run_id: str
        :param execution_date: the execution date
        :type execution_date: datetime.datetime
        :param state: the state of the dag run
        :type state: str
        :param external_trigger: whether this dag run is externally triggered
        :type external_trigger: bool
        :param no_backfills: return no backfills (True), return all (False).
            Defaults to False
        :type no_backfills: bool
        :param session: database session
        :type session: sqlalchemy.orm.session.Session
        """
        DR = DagRun

        qry = session.query(DR)
        if dag_id:
            qry = qry.filter(DR.dag_id == dag_id)
        if run_id:
            qry = qry.filter(DR.run_id == run_id)
        if execution_date:
            if isinstance(execution_date, list):
                qry = qry.filter(DR.execution_date.in_(execution_date))
            else:
                qry = qry.filter(DR.execution_date == execution_date)
        if state:
            qry = qry.filter(DR.state == state)
        if external_trigger is not None:
            qry = qry.filter(DR.external_trigger == external_trigger)
        if no_backfills:
            # in order to prevent a circular dependency
            from airflow.jobs import BackfillJob
            qry = qry.filter(DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'))

        dr = qry.order_by(DR.execution_date).all()

        return dr

    @provide_session
    def get_task_instances(self, state=None, session=None):
        """
        Returns the task instances for this dag run
        """
        from airflow.models.taskinstance import TaskInstance  # Avoid circular import
        tis = session.query(TaskInstance).filter(
            TaskInstance.dag_id == self.dag_id,
            TaskInstance.execution_date == self.execution_date,
        )
        if state:
            if isinstance(state, str):
                tis = tis.filter(TaskInstance.state == state)
            else:
                # this is required to deal with NULL values
                if None in state:
                    if all(x is None for x in state):
                        tis = tis.filter(TaskInstance.state.is_(None))
                    else:
                        not_none_state = [s for s in state if s]
                        tis = tis.filter(
                            or_(TaskInstance.state.in_(not_none_state),
                                TaskInstance.state.is_(None))
                        )
                else:
                    tis = tis.filter(TaskInstance.state.in_(state))

        if self.dag and self.dag.partial:
            tis = tis.filter(TaskInstance.task_id.in_(self.dag.task_ids))

        return tis.all()

    @provide_session
    def get_task_instance(self, task_id, session=None):
        """
        Returns the task instance specified by task_id for this dag run

        :param task_id: the task id
        """
        from airflow.models.taskinstance import TaskInstance  # Avoid circular import
        TI = TaskInstance
        ti = session.query(TI).filter(
            TI.dag_id == self.dag_id,
            TI.execution_date == self.execution_date,
            TI.task_id == task_id
        ).first()

        return ti

    def get_dag(self):
        """
        Returns the Dag associated with this DagRun.

        :return: DAG
        """
        if not self.dag:
            raise AirflowException("The DAG (.dag) for {} needs to be set"
                                   .format(self))

        return self.dag

    @provide_session
    def get_previous_dagrun(self, state: Optional[str] = None, session: Session = None) -> Optional['DagRun']:
        """The previous DagRun, if there is one"""
        session = cast(Session, session)  # mypy

        filters = [
            DagRun.dag_id == self.dag_id,
            DagRun.execution_date < self.execution_date,
        ]
        if state is not None:
            filters.append(DagRun.state == state)
        return session.query(DagRun).filter(
            *filters
        ).order_by(
            DagRun.execution_date.desc()
        ).first()

    @provide_session
    def get_previous_scheduled_dagrun(self, session=None):
        """The previous, SCHEDULED DagRun, if there is one"""
        dag = self.get_dag()

        return session.query(DagRun).filter(
            DagRun.dag_id == self.dag_id,
            DagRun.execution_date == dag.previous_schedule(self.execution_date)
        ).first()

    @provide_session
    def update_state(self, session=None):
        """
        Determines the overall state of the DagRun based on the state
        of its TaskInstances.

        :return: ready_tis: the tis that can be scheduled in the current loop
        :rtype ready_tis: list[airflow.models.TaskInstance]
        """
        dag = self.get_dag()
        ready_tis = []
        tis = [ti for ti in self.get_task_instances(
            session=session, state=State.task_states + (State.SHUTDOWN,))]
        self.log.debug("number of tis tasks for %s: %s task(s)", self, len(tis))
        for ti in list(tis):
            ti.task = dag.get_task(ti.task_id)

        start_dttm = timezone.utcnow()
        unfinished_tasks = [t for t in tis if t.state in State.unfinished()]
        finished_tasks = [t for t in tis
                          if t.state in State.finished() + [State.UPSTREAM_FAILED]]
        none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks)
        none_task_concurrency = all(t.task.task_concurrency is None
                                    for t in unfinished_tasks)
        # small speed up
        if unfinished_tasks and none_depends_on_past and none_task_concurrency:
            scheduleable_tasks = [ut for ut in unfinished_tasks
                                  if ut.state in SCHEDULEABLE_STATES]
            self.log.debug("number of scheduleable tasks for %s: %s task(s)",
                           self, len(scheduleable_tasks))
            ready_tis, changed_tis = self._get_ready_tis(
                scheduleable_tasks, finished_tasks, session)
            self.log.debug("ready tis length for %s: %s task(s)", self, len(ready_tis))
            are_runnable_tasks = ready_tis or self._are_premature_tis(
                unfinished_tasks, finished_tasks, session) or changed_tis

        duration = (timezone.utcnow() - start_dttm)
        Stats.timing("dagrun.dependency-check.{}".format(self.dag_id), duration)

        leaf_tis = [ti for ti in tis if ti.task_id in {t.task_id for t in dag.leaves}]

        # if all leaves finished and at least one failed, the run failed
        if not unfinished_tasks and any(
            leaf_ti.state in {State.FAILED, State.UPSTREAM_FAILED} for leaf_ti in leaf_tis
        ):
            self.log.info('Marking run %s failed', self)
            self.set_state(State.FAILED)
            dag.handle_callback(self, success=False, reason='task_failure',
                                session=session)

        # if all leaves succeeded and no unfinished tasks, the run succeeded
        elif not unfinished_tasks and all(
            leaf_ti.state in {State.SUCCESS, State.SKIPPED} for leaf_ti in leaf_tis
        ):
            self.log.info('Marking run %s successful', self)
            self.set_state(State.SUCCESS)
            dag.handle_callback(self, success=True, reason='success', session=session)

        # if *all tasks* are deadlocked, the run failed
        elif (unfinished_tasks and none_depends_on_past and
              none_task_concurrency and not are_runnable_tasks):
            self.log.info('Deadlock; marking run %s failed', self)
            self.set_state(State.FAILED)
            dag.handle_callback(self, success=False, reason='all_tasks_deadlocked',
                                session=session)

        # finally, if the leaves aren't done, the dag is still running
        else:
            self.set_state(State.RUNNING)

        self._emit_duration_stats_for_finished_state()

        # todo: determine whether we want to use with_for_update to make sure to lock the run
        session.merge(self)
        session.commit()

        return ready_tis

    def _get_ready_tis(self, scheduleable_tasks, finished_tasks, session):
        ready_tis = []
        changed_tis = False
        for st in scheduleable_tasks:
            st_old_state = st.state
            if st.are_dependencies_met(
                    dep_context=DepContext(
                        flag_upstream_failed=True,
                        finished_tasks=finished_tasks),
                    session=session):
                ready_tis.append(st)
            elif st_old_state != st.current_state(session=session):
                changed_tis = True
        return ready_tis, changed_tis

    def _are_premature_tis(self, unfinished_tasks, finished_tasks, session):
        # there might be runnable tasks that are up for retry and for some reason
        # (retry delay, etc.) are not ready yet, so we set the flags to count them in
        for ut in unfinished_tasks:
            if ut.are_dependencies_met(
                    dep_context=DepContext(
                        flag_upstream_failed=True,
                        ignore_in_retry_period=True,
                        ignore_in_reschedule_period=True,
                        finished_tasks=finished_tasks),
                    session=session):
                return True

    def _emit_duration_stats_for_finished_state(self):
        if self.state == State.RUNNING:
            return

        duration = (self.end_date - self.start_date)
        if self.state == State.SUCCESS:
            Stats.timing('dagrun.duration.success.{}'.format(self.dag_id), duration)
        elif self.state == State.FAILED:
            Stats.timing('dagrun.duration.failed.{}'.format(self.dag_id), duration)

    @provide_session
    def verify_integrity(self, session=None):
        """
        Verifies the DagRun by checking for removed tasks or tasks that are not in the
        database yet. It will set state to removed or add the task if required.
        """
        from airflow.models.taskinstance import TaskInstance  # Avoid circular import
        dag = self.get_dag()
        tis = self.get_task_instances(session=session)

        # check for removed or restored tasks
        task_ids = []
        for ti in tis:
            task_ids.append(ti.task_id)
            task = None
            try:
                task = dag.get_task(ti.task_id)
            except AirflowException:
                if ti.state == State.REMOVED:
                    pass  # ti has already been removed, just ignore it
                elif self.state != State.RUNNING and not dag.partial:
                    self.log.warning("Failed to get task '{}' for dag '{}'. "
                                     "Marking it as removed.".format(ti, dag))
                    Stats.incr(
                        "task_removed_from_dag.{}".format(dag.dag_id), 1, 1)
                    ti.state = State.REMOVED

            should_restore_task = (task is not None) and ti.state == State.REMOVED
            if should_restore_task:
                self.log.info("Restoring task '{}' which was previously "
                              "removed from DAG '{}'".format(ti, dag))
                Stats.incr("task_restored_to_dag.{}".format(dag.dag_id), 1, 1)
                ti.state = State.NONE

        # check for missing tasks
        for task in dag.task_dict.values():
            if task.start_date > self.execution_date and not self.is_backfill:
                continue

            if task.task_id not in task_ids:
                Stats.incr(
                    "task_instance_created-{}".format(task.__class__.__name__), 1, 1)
                ti = TaskInstance(task, self.execution_date)
                session.add(ti)

        session.commit()

    @staticmethod
    def get_run(session, dag_id, execution_date):
        """
        :param dag_id: DAG ID
        :type dag_id: unicode
        :param execution_date: execution date
        :type execution_date: datetime
        :return: DagRun corresponding to the given dag_id and execution date
            if one exists. None otherwise.
        :rtype: airflow.models.DagRun
        """
        qry = session.query(DagRun).filter(
            DagRun.dag_id == dag_id,
            DagRun.external_trigger == False,  # noqa pylint: disable=singleton-comparison
            DagRun.execution_date == execution_date,
        )
        return qry.first()

    @property
    def is_backfill(self):
        from airflow.jobs import BackfillJob
        return (
            self.run_id is not None and
            self.run_id.startswith(BackfillJob.ID_PREFIX)
        )

    @classmethod
    @provide_session
    def get_latest_runs(cls, session):
        """Returns the latest DagRun for each DAG."""
        subquery = (
            session
            .query(
                cls.dag_id,
                func.max(cls.execution_date).label('execution_date'))
            .group_by(cls.dag_id)
            .subquery()
        )
        dagruns = (
            session
            .query(cls)
            .join(subquery,
                  and_(cls.dag_id == subquery.c.dag_id,
                       cls.execution_date == subquery.c.execution_date))
            .all()
        )
        return dagruns
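
# Usage sketch (hypothetical helper, not part of this model): how callers
# typically query runs via `DagRun.find` and read the `state` synonym above.
def _example_failed_runs(dag_id):
    """Return (run_id, execution_date) pairs for failed, non-backfill runs."""
    runs = DagRun.find(dag_id=dag_id, state=State.FAILED, no_backfills=True)
    return [(run.run_id, run.execution_date) for run in runs]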
hamming.rs
use hamming;

fn process_distance_case(strand_pair: [&str; 2], expected_distance: Option<usize>) {
    assert_eq!(
        hamming::hamming_distance(strand_pair[0], strand_pair[1]),
        expected_distance
    );
}

#[test]
fn test_empty_strands() {
    process_distance_case(["", ""], Some(0));
}

#[test]
#[ignore]
/// disallow first strand longer
fn test_disallow_first_strand_longer() {
    process_distance_case(["AATG", "AAA"], None);
}

#[test]
#[ignore]
/// disallow second strand longer
fn test_disallow_second_strand_longer() {
    process_distance_case(["ATA", "AGTG"], None);
}

#[test]
#[ignore]
fn test_first_string_is_longer() {
    process_distance_case(["AAA", "AA"], None);
}

#[test]
#[ignore]
fn test_second_string_is_longer() {
    process_distance_case(["A", "AA"], None);
}

#[test]
#[ignore]
/// single letter identical strands
fn test_single_letter_identical_strands() {
    process_distance_case(["A", "A"], Some(0));
}

#[test]
#[ignore]
/// small distance
fn test_single_letter_different_strands() {
    process_distance_case(["G", "T"], Some(1));
}

#[test]
#[ignore]
/// long identical strands
fn test_long_identical_strands() {
    process_distance_case(["GGACTGAAATCTG", "GGACTGAAATCTG"], Some(0));
}

#[test]
#[ignore]
fn test_no_difference_between_identical_strands() {
    process_distance_case(["GGACTGA", "GGACTGA"], Some(0));
}

#[test]
#[ignore]
fn test_complete_hamming_distance_in_small_strand() {
    process_distance_case(["ACT", "GGA"], Some(3));
}

#[test]
#[ignore]
fn test_small_hamming_distance_in_the_middle_somewhere() {
    process_distance_case(["GGACG", "GGTCG"], Some(1));
}

#[test]
#[ignore]
fn test_larger_distance() {
    process_distance_case(["ACCAGGG", "ACTATGG"], Some(2));
}

#[test]
#[ignore]
/// large distance in off-by-one strand
fn test_long_different_strands() {
    process_distance_case(["GGACGGATTCTG", "AGGACGGATTCT"], Some(9));
}
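
// Reference sketch of the function these tests target (assumption: the real
// solution lives in the `hamming` crate's src/lib.rs and may differ in detail).
mod hamming_sketch {
    /// Count the positions at which two equal-length strands differ;
    /// return `None` when the strands have different lengths.
    pub fn hamming_distance(a: &str, b: &str) -> Option<usize> {
        if a.chars().count() != b.chars().count() {
            return None;
        }
        Some(a.chars().zip(b.chars()).filter(|(x, y)| x != y).count())
    }
}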
string.rs
use std::{
    borrow::Borrow,
    char::decode_utf16,
    hash::Hash,
    mem::size_of,
    ops::{Deref, DerefMut, RangeBounds},
    str::Utf8Error,
};

use comet::letroot;

use super::vector::Vector;
use crate::{
    api::{Collectable, Finalize, Gc, Trace},
    gc_base::GcBase,
    mutator::MutatorRef,
};

/// A possible error value when converting a `String` from a UTF-8 byte vector.
///
/// This type is the error type for the [`from_utf8`] method on [`String`]. It
/// is designed in such a way to carefully avoid reallocations: the
/// [`into_bytes`] method will give back the byte vector that was used in the
/// conversion attempt.
///
/// [`from_utf8`]: String::from_utf8
/// [`into_bytes`]: FromUtf8Error::into_bytes
///
/// The [`Utf8Error`] type provided by [`std::str`] represents an error that may
/// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's
/// an analogue to `FromUtf8Error`, and you can get one from a `FromUtf8Error`
/// through the [`utf8_error`] method.
///
/// [`Utf8Error`]: str::Utf8Error "std::str::Utf8Error"
/// [`std::str`]: core::str "std::str"
/// [`&str`]: prim@str "&str"
/// [`utf8_error`]: FromUtf8Error::utf8_error
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // some invalid bytes, in a vector
/// let bytes = vec![0, 159];
///
/// let value = String::from_utf8(bytes);
///
/// assert!(value.is_err());
/// assert_eq!(vec![0, 159], value.unwrap_err().into_bytes());
/// ```
#[derive(Debug, PartialEq, Eq)]
pub struct FromUtf8Error<H: GcBase> {
    bytes: Vector<u8, H>,
    error: Utf8Error,
}

#[derive(Debug)]
pub struct FromUtf16Error(());

/// GCed version of [alloc::string::String]. It has all the same features as std String.
#[derive(Clone)]
pub struct String<H: GcBase> {
    vec: Vector<u8, H>,
}

impl<H: GcBase> String<H> {
    pub fn from_str(mutator: &mut MutatorRef<H>, str: impl AsRef<str>) -> Self {
        let str = str.as_ref();
        let mut this = Self::with_capacity(mutator, str.len());
        this.push_str(mutator, str);
        this
    }

    /// Creates a new empty `String`.
    #[inline]
    pub fn new(mutator: &mut MutatorRef<H>) -> Self {
        Self {
            vec: Vector::new(mutator),
        }
    }

    /// Creates a new empty `String` with a particular capacity.
    ///
    /// `String`s have an internal buffer to hold their data. The capacity is
    /// the length of that buffer, and can be queried with the [`capacity`]
    /// method. This method creates an empty `String`, but one with an initial
    /// buffer that can hold `capacity` bytes. This is useful when you may be
    /// appending a bunch of data to the `String`, reducing the number of
    /// reallocations it needs to do.
    #[inline]
    pub fn with_capacity(mutator: &mut MutatorRef<H>, capacity: usize) -> Self {
        Self {
            vec: Vector::with_capacity(mutator, capacity),
        }
    }

    /// Converts a vector of bytes to a `String`.
    ///
    /// A string ([`String`]) is made of bytes ([`u8`]), and a vector of bytes
    /// ([`Vector<u8>`]) is made of bytes, so this function converts between the
    /// two. Not all byte slices are valid `String`s, however: `String`
    /// requires that it is valid UTF-8. `from_utf8()` checks to ensure that
    /// the bytes are valid UTF-8, and then does the conversion.
    ///
    /// If you are sure that the byte slice is valid UTF-8, and you don't want
    /// to incur the overhead of the validity check, there is an unsafe version
    /// of this function, [`from_utf8_unchecked`], which has the same behavior
    /// but skips the check.
    ///
    /// This method will take care to not copy the vector, for efficiency's
    /// sake.
    ///
    /// If you need a [`&str`] instead of a `String`, consider
    /// [`str::from_utf8`].
    ///
    /// The inverse of this method is [`into_bytes`].
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if the slice is not UTF-8 with a description as to why the
    /// provided bytes are not UTF-8. The vector you moved in is also included.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// // some bytes, in a vector
    /// let sparkle_heart = vec![240, 159, 146, 150];
    ///
    /// // We know these bytes are valid, so we'll use `unwrap()`.
    /// let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
    ///
    /// assert_eq!("💖", sparkle_heart);
    /// ```
    ///
    /// Incorrect bytes:
    ///
    /// ```
    /// // some invalid bytes, in a vector
    /// let sparkle_heart = vec![0, 159, 146, 150];
    ///
    /// assert!(String::from_utf8(sparkle_heart).is_err());
    /// ```
    ///
    /// See the docs for [`FromUtf8Error`] for more details on what you can do
    /// with this error.
    #[inline]
    pub fn from_utf8(vec: Vector<u8, H>) -> Result<Self, FromUtf8Error<H>> {
        match std::str::from_utf8(vec.as_slice()) {
            Ok(..) => Ok(String { vec }),
            Err(e) => Err(FromUtf8Error {
                bytes: vec,
                error: e,
            }),
        }
    }

    pub fn from_utf16(mutator: &mut MutatorRef<H>, v: &[u16]) -> Result<String<H>, FromUtf16Error> {
        let stack = mutator.shadow_stack();
        letroot!(ret = stack, Some(Self::with_capacity(mutator, v.len())));
        for c in decode_utf16(v.iter().copied()) {
            if let Ok(c) = c {
                ret.as_mut().unwrap().push(mutator, c);
            } else {
                return Err(FromUtf16Error(()));
            }
        }
        Ok(ret.take().unwrap())
    }

    #[inline]
    pub unsafe fn from_utf8_unchecked(bytes: Vector<u8, H>) -> Self {
        Self { vec: bytes }
    }

    #[inline]
    pub fn into_bytes(self) -> Vector<u8, H> {
        self.vec
    }

    #[inline]
    pub fn as_str(&self) -> &str {
        self
    }

    #[inline]
    pub fn as_mut_str(&mut self) -> &mut str {
        self
    }

    #[inline]
    pub fn push_str(&mut self, mutator: &mut MutatorRef<H>, string: &str) {
        for byte in string.as_bytes() {
            self.vec.push(mutator, *byte);
        }
    }

    #[inline]
    pub fn capacity(&self) -> usize {
        self.vec.capacity()
    }

    #[inline]
    pub fn len(&self) -> usize {
        self.vec.len()
    }

    #[inline]
    pub fn reserve(&mut self, mutator: &mut MutatorRef<H>, additional: usize) {
        self.vec.reserve(mutator, additional);
    }

    #[inline]
    pub fn push(&mut self, mutator: &mut MutatorRef<H>, ch: char) {
        match ch.len_utf8() {
            1 => self.vec.push(mutator, ch as u8),
            _ => {
                let mut dst = [0; 4];
                let utf8 = ch.encode_utf8(&mut dst).as_bytes();
                for x in utf8 {
                    self.vec.push(mutator, *x);
                }
            }
        }
    }

    #[inline]
    pub fn remove(&mut self, idx: usize) -> char {
        let ch = match self[idx..].chars().next() {
            Some(ch) => ch,
            None => panic!("cannot remove a char from the end of a string"),
        };

        let next = idx + ch.len_utf8();
        let len = self.len();
        unsafe {
            std::ptr::copy(
                self.vec.as_ptr().add(next),
                self.vec.as_mut_ptr().add(idx),
                len - next,
            );
            self.vec.set_len(len - (next - idx));
        }
        ch
    }

    #[inline]
    pub fn insert(&mut self, mutator: &mut MutatorRef<H>, idx: usize, ch: char) {
        assert!(self.is_char_boundary(idx));
        let mut bits = [0; 4];
        let bits = ch.encode_utf8(&mut bits).as_bytes();

        unsafe {
            self.insert_bytes(mutator, idx, bits);
        }
    }

    unsafe fn insert_bytes(&mut self, mutator: &mut MutatorRef<H>, idx: usize, bytes: &[u8]) {
        let len = self.len();
        let amt = bytes.len();
        self.vec.reserve(mutator, amt);

        std::ptr::copy(
            self.vec.as_ptr().add(idx),
            self.vec.as_mut_ptr().add(idx + amt),
            len - idx,
        );
        std::ptr::copy_nonoverlapping(bytes.as_ptr(), self.vec.as_mut_ptr().add(idx), amt);
        self.vec.set_len(len + amt);
    }

    #[inline]
    pub fn insert_str(&mut self, mutator: &mut MutatorRef<H>, idx: usize, string: &str) {
        assert!(self.is_char_boundary(idx));

        unsafe {
            self.insert_bytes(mutator, idx, string.as_bytes());
        }
    }

    #[inline]
    pub unsafe fn as_mut_vec(&mut self) -> &mut Vector<u8, H> {
        &mut self.vec
    }

    #[inline]
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    #[inline]
    #[must_use = "use `.truncate()` if you don't need the other half"]
    pub fn split_off(&mut self, mutator: &mut MutatorRef<H>, at: usize) -> String<H> {
        assert!(self.is_char_boundary(at));
        let other = self.vec.split_off(mutator, at);
        unsafe { String::from_utf8_unchecked(other) }
    }

    #[inline]
    pub fn clear(&mut self) {
        self.vec.clear()
    }

    pub fn replace_range<R>(&mut self, mutator: &mut MutatorRef<H>, range: R, replace_with: &str)
    where
        R: RangeBounds<usize>,
    {
        // Memory safety
        //
        // Replace_range does not have the memory safety issues of the vector
        // version of Splice. The data is just plain bytes.

        // WARNING: Inlining this variable would be unsound (#81138)
        let start = range.start_bound();
        match start {
            std::ops::Bound::Included(&n) => assert!(self.is_char_boundary(n)),
            std::ops::Bound::Excluded(&n) => assert!(self.is_char_boundary(n + 1)),
            std::ops::Bound::Unbounded => {}
        };
        // WARNING: Inlining this variable would be unsound (#81138)
        let end = range.end_bound();
        match end {
            std::ops::Bound::Included(&n) => assert!(self.is_char_boundary(n + 1)),
            std::ops::Bound::Excluded(&n) => assert!(self.is_char_boundary(n)),
            std::ops::Bound::Unbounded => {}
        };

        // Using `range` again would be unsound (#81138)
        // We assume the bounds reported by `range` remain the same, but
        // an adversarial implementation could change between calls
        unsafe { self.as_mut_vec() }.splice(mutator, (start, end), replace_with.bytes());
    }
}

unsafe impl<H: GcBase> Trace for String<H> {
    fn trace(&mut self, vis: &mut dyn crate::api::Visitor) {
        self.vec.trace(vis);
    }
}

unsafe impl<H: GcBase> Finalize for String<H> {}
impl<H: GcBase + 'static> Collectable for String<H> {}

impl<H: GcBase> Deref for String<H> {
    type Target = str;
    fn deref(&self) -> &Self::Target {
        unsafe { std::str::from_utf8_unchecked(self.vec.as_slice()) }
    }
}

impl<H: GcBase> DerefMut for String<H> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { std::str::from_utf8_unchecked_mut(self.vec.as_slice_mut()) }
    }
}

impl<H: GcBase> std::fmt::Debug for String<H> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}", self.as_str())
    }
}

impl<H: GcBase> std::fmt::Display for String<H> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.as_str())
    }
}

impl<H: GcBase> std::cmp::PartialEq for String<H> {
    fn eq(&self, other: &Self) -> bool {
        self.as_str().eq(other.as_str())
    }
}

impl<H: GcBase> Eq for String<H> {}

impl<H: GcBase> Hash for String<H> {
    fn hash<HS: std::hash::Hasher>(&self, state: &mut HS) {
        self.as_str().hash(state);
    }
}

impl<H: GcBase> std::cmp::PartialOrd for String<H> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.as_str().partial_cmp(other.as_str())
    }
}

impl<H: GcBase> std::cmp::Ord for String<H> {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.as_str().cmp(other.as_str())
    }
}

/// Garbage collected immutable string. It is just [str] that is allocated on the GC heap.
#[repr(C)]
pub struct Str {
    length: usize,
    data_start: [u8; 0],
}

impl Str {
    pub fn new<H: GcBase>(mutator: &mut MutatorRef<H>, from: impl AsRef<str>) -> Gc<Self, H> {
        let src = from.as_ref();
        let mut this = mutator.allocate(
            Self {
                length: src.len(),
                data_start: [],
            },
            crate::gc_base::AllocationSpace::New,
        );
        unsafe {
            std::ptr::copy_nonoverlapping(src.as_ptr(), this.data_start.as_mut_ptr(), src.len());
        }
        this
    }

    pub fn len(&self) -> usize {
        self.length
    }

    pub fn as_str(&self) -> &str {
        self
    }

    pub fn as_mut_str(&mut self) -> &mut str {
        self
    }
}

impl Deref for Str {
    type Target = str;
    fn deref(&self) -> &Self::Target {
        unsafe {
            std::str::from_utf8_unchecked(std::slice::from_raw_parts(
                self.data_start.as_ptr(),
                self.len(),
            ))
        }
    }
}

impl DerefMut for Str {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe {
            std::str::from_utf8_unchecked_mut(std::slice::from_raw_parts_mut(
                self.data_start.as_mut_ptr(),
                self.len(),
            ))
        }
    }
}

unsafe impl Trace for Str {}
unsafe impl Finalize for Str {}
impl Collectable for Str {
    fn allocation_size(&self) -> usize {
        size_of::<Self>() + self.length
    }
}

impl Hash for Str {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.as_str().hash(state);
    }
}

impl Eq for Str {}
impl PartialEq for Str {
    fn eq(&self, other: &Self) -> bool {
        self.as_str() == other.as_str()
    }
}

impl std::fmt::Display for Str {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.as_str())
    }
}

impl std::fmt::Debug for Str {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Delegate to the underlying &str; formatting `self` here would recurse.
        write!(f, "{:?}", self.as_str())
    }
}

impl PartialEq<str> for Str {
    fn eq(&self, other: &str) -> bool {
        self.as_str() == other
    }
}

impl<Heap: GcBase> PartialEq<str> for String<Heap> {
    fn eq(&self, other: &str) -> bool {
        self.as_str() == other
    }
}
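
// Usage sketch (assumptions: `mutator` is a comet `MutatorRef<H>` for some
// `H: GcBase`; obtaining one is runtime-specific, so this stays illustrative):
//
//     let mut s = String::from_str(&mut mutator, "hello");
//     s.push(&mut mutator, ' ');
//     s.push_str(&mut mutator, "world");
//     assert_eq!(s.as_str(), "hello world");
//
// Every growing operation threads the mutator through so the backing
// `Vector<u8, H>` can allocate on the GC heap.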
withPlugins.js
import React, { useContext } from 'react';
import curry from 'lodash/curry';
import isEmpty from 'lodash/isEmpty';
import PluginContext from './PluginContext';

/** withPlugins should be the innermost HOC */
function _withPlugins(targetName, TargetComponent) { // eslint-disable-line no-underscore-dangle
  /** */
  function PluginHoc(props, ref) {
    const pluginMap = useContext(PluginContext);
    const passDownProps = { ...props };
    if (ref) passDownProps.ref = ref;

    if (isEmpty(pluginMap)) {
      return <TargetComponent {...passDownProps} />;
    }

    const plugins = pluginMap[targetName];
    if (isEmpty(plugins)) {
      return <TargetComponent {...passDownProps} />;
    }

    if (!isEmpty(plugins.wrap) && !isEmpty(plugins.add)) {
      const WrapPluginComponent = plugins.wrap[0].component;
      const AddPluginComponents = plugins.add.map(plugin => plugin.component);
      return (
        <WrapPluginComponent
          targetProps={passDownProps}
          {...passDownProps}
          PluginComponents={AddPluginComponents}
          TargetComponent={TargetComponent}
        />
      );
    }

    if (!isEmpty(plugins.wrap)) {
      const PluginComponent = plugins.wrap[0].component;
      return <PluginComponent targetProps={passDownProps} TargetComponent={TargetComponent} />;
    }

    if (!isEmpty(plugins.add)) {
      const PluginComponents = plugins.add.map(plugin => plugin.component);
      return <TargetComponent {...passDownProps} PluginComponents={PluginComponents} />;
    }

    // Plugin entry exists but defines neither wrap nor add plugins:
    // render the target unchanged instead of returning undefined.
    return <TargetComponent {...passDownProps} />;
  }

  const whatever = React.forwardRef(PluginHoc);
  whatever.displayName = `WithPlugins(${targetName})`;
  return whatever;
}

/** withPlugins('MyComponent')(MyComponent) */
export const withPlugins = curry(_withPlugins);
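
// Usage sketch (illustrative names; `MyComponent` and `SomePlugin` are not
// part of this module):
//
//   const MyComponent = props => <div>{props.label}</div>;
//   const PluggableMyComponent = withPlugins('MyComponent')(MyComponent);
//
//   // Plugins are resolved by target name from the surrounding PluginContext:
//   // <PluginContext.Provider value={{ MyComponent: { add: [{ component: SomePlugin }] } }}>
//   //   <PluggableMyComponent label="hello" />
//   // </PluginContext.Provider>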
profile.ts
import { Injectable } from '@angular/core'; import { TranslateService } from '@ngx-translate/core'; import { Events } from 'ionic-angular'; import * as _ from 'lodash'; import { Observable } from 'rxjs'; // providers import { DerivationPathHelperProvider } from '../../providers/derivation-path-helper/derivation-path-helper'; import { ActionSheetProvider } from '../action-sheet/action-sheet'; import { AppProvider } from '../app/app'; import { BwcErrorProvider } from '../bwc-error/bwc-error'; import { BwcProvider } from '../bwc/bwc'; import { ConfigProvider } from '../config/config'; import { CurrencyProvider } from '../currency/currency'; import { KeyProvider } from '../key/key'; import { LanguageProvider } from '../language/language'; import { Logger } from '../logger/logger'; import { OnGoingProcessProvider } from '../on-going-process/on-going-process'; import { PersistenceProvider } from '../persistence/persistence'; import { PlatformProvider } from '../platform/platform'; import { PopupProvider } from '../popup/popup'; import { ReplaceParametersProvider } from '../replace-parameters/replace-parameters'; import { TxFormatProvider } from '../tx-format/tx-format'; import { WalletOptions } from '../wallet/wallet'; // models import { Profile } from '../../models/profile/profile.model'; interface WalletGroups { [keyId: string]: { name?: string; needsBackup?: boolean; order?: number; isPrivKeyEncrypted?: boolean; canSign?: boolean; isDeletedSeed?: boolean; }; } @Injectable() export class ProfileProvider { public walletsGroups: WalletGroups = {}; // TODO walletGroups Class public wallet: any = {}; public profile: Profile; public UPDATE_PERIOD = 15; public UPDATE_PERIOD_FAST = 5; private throttledBwsEvent; private validationLock: boolean = false; private errors = this.bwcProvider.getErrors(); constructor( private currencyProvider: CurrencyProvider, private logger: Logger, private persistenceProvider: PersistenceProvider, private configProvider: ConfigProvider, private replaceParametersProvider: ReplaceParametersProvider, private bwcProvider: BwcProvider, private bwcErrorProvider: BwcErrorProvider, private platformProvider: PlatformProvider, private appProvider: AppProvider, private languageProvider: LanguageProvider, private events: Events, private popupProvider: PopupProvider, private onGoingProcessProvider: OnGoingProcessProvider, private translate: TranslateService, private txFormatProvider: TxFormatProvider, private actionSheetProvider: ActionSheetProvider, private keyProvider: KeyProvider, private derivationPathHelperProvider: DerivationPathHelperProvider ) { this.throttledBwsEvent = _.throttle((n, wallet) => { this.newBwsEvent(n, wallet); }, 10000); } private updateWalletFromConfig(wallet): void { const config = this.configProvider.get(); const defaults = this.configProvider.getDefaults(); // this.config.whenAvailable( (config) => { TODO wallet.usingCustomBWS = config.bwsFor && config.bwsFor[wallet.id] && config.bwsFor[wallet.id] != defaults.bws.url; wallet.name = (config.aliasFor && config.aliasFor[wallet.id]) || wallet.credentials.walletName; wallet.email = config.emailFor && config.emailFor[wallet.id]; // }); } public setWalletOrder(walletId: string, index: number): void { this.persistenceProvider.setWalletOrder(walletId, index).then(() => { this.logger.debug( 'Wallet new order stored for ' + walletId + ': ' + index ); }); if (this.wallet[walletId]) this.wallet[walletId]['order'] = index; } public setWalletGroupName(keyId: string, name: string): void { 
this.persistenceProvider.setWalletGroupName(keyId, name); if (this.walletsGroups[keyId]) this.walletsGroups[keyId].name = name; } public async getWalletGroupName(keyId: string) { const name = await this.persistenceProvider.getWalletGroupName(keyId); return name; } private async getWalletOrder(walletId: string) { const order = await this.persistenceProvider.getWalletOrder(walletId); return order; } public setBackupGroupFlag( keyId: string, timestamp?: string, migrating?: boolean ): void { if (!keyId) return; this.persistenceProvider.setBackupGroupFlag(keyId, timestamp); this.logger.debug('Backup flag stored'); if (!migrating) this.walletsGroups[keyId].needsBackup = false; } public setWalletBackup(walletId: string): void { this.wallet[walletId].needsBackup = false; } private requiresGroupBackup(keyId: string) { let k = this.keyProvider.getKey(keyId); if (!k) return false; if (!k.mnemonic && !k.mnemonicEncrypted) return false; return true; } private requiresBackup(wallet) { let k = this.keyProvider.getKey(wallet.credentials.keyId); if (!k) return false; if (!k.mnemonic && !k.mnemonicEncrypted) return false; if (wallet.credentials.network == 'testnet') return false; return true; } private getBackupInfo(wallet): Promise<any> { if (!this.requiresBackup(wallet)) { return Promise.resolve({ needsBackup: false }); } return this.persistenceProvider .getBackupFlag(wallet.credentials.walletId) .then(timestamp => { if (timestamp) { return Promise.resolve({ needsBackup: false, timestamp }); } return Promise.resolve({ needsBackup: true }); }) .catch(err => { this.logger.error(err); }); } private getBackupGroupInfo(keyId, wallet?): Promise<any> { if (!this.requiresGroupBackup(keyId)) { return Promise.resolve({ needsBackup: false }); } return this.persistenceProvider .getBackupGroupFlag(keyId) .then(async timestamp => { if (timestamp) { return Promise.resolve({ needsBackup: false, timestamp }); } else { const backupInfo = await this.getBackupInfo(wallet); if (backupInfo && !backupInfo.needsBackup) { this.setBackupGroupFlag(keyId, backupInfo.timestamp, true); return Promise.resolve({ needsBackup: false, timestamp: backupInfo.timestamp }); } } return Promise.resolve({ needsBackup: true }); }) .catch(err => { this.logger.error(err); }); } private isBalanceHidden(wallet): Promise<boolean> { return new Promise(resolve => { this.persistenceProvider .getHideBalanceFlag(wallet.credentials.walletId) .then(shouldHideBalance => { const isHidden = shouldHideBalance && shouldHideBalance.toString() == 'true' ? true : false; return resolve(isHidden); }) .catch(err => { this.logger.error(err); }); }); } private isWalletHidden(wallet): Promise<boolean> { return new Promise(resolve => { this.persistenceProvider .getHideWalletFlag(wallet.credentials.walletId) .then(shouldHideWallet => { const isHidden = shouldHideWallet && shouldHideWallet.toString() == 'true' ? true : false; return resolve(isHidden); }) .catch(err => { this.logger.error(err); }); }); } private async bindWalletClient(wallet): Promise<boolean> { const walletId = wallet.credentials.walletId; let keyId = wallet.credentials.keyId; if (this.wallet[walletId] && this.wallet[walletId].started) { this.logger.info('This wallet has been initialized. Skip. 
' + walletId); return Promise.resolve(false); } // Workaround to avoid wrong order relatad to async functions if (keyId) this.walletsGroups[keyId] = {}; this.wallet[walletId] = {}; // INIT WALLET VIEWMODEL wallet.id = walletId; wallet.started = true; wallet.network = wallet.credentials.network; wallet.copayerId = wallet.credentials.copayerId; wallet.m = wallet.credentials.m; wallet.n = wallet.credentials.n; wallet.coin = wallet.credentials.coin; wallet.cachedStatus = {}; wallet.balanceHidden = await this.isBalanceHidden(wallet); wallet.order = await this.getWalletOrder(wallet.id); wallet.hidden = await this.isWalletHidden(wallet); wallet.canSign = keyId ? true : false; wallet.isPrivKeyEncrypted = wallet.canSign ? this.keyProvider.isPrivKeyEncrypted(keyId) : false; wallet.canAddNewAccount = this.checkAccountCreation(wallet, keyId); this.updateWalletFromConfig(wallet); this.wallet[walletId] = wallet; wallet.removeAllListeners(); wallet.on('report', n => { this.logger.info('BWC Report:' + n); }); wallet.on('notification', n => { if (this.platformProvider.isElectron) { this.showDesktopNotifications(n, wallet); } if ( (n.data.network && n.data.network != wallet.network) || (n.data.coin && n.data.coin != wallet.coin) ) return; // TODO many NewBlocks notifications...if many blocks if (n.type == 'NewBlock' && n.data.network == 'testnet') { this.throttledBwsEvent(n, wallet); } else { this.newBwsEvent(n, wallet); } }); wallet.on('walletCompleted', () => { this.logger.debug('Wallet completed'); this.updateCredentials(JSON.parse(wallet.toString())); this.events.publish('Local/WalletListChange'); this.events.publish('Local/WalletUpdate', { walletId: wallet.id }); }); wallet.initialize( { notificationIncludeOwn: true }, err => { if (err) { this.logger.error('Could not init notifications err:', err); return; } wallet.setNotificationsInterval(this.UPDATE_PERIOD); wallet.openWallet(() => {}); } ); this.events.subscribe('Local/ConfigUpdate', opts => { this.logger.debug('Local/ConfigUpdate handler @profile', opts); if (opts.walletId && opts.walletId == wallet.id) { this.logger.debug('Updating wallet from config ' + wallet.id); this.updateWalletFromConfig(wallet); } }); // INIT WALLET GROUP VIEWMODEL let groupBackupInfo, needsBackup, order, name, isPrivKeyEncrypted, canSign, isDeletedSeed; if (keyId) { groupBackupInfo = await this.getBackupGroupInfo(keyId, wallet); needsBackup = groupBackupInfo.needsBackup; isPrivKeyEncrypted = this.keyProvider.isPrivKeyEncrypted(keyId); canSign = true; isDeletedSeed = this.keyProvider.isDeletedSeed(keyId); name = await this.getWalletGroupName(keyId); if (!name) { let walletsGroups = _.cloneDeep(this.walletsGroups); delete walletsGroups['read-only']; // use wallets name for wallets group name at migration name = `Key ${Object.keys(walletsGroups).indexOf(keyId) + 1}`; } } else { keyId = 'read-only'; needsBackup = false; name = 'Read Only Wallets'; isPrivKeyEncrypted = false; canSign = false; isDeletedSeed = true; } wallet.needsBackup = needsBackup; wallet.keyId = keyId; wallet.walletGroupName = name; this.walletsGroups[keyId] = { order, name, isPrivKeyEncrypted, needsBackup, canSign, isDeletedSeed }; let date; if (groupBackupInfo && groupBackupInfo.timestamp) date = new Date(Number(groupBackupInfo.timestamp)); this.logger.info( `Binding wallet: ${wallet.id} - Backed up: ${!needsBackup} ${ date ? 
date : '' } - Encrypted: ${wallet.isPrivKeyEncrypted}` ); return Promise.resolve(true); } public checkAccountCreation(wallet, keyId: string): boolean { /* Allow account creation only for wallets: wallet n=1 : BIP44 - P2PKH - BTC o BCH only if it is 145' wallet n>1 : BIP48 - P2SH - BTC o BCH only if it is 145' wallet n=1 : BIP44 - P2SH - ETH only if it is 60' key : !use44forMultisig - !use0forBCH - compliantDerivation - !BIP45 */ const key = this.keyProvider.getKey(keyId); if (!wallet) { return false; } else if (!key) { return false; } else if ( key.use44forMultisig || key.use0forBCH || key.BIP45 || key.compliantDerivation === false ) { return false; } else { const derivationStrategy = this.derivationPathHelperProvider.getDerivationStrategy( wallet.credentials.rootPath ); const coinCode = this.derivationPathHelperProvider.parsePath( wallet.credentials.rootPath ).coinCode; if ( wallet.n == 1 && wallet.credentials.addressType == 'P2PKH' && derivationStrategy == 'BIP44' && (wallet.coin == 'btc' || (wallet.coin == 'bch' && coinCode == "145'")) ) { return true; } if ( wallet.n > 1 && wallet.credentials.addressType == 'P2SH' && derivationStrategy == 'BIP48' && (wallet.coin == 'btc' || (wallet.coin == 'bch' && coinCode == "145'")) ) { return true; } if ( wallet.n == 1 && wallet.credentials.addressType == 'P2PKH' && derivationStrategy == 'BIP44' && (wallet.coin == 'eth' && coinCode == "60'") ) { return true; } return false; } } public setFastRefresh(wallet): void { this.logger.debug(`Wallet ${wallet.id} set to fast refresh`); wallet.setNotificationsInterval(this.UPDATE_PERIOD_FAST); } public setSlowRefresh(wallet): void { this.logger.debug(`Wallet ${wallet.id} back to slow refresh`); wallet.setNotificationsInterval(this.UPDATE_PERIOD); } private showDesktopNotifications(n, wallet): void { if (!this.configProvider.get().desktopNotificationsEnabled) return; const creatorId = n && n.data && n.data.creatorId; const amount = n && n.data && n.data.amount; const walletName = wallet.name; let title: string; let body: string; let translatedMsg: string; switch (n.type) { case 'NewCopayer': if (wallet.copayerId != creatorId) { title = this.translate.instant('New copayer'); translatedMsg = this.translate.instant( 'A new copayer just joined your wallet {{walletName}}.' ); body = this.replaceParametersProvider.replace(translatedMsg, { walletName }); } break; case 'WalletComplete': title = this.translate.instant('Wallet complete'); translatedMsg = this.translate.instant( 'Your wallet {{walletName}} is complete.' ); body = this.replaceParametersProvider.replace(translatedMsg, { walletName }); break; case 'NewTxProposal': if (wallet && wallet.m > 1 && wallet.copayerId != creatorId) { title = this.translate.instant('New payment proposal'); translatedMsg = this.translate.instant( 'A new payment proposal has been created in your wallet {{walletName}}.' ); body = this.replaceParametersProvider.replace(translatedMsg, { walletName }); } break; case 'NewIncomingTx': title = this.translate.instant('New payment received'); const amountStr = this.txFormatProvider.formatAmountStr( wallet.coin, amount ); translatedMsg = this.translate.instant( 'A payment of {{amountStr}} has been received into your wallet {{walletName}}.' ); body = this.replaceParametersProvider.replace(translatedMsg, { amountStr, walletName }); break; case 'TxProposalFinallyRejected': title = this.translate.instant('Payment proposal rejected'); translatedMsg = this.translate.instant( 'A payment proposal in your wallet {{walletName}} has been rejected.' 
); body = this.replaceParametersProvider.replace(translatedMsg, { walletName }); break; case 'TxConfirmation': title = this.translate.instant('Transaction confirmed'); translatedMsg = this.translate.instant( 'The transaction from {{walletName}} that you were waiting for has been confirmed.' ); body = this.replaceParametersProvider.replace(translatedMsg, { walletName }); break; } if (!body) return; const OS = this.platformProvider.getOS(); if (OS && OS.OSName === 'MacOS') this.showOsNotifications(title, body); else this.showInAppNotification(title, body); } private async showInAppNotification(title: string, body: string) { const infoSheet = this.actionSheetProvider.createInfoSheet( 'in-app-notification', { title, body } ); await infoSheet.present(); await Observable.timer(7000).toPromise(); infoSheet.dismiss(); } private showOsNotifications(title: string, body: string): void { const { ipcRenderer } = (window as any).require('electron'); ipcRenderer.send('new-notification', { title, body }); } private newBwsEvent(n, wallet): void { this.events.publish('bwsEvent', wallet.id, n.type, n); } public updateCredentials(credentials): void { this.profile.updateWallet(credentials); this.storeProfileIfDirty(); } private runValidation(wallet, delay?: number, retryDelay?: number) { delay = delay ? delay : 500; retryDelay = retryDelay ? retryDelay : 50; if (this.validationLock) { return setTimeout(() => { return this.runValidation(wallet, delay, retryDelay); }, retryDelay); } this.validationLock = true; // IOS devices are already checked const skipDeviceValidation = this.platformProvider.isIOS || this.profile.isDeviceChecked(this.platformProvider.ua); const walletId = wallet.credentials.walletId; this.logger.debug( `ValidatingWallet: ${walletId} skip Device: ${skipDeviceValidation}` ); setTimeout(() => { wallet.validateKeyDerivation( { skipDeviceValidation }, (_, isOK) => { this.validationLock = false; this.logger.debug(`ValidatingWallet End: ${walletId} isOK: ${isOK}`); if (isOK) { this.profile.setChecked(this.platformProvider.ua, walletId); } else { this.logger.warn(`Key Derivation failed for wallet: ${walletId}`); this.persistenceProvider.clearLastAddress(walletId); } this.storeProfileIfDirty(); } ); }, delay); } public storeProfileLegacy(oldProfile) { this.persistenceProvider .storeProfileLegacy(oldProfile) .then(() => { this.logger.debug('Saved legacy Profile'); }) .catch(err => { this.logger.error('Could not save legacy Profile', err); }); } public storeProfileIfDirty(): Promise<any> { if (!this.profile.dirty) { return Promise.resolve(); } return this.persistenceProvider .storeProfile(this.profile) .then(() => { this.logger.debug('Saved modified Profile (Dirty)'); return Promise.resolve(); }) .catch(err => { this.logger.error('Could not save Profile (Dirty)', err); return Promise.reject(err); }); } private askToEncryptKey(key, addingNewWallet?: boolean): Promise<any> { if (!key) return Promise.resolve(); if (key.isPrivKeyEncrypted()) return Promise.resolve(); if (addingNewWallet && !key.isPrivKeyEncrypted()) return Promise.resolve(); const title = this.translate.instant( 'Would you like to protect this wallet with a password?' ); const message = this.translate.instant( 'Encryption can protect your funds if this device is stolen or compromised by malicious software.' 
); const okText = this.translate.instant('Yes'); const cancelText = this.translate.instant('No'); return this.popupProvider .ionicConfirm(title, message, okText, cancelText) .then(res => { if (!res) { return this.keyProvider.showWarningNoEncrypt().then(res => { if (res) return Promise.resolve(); return this.keyProvider.encryptNewKey(key); }); } return this.keyProvider.encryptNewKey(key); }); } private addAndBindWalletClients(data, opts): Promise<any> { // Encrypt wallet this.onGoingProcessProvider.pause(); return this.askToEncryptKey(data.key).then(() => { this.onGoingProcessProvider.resume(); const promises = []; data.walletClients.forEach(walletClient => { promises.push(this.addAndBindWalletClient(walletClient, opts)); }); return this.keyProvider.addKey(data.key).then(() => { return Promise.all(promises) .then(walletClients => { return this.storeProfileIfDirty().then(() => { this.events.publish('Local/WalletListChange'); return this.checkIfAlreadyExist(walletClients).then(() => { return Promise.resolve(_.compact(walletClients)); }); }); }) .catch(() => { return Promise.reject('failed to bind wallets'); }); }); }); } private checkIfAlreadyExist(walletClients: any[]): Promise<any> { return new Promise(resolve => { const countInArray = _.filter(walletClients, item => item == undefined) .length; if (countInArray > 0) { const msg1 = this.replaceParametersProvider.replace( this.translate.instant('The wallet is already in the app'), { nameCase: this.appProvider.info.nameCase } ); const msg2 = this.replaceParametersProvider.replace( this.translate.instant( '{{countInArray}} of your wallets already exist in {{nameCase}}' ), { countInArray, nameCase: this.appProvider.info.nameCase } ); const msg = countInArray == 1 ? msg1 : msg2; const title = this.translate.instant('Error'); const infoSheet = this.actionSheetProvider.createInfoSheet( 'default-error', { msg, title } ); infoSheet.present(); infoSheet.onDidDismiss(() => { return resolve(); }); } else { return resolve(); } }); } // Adds and bind a new client to the profile private async addAndBindWalletClient(wallet, opts): Promise<any> { if (!wallet || !wallet.credentials) { return Promise.reject(this.translate.instant('Could not access wallet')); } const walletId: string = wallet.credentials.walletId; if (!this.profile.addWallet(JSON.parse(wallet.toString()))) { return Promise.resolve(); } const skipKeyValidation: boolean = this.shouldSkipValidation(walletId); if (!skipKeyValidation) { this.logger.debug('Trying to runValidation: ' + walletId); this.runValidation(wallet); } this.saveBwsUrl(walletId, opts); return this.bindWalletClient(wallet).then(() => { return Promise.resolve(wallet); }); } private saveBwsUrl(walletId, opts): void { const defaults = this.configProvider.getDefaults(); const bwsFor = {}; bwsFor[walletId] = opts.bwsurl || defaults.bws.url; // Dont save the default if (bwsFor[walletId] == defaults.bws.url) { return; } this.configProvider.set({ bwsFor }); } private shouldSkipValidation(walletId: string): boolean { return ( this.profile.isChecked(this.platformProvider.ua, walletId) || this.platformProvider.isIOS ); } private setMetaData(wallet, addressBook): Promise<any> { return new Promise((resolve, reject) => { this.persistenceProvider .getAddressBook(wallet.credentials.network) .then(localAddressBook => { try { localAddressBook = JSON.parse(localAddressBook); } catch (ex) {
this.logger.info('Address Book: JSON.parse not necessary.', localAddressBook);
} const mergeAddressBook = _.merge(addressBook, localAddressBook); this.persistenceProvider .setAddressBook( wallet.credentials.network, JSON.stringify(mergeAddressBook) ) .then(() => { return resolve(); }) .catch(err => { return reject(err); }); }) .catch(err => { return reject(err); }); }); } public importExtendedPrivateKey(xPrivKey: string, opts): Promise<any> { this.logger.info('Importing Wallet xPrivKey'); opts.xPrivKey = xPrivKey; return this.serverAssistedImport(opts).then(data => { return this.addAndBindWalletClients(data, { bwsurl: opts.bwsurl }); }); } public importMnemonic(words, opts): Promise<any> { this.logger.info('Importing Wallets Mnemonic'); words = this.normalizeMnemonic(words); opts.words = words; return this.serverAssistedImport(opts).then(data => { return this.addAndBindWalletClients(data, { bwsurl: opts.bwsurl }); }); } public importFile(str: string, opts): Promise<any> { return this._importFile(str, opts).then(data => { this.onGoingProcessProvider.pause(); return this.askToEncryptKey(data.key).then(() => { this.onGoingProcessProvider.resume(); return this.keyProvider.addKey(data.key).then(() => { return this.addAndBindWalletClient(data.walletClient, { bwsurl: opts.bwsurl }).then(walletClient => { return this.storeProfileIfDirty().then(() => { this.events.publish('Local/WalletListChange'); return this.checkIfAlreadyExist([].concat(walletClient)).then( () => { return Promise.resolve(walletClient); } ); }); }); }); }); }); } private _importFile(str: string, opts): Promise<any> { return new Promise((resolve, reject) => { opts = opts ? opts : {}; opts['bp_partner'] = this.appProvider.info.name; opts['bp_partner_version'] = this.appProvider.info.version; this.logger.info('Importing Wallet:', opts); const client = this.bwcProvider.getClient(null, opts); let credentials; let key; let addressBook; const Key = this.bwcProvider.getKey(); const data = JSON.parse(str); if (data.credentials) { try { credentials = data.credentials; if (data.key) { key = Key.fromObj(data.key); } addressBook = data.addressBook; } catch (err) { this.logger.error(err); return reject( this.translate.instant('Could not import. Check input file.') ); } } else { // old format ? root = credentials. try { // needs to migrate? if (data.xPrivKey && data.xPrivKeyEncrypted) { this.logger.warn( 'Found both encrypted and decrypted key. Deleting the encrypted version' ); delete data.xPrivKeyEncrypted; delete data.mnemonicEncrypted; } let migrated = this.bwcProvider.upgradeCredentialsV1(data); credentials = migrated.credentials; key = migrated.key; addressBook = data.addressBook ? data.addressBook : {}; } catch (error) { this.logger.error(error); return reject( this.translate.instant('Could not import. Check input file.') ); } } if (!credentials.n) { return reject( 'Backup format not recognized. 
If you are using a Copay Beta backup and version is older than 0.10, please see: https://github.com/bitpay/copay/issues/4730#issuecomment-244522614' ); } client.fromString(JSON.stringify(credentials)); if (key) { this.logger.info(`Wallet ${credentials.walletId} key's extracted`); } else { this.logger.info(`READ-ONLY Wallet ${credentials.walletId} migrated`); } this.setMetaData(client, addressBook).catch(err => { this.logger.warn('Could not set meta data: ', err); }); return resolve({ key, walletClient: client }); }); } // opts.words opts.xPrivKey private serverAssistedImport(opts): Promise<any> { return new Promise((resolve, reject) => { this.bwcProvider.Client.serverAssistedImport( opts, { baseUrl: opts.bwsurl // clientOpts }, (err, key, walletClients) => { if (err) { return reject(err); } if (walletClients.length === 0) { return reject('WALLET_DOES_NOT_EXIST'); } else { return resolve({ key, walletClients }); } } ); }); } public normalizeMnemonic(words: string): string { if (!words || !words.indexOf) return words; // \u3000: A space of non-variable width: used in Chinese, Japanese, Korean const isJA = words.indexOf('\u3000') > -1; const wordList = words .trim() .toLowerCase() .split(/[\u3000\s]+/); return wordList.join(isJA ? '\u3000' : ' '); } public createProfile(): void { this.logger.info('Creating profile'); this.profile = Profile.create(); this.persistenceProvider.storeNewProfile(this.profile); } private bindProfile(profile): Promise<any> { const bindWallets = (): Promise<any> => { const profileLength = profile.credentials.length; if (!profileLength) { return Promise.resolve(); } const promises = []; return this.upgradeMultipleCredentials(profile).then(() => { _.each(profile.credentials, credentials => { promises.push(this.bindWallet(credentials)); }); return Promise.all(promises).then(() => { this.logger.info(`Bound ${profileLength} wallets`); return Promise.resolve(); }); }); }; return bindWallets().then(() => { return this.isDisclaimerAccepted().catch(() => { return Promise.reject(new Error('NONAGREEDDISCLAIMER')); }); }); } private upgradeMultipleCredentials(profile): Promise<any> { const oldProfile = _.clone(profile); const migrated = this.bwcProvider.upgradeMultipleCredentialsV1( profile.credentials ); const newKeys = migrated.keys; const newCrededentials = migrated.credentials; if (newKeys.length > 0) { this.logger.info(`Storing ${newKeys.length} migrated Keys`); this.storeProfileLegacy(oldProfile); return this.keyProvider.addKeys(newKeys).then(() => { profile.credentials = newCrededentials; profile.dirty = true; return this.storeProfileIfDirty(); }); } else { if (newCrededentials.length > 0) { // Only RO wallets. 
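// No keys came out of the migration, so these credentials belong to read-only wallets; only the profile entries need to be rewritten and persisted.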
this.storeProfileLegacy(oldProfile); profile.credentials = newCrededentials; profile.dirty = true; return this.storeProfileIfDirty(); } return Promise.resolve(); } } public isDisclaimerAccepted(): Promise<any> { return new Promise((resolve, reject) => { const disclaimerAccepted = this.profile && this.profile.disclaimerAccepted; if (disclaimerAccepted) return resolve(); // OLD flag this.persistenceProvider.getCopayDisclaimerFlag().then(val => { if (val) { this.profile.disclaimerAccepted = true; return resolve(); } else { return reject(); } }); }); } private bindWallet(credentials): Promise<any> { if (!credentials.walletId || !credentials.m) { return Promise.reject( new Error('bindWallet should receive credentials JSON') ); } // Create the client const getBWSURL = (walletId: string) => { const config = this.configProvider.get(); const defaults = this.configProvider.getDefaults(); return (config.bwsFor && config.bwsFor[walletId]) || defaults.bws.url; }; const walletClient = this.bwcProvider.getClient( JSON.stringify(credentials), { bwsurl: getBWSURL(credentials.walletId), bp_partner: this.appProvider.info.name, bp_partner_version: this.appProvider.info.version } ); const skipKeyValidation = this.shouldSkipValidation(credentials.walletId); if (!skipKeyValidation) { this.logger.debug('Trying to runValidation: ' + credentials.walletId); this.runValidation(walletClient, 500); } return this.bindWalletClient(walletClient); } public getProfileLegacy(): Promise<any> { return this.persistenceProvider.getProfileLegacy().catch(err => { this.logger.info('Error getting old Profile', err); }); } public removeProfileLegacy(): Promise<any> { return this.persistenceProvider.removeProfileLegacy().catch(err => { this.logger.info('Error getting old Profile', err); }); } public loadAndBindProfile(): Promise<any> { return new Promise((resolve, reject) => { this.persistenceProvider .getProfile() .then(profile => { if (!profile) { return resolve(); } this.profile = Profile.fromObj(profile); // Deprecated: storageService.tryToMigrate this.logger.info('Profile loaded'); this.bindProfile(this.profile) .then(() => { return resolve(this.profile); }) .catch(err => { return reject(err); }); }) .catch(err => { return reject(err); }); }); } public importWithDerivationPath(opts): Promise<any> { return new Promise((resolve, reject) => { this.logger.info('Importing Wallet with derivation path'); this._importWithDerivationPath(opts).then(data => { // Check if wallet exists data.walletClient.openWallet(err => { if (err) { if (err.message.indexOf('not found') > 0) { err = 'WALLET_DOES_NOT_EXIST'; } return reject(err); } this.keyProvider.addKey(data.key).then(() => { this.addAndBindWalletClient(data.walletClient, { bwsurl: opts.bwsurl }) .then(walletClient => { return this.storeProfileIfDirty().then(() => { this.events.publish('Local/WalletListChange'); this.checkIfAlreadyExist([].concat(walletClient)).then(() => { return resolve(walletClient); }); }); }) .catch(err => { return reject(err); }); }); }); }); }); } private _importWithDerivationPath(opts): Promise<any> { const showOpts = _.clone(opts); if (showOpts.extendedPrivateKey) showOpts.extendedPrivateKey = '[hidden]'; if (showOpts.mnemonic) showOpts.mnemonic = '[hidden]'; this.logger.debug('Importing Wallet:', JSON.stringify(showOpts)); return this.seedWallet(opts); } private seedWallet(opts?): Promise<any> { return new Promise((resolve, reject) => { opts = opts ? 
opts : {}; opts['bp_partner'] = this.appProvider.info.name; opts['bp_partner_version'] = this.appProvider.info.version; const walletClient = this.bwcProvider.getClient(null, opts); const network = opts.networkName || 'livenet'; const Key = this.bwcProvider.getKey(); let key; if (opts.mnemonic) { try { opts.mnemonic = this.normalizeMnemonic(opts.mnemonic); key = Key.fromMnemonic(opts.mnemonic, { useLegacyCoinType: opts.useLegacyCoinType, useLegacyPurpose: opts.useLegacyPurpose, passphrase: opts.passphrase }); walletClient.fromString( key.createCredentials(opts.password, { coin: opts.coin, network, account: opts.account || 0, addressType: opts.addressType, n: opts.n || 1 }) ); } catch (ex) { this.logger.info('Invalid wallet recovery phrase: ', ex); return reject( this.translate.instant( 'Could not create: Invalid wallet recovery phrase' ) ); } } else if (opts.extendedPrivateKey) { try { key = Key.fromExtendedPrivateKey(opts.extendedPrivateKey, { useLegacyCoinType: opts.useLegacyCoinType, useLegacyPurpose: opts.useLegacyPurpose }); walletClient.fromString( key.createCredentials(null, { coin: opts.coin, network, account: opts.account || 0, n: opts.n || 1 }) ); } catch (ex) { this.logger.warn( 'Could not get seed from Extended Private Key: ', ex ); return reject( this.translate.instant( 'Could not create using the specified extended private key' ) ); } } else { const lang = this.languageProvider.getCurrent(); try { if (!opts.keyId) { key = Key.create({ lang }); } else { key = this.keyProvider.getKey(opts.keyId); } walletClient.fromString( key.createCredentials(opts.password, { coin: opts.coin, network, account: opts.account || 0, n: opts.n || 1 }) ); } catch (e) { this.logger.info('Error creating recovery phrase: ' + e.message); if (e.message.indexOf('language') > 0) { this.logger.info('Using default language for recovery phrase'); key = Key.create({}); walletClient.fromString( key.createCredentials(opts.password, { coin: opts.coin, network, account: opts.account || 0, n: opts.n || 1 }) ); } else { return reject(e); } } } return resolve({ walletClient, key }); }); } // Creates a wallet on BWC/BWS and store it private _createWallet(opts): Promise<any> { return new Promise((resolve, reject) => { const showOpts = _.clone(opts); if (showOpts.extendedPrivateKey) showOpts.extendedPrivateKey = '[hidden]'; if (showOpts.mnemonic) showOpts.mnemonic = '[hidden]'; this.logger.debug('Creating Wallet:', JSON.stringify(showOpts)); setTimeout(() => { this.seedWallet(opts) .then(data => { const coin = `[${opts.coin.toUpperCase()}]`; const name = opts.name || `${this.translate.instant('Personal Wallet')} ${coin}`; const myName = opts.myName || this.translate.instant('me'); data.walletClient.createWallet( name, myName, opts.m, opts.n, { network: opts.networkName, singleAddress: opts.singleAddress, walletPrivKey: opts.walletPrivKey, coin: opts.coin }, err => { const copayerRegistered = err instanceof this.errors.COPAYER_REGISTERED; const isSetSeed = opts.mnemonic || opts.extendedPrivateKey; if (err && (!copayerRegistered || isSetSeed)) { const msg = this.bwcErrorProvider.msg( err, this.translate.instant('Error creating wallet') ); return reject(msg); } else if (copayerRegistered) { // try with account + 1 opts.account = opts.account ? 
opts.account + 1 : 1; if (opts.account === 20) return reject( this.translate.instant( 'You reach the limit of twenty wallets from the same coin and network' ) ); return resolve(this._createWallet(opts)); } else { return resolve(data); } } ); }) .catch(err => { return reject(err); }); }, 50); }); } // joins and stores a wallet private _joinWallet(opts): Promise<any> { return new Promise((resolve, reject) => { this.logger.info('Joining Wallet...'); let walletData; try { walletData = this.bwcProvider.parseSecret(opts.secret); // check if exist if ( _.find(this.profile.credentials, { walletId: walletData.walletId }) ) { return reject( this.translate.instant('Cannot join the same wallet more that once') ); } } catch (ex) { this.logger.error(ex); return reject(this.translate.instant('Bad wallet invitation')); } opts.networkName = walletData.network; /* TODO: opts.n is just used to determinate if the wallet is multisig (m/48'/xx) or single sig (m/44') we should change the name to 'isMultisig' */ opts.n = 2; this.logger.debug('Joining Wallet:', opts); this.seedWallet(opts) .then(data => { data.walletClient.joinWallet( opts.secret, opts.myName || 'me', { coin: opts.coin }, err => { if (err) { if (err instanceof this.errors.COPAYER_REGISTERED) { // try with account + 1 opts.account = opts.account ? opts.account + 1 : 1; if (opts.account === 20) return reject( this.translate.instant( 'You reach the limit of twenty wallets from the same coin and network' ) ); return resolve(this._joinWallet(opts)); } else { const msg = this.bwcErrorProvider.msg( err, this.translate.instant('Could not join wallet') ); return reject(msg); } } return resolve(data); } ); }) .catch(err => { return reject(err); }); }); } public getWallet(walletId: string) { return this.wallet[walletId]; } public getWalletGroup(keyId) { keyId = keyId ? 
keyId : 'read-only'; return this.walletsGroups[keyId]; } public deleteWalletClient(wallet): Promise<any> { this.logger.info('Deleting Wallet:', wallet.credentials.walletName); const walletId = wallet.credentials.walletId; wallet.removeAllListeners(); this.profile.deleteWallet(walletId); delete this.wallet[walletId]; this.persistenceProvider.removeAllWalletData(walletId); this.events.publish('Local/WalletListChange'); return this.storeProfileIfDirty(); } public deleteWalletGroup(keyId: string, wallets): Promise<any> { let promises = []; wallets.forEach(wallet => { promises.push(this.deleteWalletClient(wallet)); }); return Promise.all(promises).then(() => { this.persistenceProvider.removeAllWalletGroupData(keyId); return Promise.resolve(); }); } public createDefaultWallet(addingNewWallet: boolean, opts): Promise<any> { const defaults = this.configProvider.getDefaults(); const defaultOpts: Partial<WalletOptions> = { keyId: opts.keyId, name: this.currencyProvider.getCoinName(opts.coin), m: 1, n: 1, myName: null, networkName: 'livenet', bwsurl: defaults.bws.url, singleAddress: opts.singleAddress || false, coin: opts.coin }; return this.createWallet(addingNewWallet, defaultOpts); } public createWallet(addingNewWallet: boolean, opts): Promise<any> { return this.keyProvider.handleEncryptedWallet(opts.keyId).then(password => { opts.password = password; return this._createWallet(opts).then(data => { // Encrypt wallet this.onGoingProcessProvider.pause(); return this.askToEncryptKey(data.key, addingNewWallet).then(() => { this.onGoingProcessProvider.resume(); return this.keyProvider.addKey(data.key).then(() => { return this.addAndBindWalletClient(data.walletClient, { bwsurl: opts.bwsurl }).then(walletClient => { return this.storeProfileIfDirty().then(() => { this.events.publish('Local/WalletListChange'); return Promise.resolve(walletClient); }); }); }); }); }); }); } public joinWallet(addingNewWallet: boolean, opts): Promise<any> { return this.keyProvider.handleEncryptedWallet(opts.keyId).then(password => { opts.password = password; return this._joinWallet(opts).then(data => { // Encrypt wallet this.onGoingProcessProvider.pause(); return this.askToEncryptKey(data.key, addingNewWallet).then(() => { this.onGoingProcessProvider.resume(); return this.keyProvider.addKey(data.key).then(() => { return this.addAndBindWalletClient(data.walletClient, { bwsurl: opts.bwsurl }).then(walletClient => { return this.storeProfileIfDirty().then(() => { this.events.publish('Local/WalletListChange'); return Promise.resolve(walletClient); }); }); }); }); }); }); } public setDisclaimerAccepted(): Promise<any> { this.profile.acceptDisclaimer(); return this.storeProfileIfDirty(); } public setLastKnownBalance() { // Add cached balance async _.each(_.values(this.wallet), x => { this.persistenceProvider.getLastKnownBalance(x.id).then(datum => { // this.logger.debug("Last known balance for ",x.id,datum); datum = datum || {}; let limit = Math.floor(Date.now() / 1000) - 2 * 60; let { balance = null, updatedOn = null } = datum; x.lastKnownBalance = balance; x.lastKnownBalanceUpdatedOn = updatedOn < limit ? updatedOn : null; }); }); } public getWallets(opts?) 
{ const wallets = []; opts = opts || {}; // workaround to get wallets in the correct order Object.keys(this.walletsGroups).forEach(keyId => { opts.keyId = keyId; wallets.push(this.getWalletsFromGroup(opts)); }); return _.flatten(wallets); } public getWalletsFromGroup(opts) { if (opts && !_.isObject(opts)) throw new Error('bad argument'); opts = opts || {}; let ret = _.values(this.wallet); if (opts.keyId === 'read-only') { ret = _.filter(ret, x => { return !x.credentials.keyId; }); } else if (opts.keyId) { ret = _.filter(ret, x => { return x.credentials.keyId == opts.keyId; }); } if (opts.coin) { ret = _.filter(ret, x => { return x.credentials.coin == opts.coin; }); } if (opts.network) { ret = _.filter(ret, x => { return x.credentials.network == opts.network; }); } if (opts.n) { ret = _.filter(ret, w => { return w.credentials.n == opts.n; }); } if (opts.m) { ret = _.filter(ret, w => { return w.credentials.m == opts.m; }); } if (opts.onlyComplete) { ret = _.filter(ret, w => { return w.isComplete(); }); } if (opts.minAmount) { ret = _.filter(ret, w => { // IF no cached Status => return true! if (_.isEmpty(w.cachedStatus)) return true; return w.cachedStatus.availableBalanceSat > opts.minAmount; }); } if (opts.hasFunds) { ret = _.filter(ret, w => { // IF no cached Status => return true! if (_.isEmpty(w.cachedStatus)) return true; return w.cachedStatus.availableBalanceSat > 0; }); } if (!opts.showHidden) { // remove hidden wallets ret = _.filter(ret, w => { return !w.hidden; }); } if (opts.canAddNewAccount) { ret = _.filter(ret, w => { return w.canAddNewAccount; }); } return _.sortBy(ret, 'order'); } public toggleHideBalanceFlag(walletId: string): void { this.wallet[walletId].balanceHidden = !this.wallet[walletId].balanceHidden; this.persistenceProvider.setHideBalanceFlag( walletId, this.wallet[walletId].balanceHidden ); } public toggleHideWalletFlag(walletId: string): void { this.wallet[walletId].hidden = !this.wallet[walletId].hidden; this.persistenceProvider.setHideWalletFlag( walletId, this.wallet[walletId].hidden ); } public getTxps(opts): Promise<any> { return new Promise((resolve, reject) => { const MAX = 100; opts = opts ? opts : {}; const w = this.getWallets(opts); if (_.isEmpty(w)) { return reject('No wallets available'); } let txps = []; _.each(w, x => { if (x.pendingTxps) txps = txps.concat(x.pendingTxps); }); const n = txps.length; txps = _.sortBy(txps, 'createdOn').reverse(); txps = _.compact(_.flatten(txps)).slice(0, opts.limit || MAX); return resolve({ txps, n }); }); } public isKeyInUse(keyId: string): boolean { const keyIdIndex = this.profile.credentials.findIndex(c => { if (keyId === 'read-only') { return !c.keyId; } else { return c.keyId == keyId; } }); return keyIdIndex >= 0; } }
validate.rs
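//! Reads a varlink interface definition from stdin (when no argument is
//! given) or from the file named by the first argument, then pretty-prints
//! it with colored, multiline formatting. Parse and I/O errors propagate out
//! of `main` as an error exit.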
extern crate varlink_parser;

use std::env;
use std::error::Error;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::path::Path;
use std::process::exit;
use std::result::Result;

use varlink_parser::{FormatColored, IDL};

fn main() -> Result<(), Box<dyn Error>> {
    let mut buffer = String::new();
    let args: Vec<_> = env::args().collect();
    match args.len() {
        // No file argument: read the interface definition from stdin.
        0 | 1 => io::stdin().read_to_string(&mut buffer)?,
        // Otherwise treat the first argument as a path to the IDL file.
        _ => File::open(Path::new(&args[1]))?.read_to_string(&mut buffer)?,
    };
    let v = IDL::from_string(&buffer)?;
    println!("{}", v.get_multiline_colored(0, 80));
    exit(0);
}
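
// A minimal smoke test sketch, assuming `IDL::from_string` accepts standard
// varlink syntax; the interface text below is illustrative, not taken from
// this repository.
#[cfg(test)]
mod tests {
    use varlink_parser::IDL;

    #[test]
    fn parses_minimal_interface() {
        let src = "interface org.example.ping\n\nmethod Ping(ping: string) -> (pong: string)\n";
        assert!(IDL::from_string(src).is_ok());
    }
}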
schema.rs
// Copyright Materialize, Inc. All rights reserved. // // Use of this software is governed by the Business Source License // included in the LICENSE file. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0. //! Logic for parsing and interacting with schemas in Avro format. use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt; use digest::Digest; use failure::{Error, Fail}; use log::{debug, warn}; use serde::{ ser::{SerializeMap, SerializeSeq}, Serialize, Serializer, }; use serde_json::{self, Map, Value}; use crate::reader::SchemaResolver; use crate::types; use crate::util::MapHelper; use failure::_core::fmt::Formatter; use itertools::Itertools; use std::cell::RefCell; use std::collections::hash_map::Entry; use std::convert::TryFrom; use std::fmt::Display; use std::rc::Rc; use types::{DecimalValue, Value as AvroValue}; pub fn resolve_schemas(writer_schema: &Schema, reader_schema: &Schema) -> Result<Schema, Error> { let r_indices = reader_schema.indices.clone(); let (reader_to_writer_names, writer_to_reader_names): (HashMap<_, _>, HashMap<_, _>) = writer_schema .indices .iter() .flat_map(|(name, widx)| { r_indices .get(name) .map(|ridx| ((*ridx, *widx), (*widx, *ridx))) }) .unzip(); let reader_fullnames = reader_schema .indices .iter() .map(|(f, i)| (*i, f)) .collect::<HashMap<_, _>>(); let mut resolver = SchemaResolver { named: Default::default(), indices: Default::default(), writer_to_reader_names, reader_to_writer_names, reader_to_resolved_names: Default::default(), reader_fullnames, reader_schema, }; let writer_node = writer_schema.top_node_or_named(); let reader_node = reader_schema.top_node_or_named(); let inner = resolver.resolve(writer_node, reader_node)?; let sch = Schema { named: resolver.named.into_iter().map(Option::unwrap).collect(), indices: resolver.indices, top: inner, }; Ok(sch) } /// Describes errors happened while parsing Avro schemas. #[derive(Fail, Debug)] #[fail(display = "Failed to parse schema: {}", _0)] pub struct ParseSchemaError(String); impl ParseSchemaError { pub fn new<S>(msg: S) -> ParseSchemaError where S: Into<String>, { ParseSchemaError(msg.into()) } } /// Represents an Avro schema fingerprint /// More information about Avro schema fingerprints can be found in the /// [Avro Schema Fingerprint documentation](https://avro.apache.org/docs/current/spec.html#schema_fingerprints) pub struct SchemaFingerprint { pub bytes: Vec<u8>, } impl fmt::Display for SchemaFingerprint { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "{}", self.bytes .iter() .map(|byte| format!("{:02x}", byte)) .collect::<Vec<String>>() .join("") ) } } #[derive(Clone, Debug, PartialEq)] pub enum SchemaPieceOrNamed { Piece(SchemaPiece), Named(usize), } impl SchemaPieceOrNamed { #[inline(always)] pub fn get_piece_and_name<'a>( &'a self, root: &'a Schema, ) -> (&'a SchemaPiece, Option<&'a FullName>) { self.as_ref().get_piece_and_name(root) } #[inline(always)] pub fn as_ref(&self) -> SchemaPieceRefOrNamed { match self { SchemaPieceOrNamed::Piece(piece) => SchemaPieceRefOrNamed::Piece(piece), SchemaPieceOrNamed::Named(index) => SchemaPieceRefOrNamed::Named(*index), } } } impl From<SchemaPiece> for SchemaPieceOrNamed { #[inline(always)] fn from(piece: SchemaPiece) -> Self { Self::Piece(piece) } } #[derive(Clone, Debug, PartialEq)] pub enum SchemaPiece { /// A `null` Avro schema. Null, /// A `boolean` Avro schema. 
Boolean, /// An `int` Avro schema. Int, /// A `long` Avro schema. Long, /// A `float` Avro schema. Float, /// A `double` Avro schema. Double, /// An `Int` Avro schema with a semantic type being days since the unix epoch. Date, /// An `Int64` Avro schema with a semantic type being milliseconds since the unix epoch. /// /// https://avro.apache.org/docs/current/spec.html#Timestamp+%28millisecond+precision%29 TimestampMilli, /// An `Int64` Avro schema with a semantic type being microseconds since the unix epoch. /// /// https://avro.apache.org/docs/current/spec.html#Timestamp+%28microsecond+precision%29 TimestampMicro, /// A `bytes` Avro schema with a logical type of `decimal` and /// the specified precision and scale. /// /// If the underlying type is `fixed`, /// the `fixed_size` field specifies the size. Decimal { precision: usize, scale: usize, fixed_size: Option<usize>, }, /// A `bytes` Avro schema. /// `Bytes` represents a sequence of 8-bit unsigned bytes. Bytes, /// A `string` Avro schema. /// `String` represents a unicode character sequence. String, /// A `string` Avro schema that is tagged as representing JSON data Json, /// A `array` Avro schema. Avro arrays are required to have the same type for each element. /// This variant holds the `Schema` for the array element type. Array(Box<SchemaPieceOrNamed>), /// A `map` Avro schema. /// `Map` holds a pointer to the `Schema` of its values, which must all be the same schema. /// `Map` keys are assumed to be `string`. Map(Box<SchemaPieceOrNamed>), /// A `union` Avro schema. Union(UnionSchema), /// A value written as `int` and read as `long` ResolveIntLong, /// A value written as `int` and read as `float` ResolveIntFloat, /// A value written as `int` and read as `double` ResolveIntDouble, /// A value written as `long` and read as `float` ResolveLongFloat, /// A value written as `long` and read as `double` ResolveLongDouble, /// A value written as `float` and read as `double` ResolveFloatDouble, /// A concrete (i.e., non-`union`) type in the writer, /// resolved against one specific variant of a `union` in the writer. ResolveConcreteUnion { /// The index of the variant in the reader index: usize, /// The concrete type inner: Box<SchemaPieceOrNamed>, }, /// A union in the writer, resolved against a union in the reader. /// The two schemas may have different variants and the variants may be in a different order. ResolveUnionUnion { /// A mapping of the fields in the writer to those in the reader. /// If the `i`th element is `None`, the `i`th field in the writer /// did not match any field in the reader (or even if it matched by name, resolution failed). /// If the `i`th element is `(j, piece)`, then the `i`th field of the writer /// matched the `j`th field of the reader, and `piece` is their resolved node. permutation: Vec<Option<(usize, SchemaPieceOrNamed)>>, }, /// The inverse of `ResolveConcreteUnion` ResolveUnionConcrete { index: usize, inner: Box<SchemaPieceOrNamed>, }, /// A `record` Avro schema. /// /// The `lookup` table maps field names to their position in the `Vec` /// of `fields`. Record { doc: Documentation, fields: Vec<RecordField>, lookup: HashMap<String, usize>, }, /// An `enum` Avro schema. Enum { doc: Documentation, symbols: Vec<String>, }, /// A `fixed` Avro schema. Fixed { size: usize }, /// A record in the writer, resolved against a record in the reader. /// The two schemas may have different fields and the fields may be in a different order. 
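///
/// Illustrative example (not from the source): a writer record `[a, b]`
/// resolved against a reader record `[b, c]`, where `c` has a default,
/// yields `fields` entries `[Absent(a), Present(b)]` plus one entry in
/// `defaults` for `c`, so `n_reader_fields` is 2.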
ResolveRecord { /// Fields that do not exist in the writer schema, but had a default /// value specified in the reader schema, which we use. defaults: Vec<ResolvedDefaultValueField>, /// Fields in the order of their appearance in the writer schema. /// `Present` if they could be resolved against a field in the reader schema; /// `Absent` otherwise. fields: Vec<ResolvedRecordField>, /// The size of `defaults`, plus the number of `Present` values in `fields`. n_reader_fields: usize, }, /// An enum in the writer, resolved against an enum in the reader. /// The two schemas may have different values and the values may be in a different order. ResolveEnum { doc: Documentation, /// Symbols in the reader schema if they exist in the writer schema, /// or `None` otherwise. symbols: Vec<Option<String>>, // TODO(brennan) - These should support default values }, } /// Represents any valid Avro schema /// More information about Avro schemas can be found in the /// [Avro Specification](https://avro.apache.org/docs/current/spec.html#schemas) #[derive(Clone, Debug, PartialEq)] pub struct Schema { pub(crate) named: Vec<NamedSchemaPiece>, pub(crate) indices: HashMap<FullName, usize>, pub top: SchemaPieceOrNamed, } impl Schema { pub fn top_node(&self) -> SchemaNode { let (inner, name) = self.top.get_piece_and_name(self); SchemaNode { root: self, inner, name, } } pub fn top_node_or_named(&self) -> SchemaNodeOrNamed { SchemaNodeOrNamed { root: self, inner: self.top.as_ref(), } } pub fn lookup(&self, idx: usize) -> &NamedSchemaPiece { &self.named[idx] } } /// This type is used to simplify enum variant comparison between `Schema` and `types::Value`. /// It may have utility as part of the public API, but defining as `pub(crate)` for now. /// /// **NOTE** This type was introduced due to a limitation of `mem::discriminant` requiring a _value_ /// be constructed in order to get the discriminant, which makes it difficult to implement a /// function that maps from `Discriminant<Schema> -> Discriminant<Value>`. Conversion into this /// intermediate type should be especially fast, as the number of enum variants is small, which /// _should_ compile into a jump-table for the conversion. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub(crate) enum SchemaKind { // Fixed-length types Null, Boolean, Int, Long, Float, Date, DateTime, Double, // Variable-length types Bytes, Decimal, String, Array, Map, Union, Record, Enum, Fixed, // This can arise in resolved schemas, particularly when a union resolves to a non-union. // We would need to do a lookup to find the actual type. Unknown, } impl<'a> From<&'a SchemaPiece> for SchemaKind { #[inline(always)] fn from(piece: &'a SchemaPiece) -> SchemaKind { match piece { SchemaPiece::Null => SchemaKind::Null, SchemaPiece::Boolean => SchemaKind::Boolean, SchemaPiece::Int => SchemaKind::Int, SchemaPiece::Long => SchemaKind::Long, SchemaPiece::Float => SchemaKind::Float, SchemaPiece::Double => SchemaKind::Double, SchemaPiece::Date => SchemaKind::Date, SchemaPiece::TimestampMilli | SchemaPiece::TimestampMicro => SchemaKind::DateTime, SchemaPiece::Decimal { .. } => SchemaKind::Decimal, SchemaPiece::Bytes => SchemaKind::Bytes, SchemaPiece::String => SchemaKind::String, SchemaPiece::Array(_) => SchemaKind::Array, SchemaPiece::Map(_) => SchemaKind::Map, SchemaPiece::Union(_) => SchemaKind::Union, SchemaPiece::ResolveUnionUnion { .. 
} => SchemaKind::Union, SchemaPiece::ResolveIntLong => SchemaKind::Long, SchemaPiece::ResolveIntFloat => SchemaKind::Float, SchemaPiece::ResolveIntDouble => SchemaKind::Double, SchemaPiece::ResolveLongFloat => SchemaKind::Float, SchemaPiece::ResolveLongDouble => SchemaKind::Double, SchemaPiece::ResolveFloatDouble => SchemaKind::Double, SchemaPiece::ResolveConcreteUnion { .. } => SchemaKind::Union, SchemaPiece::ResolveUnionConcrete { inner: _, .. } => SchemaKind::Unknown, SchemaPiece::Record { .. } => SchemaKind::Record, SchemaPiece::Enum { .. } => SchemaKind::Enum, SchemaPiece::Fixed { .. } => SchemaKind::Fixed, SchemaPiece::ResolveRecord { .. } => SchemaKind::Record, SchemaPiece::ResolveEnum { .. } => SchemaKind::Enum, SchemaPiece::Json => SchemaKind::String, } } } impl<'a> From<SchemaNode<'a>> for SchemaKind { #[inline(always)] fn from(schema: SchemaNode<'a>) -> SchemaKind { SchemaKind::from(schema.inner) } } impl<'a> From<&'a Schema> for SchemaKind { #[inline(always)] fn from(schema: &'a Schema) -> SchemaKind { Self::from(schema.top_node()) } } impl<'a> From<&'a types::Value> for SchemaKind { #[inline(always)] fn from(value: &'a types::Value) -> SchemaKind { match value { types::Value::Null => SchemaKind::Null, types::Value::Boolean(_) => SchemaKind::Boolean, types::Value::Int(_) => SchemaKind::Int, types::Value::Long(_) => SchemaKind::Long, types::Value::Float(_) => SchemaKind::Float, types::Value::Double(_) => SchemaKind::Double, types::Value::Date(_) => SchemaKind::Date, types::Value::Timestamp(_) => SchemaKind::DateTime, // Variable-length types types::Value::Decimal { .. } => SchemaKind::Decimal, types::Value::Bytes(_) => SchemaKind::Bytes, types::Value::String(_) => SchemaKind::String, types::Value::Array(_) => SchemaKind::Array, types::Value::Map(_) => SchemaKind::Map, types::Value::Union(_, _) => SchemaKind::Union, types::Value::Record(_) => SchemaKind::Record, types::Value::Enum(_, _) => SchemaKind::Enum, types::Value::Fixed(_, _) => SchemaKind::Fixed, types::Value::Json(_) => SchemaKind::String, } } } /// Represents names for `record`, `enum` and `fixed` Avro schemas. /// /// Each of these `Schema`s have a `fullname` composed of two parts: /// * a name /// * a namespace /// /// `aliases` can also be defined, to facilitate schema evolution. /// /// More information about schema names can be found in the /// [Avro specification](https://avro.apache.org/docs/current/spec.html#names) #[derive(Clone, Debug, PartialEq)] pub struct Name { pub name: String, pub namespace: Option<String>, pub aliases: Option<Vec<String>>, } #[derive(Clone, Debug, Hash, PartialEq, Eq)] pub struct FullName { name: String, namespace: String, } impl FullName { pub fn from_parts(name: &str, namespace: Option<&str>, default_namespace: &str) -> FullName { if let Some(ns) = namespace { FullName { name: name.to_owned(), namespace: ns.to_owned(), } } else { let mut split = name.rsplitn(2, '.'); let name = split.next().unwrap(); let namespace = split.next().unwrap_or(default_namespace); FullName { name: name.into(), namespace: namespace.into(), } } } } impl Display for FullName { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "{}.{}", self.namespace, self.name) } } /// Represents documentation for complex Avro schemas. pub type Documentation = Option<String>; impl Name { /// Create a new `Name`. /// No `namespace` nor `aliases` will be defined. pub fn new(name: &str) -> Name { Name { name: name.to_owned(), namespace: None, aliases: None, } } /// Parse a `serde_json::Value` into a `Name`. 
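///
/// Illustrative accepted shape: `{"name": "rec", "namespace": "org.example",
/// "aliases": ["old_rec"]}`. A dotted `name` combined with an explicit
/// `namespace` is rejected.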
fn parse(complex: &Map<String, Value>) -> Result<Self, Error> { let name = complex .name() .ok_or_else(|| ParseSchemaError::new("No `name` field"))?; let namespace = complex.string("namespace"); let aliases: Option<Vec<String>> = complex .get("aliases") .and_then(|aliases| aliases.as_array()) .and_then(|aliases| { aliases .iter() .map(|alias| alias.as_str()) .map(|alias| alias.map(|a| a.to_string())) .collect::<Option<_>>() }); if let Some(ns) = &namespace { if name.find('.').is_some() { return Err(ParseSchemaError::new(format!( "Name {} has dot, but namespace also specified: {}", name, ns )) .into()); } } Ok(Name { name, namespace, aliases, }) } /// Return the `fullname` of this `Name` /// /// More information about fullnames can be found in the /// [Avro specification](https://avro.apache.org/docs/current/spec.html#names) pub fn fullname(&self, default_namespace: &str) -> FullName { FullName::from_parts(&self.name, self.namespace.as_deref(), default_namespace) } } #[derive(Clone, Debug, PartialEq)] pub struct ResolvedDefaultValueField { pub name: String, pub doc: Documentation, pub default: types::Value, pub order: RecordFieldOrder, pub position: usize, } #[derive(Clone, Debug, PartialEq)] pub enum ResolvedRecordField { Absent(Schema), Present(RecordField), } /// Represents a `field` in a `record` Avro schema. #[derive(Clone, Debug, PartialEq)] pub struct RecordField { /// Name of the field. pub name: String, /// Documentation of the field. pub doc: Documentation, /// Default value of the field. /// This value will be used when reading Avro datum if schema resolution /// is enabled. pub default: Option<Value>, /// Schema of the field. pub schema: SchemaPieceOrNamed, /// Order of the field. /// /// **NOTE** This currently has no effect. pub order: RecordFieldOrder, /// Position of the field in the list of `field` of its parent `Schema` pub position: usize, } /// Represents any valid order for a `field` in a `record` Avro schema. #[derive(Copy, Clone, Debug, PartialEq)] pub enum RecordFieldOrder { Ascending, Descending, Ignore, } impl RecordField {} #[derive(Debug, Clone)] pub struct UnionSchema { schemas: Vec<SchemaPieceOrNamed>, // Used to ensure uniqueness of anonymous schema inputs, and provide constant time finding of the // schema index given a value. anon_variant_index: HashMap<SchemaKind, usize>, // Same as above, for named input references named_variant_index: HashMap<usize, usize>, } impl UnionSchema { pub(crate) fn new(schemas: Vec<SchemaPieceOrNamed>) -> Result<Self, Error> { let mut avindex = HashMap::new(); let mut nvindex = HashMap::new(); for (i, schema) in schemas.iter().enumerate() { match schema { SchemaPieceOrNamed::Piece(sp) => { if let SchemaPiece::Union(_) = sp { return Err(ParseSchemaError::new( "Unions may not directly contain a union", ) .into()); } let kind = SchemaKind::from(sp); if avindex.insert(kind, i).is_some() { return Err( ParseSchemaError::new("Unions cannot contain duplicate types").into(), ); } } SchemaPieceOrNamed::Named(idx) => { if nvindex.insert(*idx, i).is_some() { return Err( ParseSchemaError::new("Unions cannot contain duplicate types").into(), ); } } } } Ok(UnionSchema { schemas, anon_variant_index: avindex, named_variant_index: nvindex, }) } /// Returns a slice to all variants of this schema. pub fn variants(&self) -> &[SchemaPieceOrNamed] { &self.schemas } /// Returns true if the first variant of this `UnionSchema` is `Null`. 
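///
/// For example, `["null", "string"]` is nullable, while `["string", "null"]`
/// is not, since only the first variant is checked.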
pub fn is_nullable(&self) -> bool { !self.schemas.is_empty() && self.schemas[0] == SchemaPieceOrNamed::Piece(SchemaPiece::Null) } pub fn resolve_piece(&self, sp: &SchemaPiece) -> Option<(usize, &SchemaPieceOrNamed)> { self.anon_variant_index .get(&SchemaKind::from(sp)) .map(|idx| (*idx, &self.schemas[*idx])) } pub fn resolve_ref( &self, other: SchemaPieceRefOrNamed, names_map: &HashMap<usize, usize>, ) -> Option<(usize, &SchemaPieceOrNamed)> { match other { SchemaPieceRefOrNamed::Piece(sp) => self.resolve_piece(sp), SchemaPieceRefOrNamed::Named(idx) => names_map .get(&idx) .and_then(|idx| self.named_variant_index.get(idx)) .map(|idx| (*idx, &self.schemas[*idx])), } } #[inline(always)] pub fn resolve( &self, other: &SchemaPieceOrNamed, names_map: &HashMap<usize, usize>, ) -> Option<(usize, &SchemaPieceOrNamed)> { self.resolve_ref(other.as_ref(), names_map) } } // No need to compare variant_index, it is derivative of schemas. impl PartialEq for UnionSchema { fn eq(&self, other: &UnionSchema) -> bool { self.schemas.eq(&other.schemas) } } #[derive(Default)] struct SchemaParser { named: Vec<Option<NamedSchemaPiece>>, indices: HashMap<FullName, usize>, } impl SchemaParser { fn parse(mut self, value: &Value) -> Result<Schema, Error> { let top = self.parse_inner("", value)?; let SchemaParser { named, indices } = self; Ok(Schema { named: named.into_iter().map(|o| o.unwrap()).collect(), indices, top, }) } fn parse_inner( &mut self, default_namespace: &str, value: &Value, ) -> Result<SchemaPieceOrNamed, Error> { match *value { Value::String(ref t) => { let name = FullName::from_parts(t.as_str(), None, default_namespace); if let Some(idx) = self.indices.get(&name) { Ok(SchemaPieceOrNamed::Named(*idx)) } else { Ok(SchemaPieceOrNamed::Piece(Schema::parse_primitive( t.as_str(), )?)) } } Value::Object(ref data) => self.parse_complex(default_namespace, data), Value::Array(ref data) => Ok(SchemaPieceOrNamed::Piece( self.parse_union(default_namespace, data)?, )), _ => Err(ParseSchemaError::new("Must be a JSON string, object or array").into()), } } fn alloc_name(&mut self, fullname: FullName) -> Result<usize, Error> { let idx = match self.indices.entry(fullname) { Entry::Vacant(ve) => *ve.insert(self.named.len()), Entry::Occupied(oe) => { return Err(ParseSchemaError::new(format!( "Sub-schema with name {} encountered multiple times", oe.key() )) .into()) } }; self.named.push(None); Ok(idx) } fn insert(&mut self, index: usize, schema: NamedSchemaPiece) { assert!(self.named[index].is_none()); self.named[index] = Some(schema); } fn parse_named_type( &mut self, type_name: &str, default_namespace: &str, complex: &Map<String, Value>, ) -> Result<usize, Error> { let name = Name::parse(complex)?; match name.name.as_str() { "null" | "boolean" | "int" | "long" | "float" | "double" | "bytes" | "string" => { return Err(ParseSchemaError::new(format!( "{} may not be used as a custom type name", name.name )) .into()) } _ => {} }; let fullname = name.fullname(default_namespace); let default_namespace = fullname.namespace.clone(); let idx = self.alloc_name(fullname.clone())?; let piece = match type_name { "record" => self.parse_record(&default_namespace, complex), "enum" => self.parse_enum(complex), "fixed" => self.parse_fixed(&default_namespace, complex), _ => unreachable!("Unknown named type kind: {}", type_name), }?; self.insert( idx, NamedSchemaPiece { name: fullname, piece, }, ); Ok(idx) } /// Parse a `serde_json::Value` representing a complex Avro type into a /// `Schema`. 
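///
/// Dispatches on the `type` field: `record`, `enum`, and `fixed` are interned
/// by full name; `array`, `map`, and the annotated primitives (`bytes`, `int`,
/// `long`, `string`) are parsed structurally; anything else is resolved as a
/// named reference or parsed as a primitive.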
/// /// Avro supports "recursive" definition of types. /// e.g: {"type": {"type": "string"}} fn parse_complex( &mut self, default_namespace: &str, complex: &Map<String, Value>, ) -> Result<SchemaPieceOrNamed, Error> { match complex.get("type") { Some(&Value::String(ref t)) => Ok(match t.as_str() { "record" | "enum" | "fixed" => SchemaPieceOrNamed::Named(self.parse_named_type( t, default_namespace, complex, )?), "array" => SchemaPieceOrNamed::Piece(self.parse_array(default_namespace, complex)?), "map" => SchemaPieceOrNamed::Piece(self.parse_map(default_namespace, complex)?), "bytes" => SchemaPieceOrNamed::Piece(Self::parse_bytes(complex)?), "int" => SchemaPieceOrNamed::Piece(Self::parse_int(complex)?), "long" => SchemaPieceOrNamed::Piece(Self::parse_long(complex)?), "string" => SchemaPieceOrNamed::Piece(Self::parse_string(complex)), other => { let name = FullName { name: other.into(), namespace: default_namespace.into(), }; if let Some(idx) = self.indices.get(&name) { SchemaPieceOrNamed::Named(*idx) } else { SchemaPieceOrNamed::Piece(Schema::parse_primitive(t.as_str())?) } } }), Some(&Value::Object(ref data)) => match data.get("type") { Some(ref value) => self.parse_inner(default_namespace, value), None => Err( ParseSchemaError::new(format!("Unknown complex type: {:?}", complex)).into(), ), }, _ => Err(ParseSchemaError::new("No `type` in complex type").into()), } } /// Parse a `serde_json::Value` representing a Avro record type into a /// `Schema`. fn parse_record( &mut self, default_namespace: &str, complex: &Map<String, Value>, ) -> Result<SchemaPiece, Error> { let mut lookup = HashMap::new(); let fields: Vec<RecordField> = complex .get("fields") .and_then(|fields| fields.as_array()) .ok_or_else(|| ParseSchemaError::new("No `fields` in record").into()) .and_then(|fields| { fields .iter() .filter_map(|field| field.as_object()) .enumerate() .map(|(position, field)| { self.parse_record_field(default_namespace, field, position) }) .collect::<Result<_, _>>() })?; for field in &fields { lookup.insert(field.name.clone(), field.position); } Ok(SchemaPiece::Record { doc: complex.doc(), fields, lookup, }) } /// Parse a `serde_json::Value` into a `RecordField`. fn parse_record_field( &mut self, default_namespace: &str, field: &Map<String, Value>, position: usize, ) -> Result<RecordField, Error> { let name = field .name() .ok_or_else(|| ParseSchemaError::new("No `name` in record field"))?; let schema = field .get("type") .ok_or_else(|| ParseSchemaError::new("No `type` in record field").into()) .and_then(|type_| self.parse_inner(default_namespace, type_))?; let default = field.get("default").cloned(); let order = field .get("order") .and_then(|order| order.as_str()) .and_then(|order| match order { "ascending" => Some(RecordFieldOrder::Ascending), "descending" => Some(RecordFieldOrder::Descending), "ignore" => Some(RecordFieldOrder::Ignore), _ => None, }) .unwrap_or_else(|| RecordFieldOrder::Ascending); Ok(RecordField { name, doc: field.doc(), default, schema, order, position, }) } /// Parse a `serde_json::Value` representing a Avro enum type into a /// `Schema`. 
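///
/// Illustrative input: `{"type": "enum", "name": "Suit", "symbols":
/// ["SPADES", "HEARTS"]}`. Duplicate entries in `symbols` are rejected.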
fn parse_enum(&mut self, complex: &Map<String, Value>) -> Result<SchemaPiece, Error> { let symbols: Vec<String> = complex .get("symbols") .and_then(|v| v.as_array()) .ok_or_else(|| ParseSchemaError::new("No `symbols` field in enum")) .and_then(|symbols| { symbols .iter() .map(|symbol| symbol.as_str().map(|s| s.to_string())) .collect::<Option<_>>() .ok_or_else(|| ParseSchemaError::new("Unable to parse `symbols` in enum")) })?; let mut unique_symbols: HashSet<&String> = HashSet::new(); for symbol in symbols.iter() { if unique_symbols.contains(symbol) { return Err(ParseSchemaError::new(format!( "Enum symbols must be unique, found multiple: {}", symbol )) .into()); } else { unique_symbols.insert(symbol); } } Ok(SchemaPiece::Enum { doc: complex.doc(), symbols, }) } /// Parse a `serde_json::Value` representing a Avro array type into a /// `Schema`. fn parse_array( &mut self, default_namespace: &str, complex: &Map<String, Value>, ) -> Result<SchemaPiece, Error> { complex .get("items") .ok_or_else(|| ParseSchemaError::new("No `items` in array").into()) .and_then(|items| self.parse_inner(default_namespace, items)) .map(|schema| SchemaPiece::Array(Box::new(schema))) } /// Parse a `serde_json::Value` representing a Avro map type into a /// `Schema`. fn parse_map( &mut self, default_namespace: &str, complex: &Map<String, Value>, ) -> Result<SchemaPiece, Error> { complex .get("values") .ok_or_else(|| ParseSchemaError::new("No `values` in map").into()) .and_then(|items| self.parse_inner(default_namespace, items)) .map(|schema| SchemaPiece::Map(Box::new(schema))) } /// Parse a `serde_json::Value` representing a Avro union type into a /// `Schema`. fn parse_union( &mut self, default_namespace: &str, items: &[Value], ) -> Result<SchemaPiece, Error> { items .iter() .map(|value| self.parse_inner(default_namespace, value)) .collect::<Result<Vec<_>, _>>() .and_then(|schemas| Ok(SchemaPiece::Union(UnionSchema::new(schemas)?))) } /// Parse a `serde_json::Value` representing a logical decimal type into a /// `Schema`. fn parse_decimal(complex: &Map<String, Value>) -> Result<(usize, usize), Error> { let precision = complex .get("precision") .and_then(|v| v.as_i64()) .ok_or_else(|| ParseSchemaError::new("No `precision` in decimal"))?; let scale = complex.get("scale").and_then(|v| v.as_i64()).unwrap_or(0); if scale < 0 { return Err(ParseSchemaError::new("Decimal scale must be greater than zero").into()); } if precision < 0 { return Err( ParseSchemaError::new("Decimal precision must be greater than zero").into(), ); } if scale > precision { return Err(ParseSchemaError::new("Decimal scale is greater than precision").into()); } Ok((precision as usize, scale as usize)) } /// Parse a `serde_json::Value` representing an Avro bytes type into a /// `Schema`. fn parse_bytes(complex: &Map<String, Value>) -> Result<SchemaPiece, Error> { let logical_type = complex.get("logicalType").and_then(|v| v.as_str()); Ok(match logical_type { Some("decimal") => { let (precision, scale) = Self::parse_decimal(complex)?; SchemaPiece::Decimal { precision, scale, fixed_size: None, } } _ => { debug!("parsing complex type as regular bytes: {:?}", complex); SchemaPiece::Bytes } }) } /// Parse a [`serde_json::Value`] representing an Avro Int type /// /// If the complex type has a `connect.name` tag (as [emitted by /// Debezium][1]) that matches a `Date` tag, we specify that the correct /// schema to use is `Date`. 
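///
/// Illustrative input: `{"type": "int", "connect.name": "io.debezium.time.Date"}`
/// parses to `SchemaPiece::Date`, while a bare `{"type": "int"}` stays
/// `SchemaPiece::Int`.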
/// /// [1]: https://debezium.io/docs/connectors/mysql/#temporal-values fn parse_int(complex: &Map<String, Value>) -> Result<SchemaPiece, Error> { const AVRO_DATE: &str = "date"; const DEBEZIUM_DATE: &str = "io.debezium.time.Date"; const KAFKA_DATE: &str = "org.apache.kafka.connect.data.Date"; if let Some(name) = complex.get("connect.name") { if name == DEBEZIUM_DATE || name == KAFKA_DATE { if name == KAFKA_DATE { warn!("using deprecated debezium date format"); } return Ok(SchemaPiece::Date); } } // Put this after the custom semantic types so that the debezium // warning is emitted, since the logicalType tag shows up in the // deprecated debezium format :-/ if let Some(name) = complex.get("logicalType") { if name == AVRO_DATE { return Ok(SchemaPiece::Date); } } if !complex.is_empty() { debug!("parsing complex type as regular int: {:?}", complex); } Ok(SchemaPiece::Int) } /// Parse a [`serde_json::Value`] representing an Avro Int64/Long type /// /// The debezium/kafka types are document at [the debezium site][1], and the /// avro ones are documented at [Avro][2]. /// /// [1]: https://debezium.io/docs/connectors/mysql/#temporal-values /// [2]: https://avro.apache.org/docs/1.9.0/spec.html fn parse_long(complex: &Map<String, Value>) -> Result<SchemaPiece, Error> { const AVRO_MILLI_TS: &str = "timestamp-millis"; const AVRO_MICRO_TS: &str = "timestamp-micros"; const CONNECT_MILLI_TS: &[&str] = &[ "io.debezium.time.Timestamp", "org.apache.kafka.connect.data.Timestamp", ]; const CONNECT_MICRO_TS: &str = "io.debezium.time.MicroTimestamp"; if let Some(serde_json::Value::String(name)) = complex.get("connect.name") { if CONNECT_MILLI_TS.contains(&&**name) { return Ok(SchemaPiece::TimestampMilli); } if name == CONNECT_MICRO_TS { return Ok(SchemaPiece::TimestampMicro); } } if let Some(name) = complex.get("logicalType") { if name == AVRO_MILLI_TS { return Ok(SchemaPiece::TimestampMilli); } if name == AVRO_MICRO_TS { return Ok(SchemaPiece::TimestampMicro); } } if !complex.is_empty() { debug!("parsing complex type as regular long: {:?}", complex); } Ok(SchemaPiece::Long) } fn parse_string(complex: &Map<String, Value>) -> SchemaPiece { const CONNECT_JSON: &str = "io.debezium.data.Json"; if let Some(serde_json::Value::String(name)) = complex.get("connect.name") { if CONNECT_JSON == name.as_str() { return SchemaPiece::Json; } } debug!("parsing complex type as regular string: {:?}", complex); SchemaPiece::String } /// Parse a `serde_json::Value` representing a Avro fixed type into a /// `Schema`. fn parse_fixed( &mut self, _default_namespace: &str, complex: &Map<String, Value>, ) -> Result<SchemaPiece, Error> { let _name = Name::parse(complex)?; let size = complex .get("size") .and_then(|v| v.as_i64()) .ok_or_else(|| ParseSchemaError::new("No `size` in fixed"))?; let logical_type = complex.get("logicalType").and_then(|v| v.as_str()); Ok(match logical_type { Some("decimal") => { let (precision, scale) = Self::parse_decimal(complex)?; let max = ((2_usize.pow((8 * size - 1) as u32) - 1) as f64).log10() as usize; if precision > max { return Err(ParseSchemaError::new(format!( "Decimal precision {} requires more than {} bytes of space", precision, size, )) .into()); } SchemaPiece::Decimal { precision, scale, fixed_size: Some(size as usize), } } _ => SchemaPiece::Fixed { size: size as usize, }, }) } } impl Schema { /// Create a `Schema` from a string representing a JSON Avro schema. 
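///
/// A minimal sketch (illustrative):
///
/// ```ignore
/// let schema = Schema::parse_str(
///     r#"{"type": "record", "name": "r", "fields": [{"name": "a", "type": "long"}]}"#,
/// )?;
/// ```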
pub fn parse_str(input: &str) -> Result<Self, Error> { let value = serde_json::from_str(input)?; Self::parse(&value) } /// Create a `Schema` from a `serde_json::Value` representing a JSON Avro /// schema. pub fn parse(value: &Value) -> Result<Self, Error> { let p = SchemaParser { named: vec![], indices: Default::default(), }; p.parse(value) } /// Converts `self` into its [Parsing Canonical Form]. /// /// [Parsing Canonical Form]: /// https://avro.apache.org/docs/1.8.2/spec.html#Parsing+Canonical+Form+for+Schemas pub fn canonical_form(&self) -> String { let json = serde_json::to_value(self).unwrap(); parsing_canonical_form(&json) } /// Generate [fingerprint] of Schema's [Parsing Canonical Form]. /// /// [Parsing Canonical Form]: /// https://avro.apache.org/docs/1.8.2/spec.html#Parsing+Canonical+Form+for+Schemas /// [fingerprint]: /// https://avro.apache.org/docs/current/spec.html#schema_fingerprints pub fn fingerprint<D: Digest>(&self) -> SchemaFingerprint { let mut d = D::new(); d.input(self.canonical_form()); SchemaFingerprint { bytes: d.result().to_vec(), } } /// Parse a `serde_json::Value` representing a primitive Avro type into a /// `Schema`. fn parse_primitive(primitive: &str) -> Result<SchemaPiece, Error> { match primitive { "null" => Ok(SchemaPiece::Null), "boolean" => Ok(SchemaPiece::Boolean), "int" => Ok(SchemaPiece::Int), "long" => Ok(SchemaPiece::Long), "double" => Ok(SchemaPiece::Double), "float" => Ok(SchemaPiece::Float), "bytes" => Ok(SchemaPiece::Bytes), "string" => Ok(SchemaPiece::String), other => Err(ParseSchemaError::new(format!("Unknown type: {}", other)).into()), } } } #[derive(Clone, Debug, PartialEq)] pub struct NamedSchemaPiece { pub(crate) name: FullName, pub(crate) piece: SchemaPiece, } #[derive(Copy, Clone, Debug)] pub struct SchemaNode<'a> { pub root: &'a Schema, pub inner: &'a SchemaPiece, pub name: Option<&'a FullName>, } #[derive(Copy, Clone)] pub enum SchemaPieceRefOrNamed<'a> { Piece(&'a SchemaPiece), Named(usize), } impl<'a> SchemaPieceRefOrNamed<'a> { #[inline(always)] pub fn get_piece_and_name(self, root: &'a Schema) -> (&'a SchemaPiece, Option<&'a FullName>) { match self { SchemaPieceRefOrNamed::Piece(sp) => (sp, None), SchemaPieceRefOrNamed::Named(index) => { let named_piece = root.lookup(index); (&named_piece.piece, Some(&named_piece.name)) } } } } #[derive(Copy, Clone)] pub struct SchemaNodeOrNamed<'a> { pub root: &'a Schema, pub inner: SchemaPieceRefOrNamed<'a>, } impl<'a> SchemaNodeOrNamed<'a> { #[inline(always)] pub fn lookup(self) -> SchemaNode<'a> { let (inner, name) = self.inner.get_piece_and_name(self.root); SchemaNode { root: self.root, inner, name, } } #[inline(always)] pub fn step(self, next: &'a SchemaPieceOrNamed) -> Self { self.step_ref(next.as_ref()) } #[inline(always)] pub fn step_ref(self, next: SchemaPieceRefOrNamed<'a>) -> Self { Self { root: self.root, inner: match next { SchemaPieceRefOrNamed::Piece(piece) => SchemaPieceRefOrNamed::Piece(piece), SchemaPieceRefOrNamed::Named(index) => SchemaPieceRefOrNamed::Named(index), }, } } pub fn to_schema(self) -> Schema { let mut cloner = SchemaSubtreeDeepCloner { old_root: self.root, old_to_new_names: Default::default(), named: Default::default(), }; let piece = cloner.clone_piece_or_named(self.inner); let named: Vec<NamedSchemaPiece> = cloner.named.into_iter().map(Option::unwrap).collect(); let indices: HashMap<FullName, usize> = named .iter() .enumerate() .map(|(i, nsp)| (nsp.name.clone(), i)) .collect(); Schema { named, indices, top: piece, } } } struct SchemaSubtreeDeepCloner<'a> { 
old_root: &'a Schema, old_to_new_names: HashMap<usize, usize>, named: Vec<Option<NamedSchemaPiece>>, } impl<'a> SchemaSubtreeDeepCloner<'a> { fn clone_piece(&mut self, piece: &SchemaPiece) -> SchemaPiece { match piece { SchemaPiece::Null => SchemaPiece::Null, SchemaPiece::Boolean => SchemaPiece::Boolean, SchemaPiece::Int => SchemaPiece::Int, SchemaPiece::Long => SchemaPiece::Long, SchemaPiece::Float => SchemaPiece::Float, SchemaPiece::Double => SchemaPiece::Double, SchemaPiece::Date => SchemaPiece::Date, SchemaPiece::TimestampMilli => SchemaPiece::TimestampMilli, SchemaPiece::TimestampMicro => SchemaPiece::TimestampMicro, SchemaPiece::Json => SchemaPiece::Json, SchemaPiece::Decimal { scale, precision, fixed_size, } => SchemaPiece::Decimal { scale: *scale, precision: *precision, fixed_size: *fixed_size, }, SchemaPiece::Bytes => SchemaPiece::Bytes, SchemaPiece::String => SchemaPiece::String, SchemaPiece::Array(inner) => { SchemaPiece::Array(Box::new(self.clone_piece_or_named(inner.as_ref().as_ref()))) } SchemaPiece::Map(inner) => { SchemaPiece::Map(Box::new(self.clone_piece_or_named(inner.as_ref().as_ref()))) } SchemaPiece::Union(us) => SchemaPiece::Union(UnionSchema { schemas: us .schemas .iter() .map(|s| self.clone_piece_or_named(s.as_ref())) .collect(), anon_variant_index: us.anon_variant_index.clone(), named_variant_index: us.named_variant_index.clone(), }), SchemaPiece::ResolveIntLong => SchemaPiece::ResolveIntLong, SchemaPiece::ResolveIntFloat => SchemaPiece::ResolveIntFloat, SchemaPiece::ResolveIntDouble => SchemaPiece::ResolveIntDouble, SchemaPiece::ResolveLongFloat => SchemaPiece::ResolveLongFloat, SchemaPiece::ResolveLongDouble => SchemaPiece::ResolveLongDouble, SchemaPiece::ResolveFloatDouble => SchemaPiece::ResolveFloatDouble, SchemaPiece::ResolveConcreteUnion { index, inner } => { SchemaPiece::ResolveConcreteUnion { index: *index, inner: Box::new(self.clone_piece_or_named(inner.as_ref().as_ref())), } } SchemaPiece::ResolveUnionUnion { permutation } => SchemaPiece::ResolveUnionUnion { permutation: permutation .iter() .map(|o| { o.as_ref() .map(|(idx, piece)| (*idx, self.clone_piece_or_named(piece.as_ref()))) }) .collect(), }, SchemaPiece::ResolveUnionConcrete { index, inner } => { SchemaPiece::ResolveUnionConcrete { index: *index, inner: Box::new(self.clone_piece_or_named(inner.as_ref().as_ref())), } } SchemaPiece::Record { doc, fields, lookup, } => SchemaPiece::Record { doc: doc.clone(), fields: fields .iter() .map(|rf| RecordField { name: rf.name.clone(), doc: rf.doc.clone(), default: rf.default.clone(), schema: self.clone_piece_or_named(rf.schema.as_ref()), order: rf.order, position: rf.position, }) .collect(), lookup: lookup.clone(), }, SchemaPiece::Enum { doc, symbols } => SchemaPiece::Enum { doc: doc.clone(), symbols: symbols.clone(), }, SchemaPiece::Fixed { size } => SchemaPiece::Fixed { size: *size }, SchemaPiece::ResolveRecord { defaults, fields, n_reader_fields, } => SchemaPiece::ResolveRecord { defaults: defaults.clone(), fields: fields .iter() .map(|rf| match rf { ResolvedRecordField::Present(rf) => { ResolvedRecordField::Present(RecordField { name: rf.name.clone(), doc: rf.doc.clone(), default: rf.default.clone(), schema: self.clone_piece_or_named(rf.schema.as_ref()), order: rf.order, position: rf.position, }) } ResolvedRecordField::Absent(writer_schema) => { ResolvedRecordField::Absent(writer_schema.clone()) } }) .collect(), n_reader_fields: *n_reader_fields, }, SchemaPiece::ResolveEnum { doc, symbols } => SchemaPiece::ResolveEnum { doc: doc.clone(), symbols: 
symbols.clone(),
            },
        }
    }

    fn clone_piece_or_named(&mut self, piece: SchemaPieceRefOrNamed) -> SchemaPieceOrNamed {
        match piece {
            SchemaPieceRefOrNamed::Piece(piece) => self.clone_piece(piece).into(),
            SchemaPieceRefOrNamed::Named(index) => {
                let new_index = match self.old_to_new_names.entry(index) {
                    Entry::Vacant(ve) => {
                        let new_index = self.named.len();
                        self.named.push(None);
                        ve.insert(new_index);
                        let old_named_piece = self.old_root.lookup(index);
                        let new_named_piece = NamedSchemaPiece {
                            name: old_named_piece.name.clone(),
                            piece: self.clone_piece(&old_named_piece.piece),
                        };
                        self.named[new_index] = Some(new_named_piece);
                        new_index
                    }
                    Entry::Occupied(oe) => *oe.get(),
                };
                SchemaPieceOrNamed::Named(new_index)
            }
        }
    }
}

impl<'a> SchemaNode<'a> {
    #[inline(always)]
    pub fn step(self, next: &'a SchemaPieceOrNamed) -> Self {
        let (inner, name) = next.get_piece_and_name(self.root);
        Self {
            root: self.root,
            inner,
            name,
        }
    }

    pub fn json_to_value(self, json: &serde_json::Value) -> Result<AvroValue, ParseSchemaError> {
        use serde_json::Value::*;
        let val = match (json, self.inner) {
            // A default value always matches the first variant of a union
            (json, SchemaPiece::Union(us)) => match us.schemas.first() {
                Some(variant) => {
                    AvroValue::Union(0, Box::new(self.step(variant).json_to_value(json)?))
                }
                None => return Err(ParseSchemaError("Union schema has no variants".to_owned())),
            },
            (Null, SchemaPiece::Null) => AvroValue::Null,
            (Bool(b), SchemaPiece::Boolean) => AvroValue::Boolean(*b),
            (Number(n), piece) => {
                match piece {
                    SchemaPiece::Int => {
                        let i = n.as_i64()
                            .and_then(|i| i32::try_from(i).ok())
                            .ok_or_else(|| {
                                ParseSchemaError(format!("{} is not a 32-bit integer", n))
                            })?;
                        AvroValue::Int(i)
                    }
                    SchemaPiece::Long => {
                        let i = n.as_i64().ok_or_else(|| {
                            ParseSchemaError(format!("{} is not a 64-bit integer", n))
                        })?;
                        AvroValue::Long(i)
                    }
                    SchemaPiece::Float => {
                        // Unwrap is okay -- in standard json, (i.e., not using the `arbitrary_precision`
                        // feature of serde), all numbers are representable as doubles.
                        AvroValue::Float(n.as_f64().unwrap() as f32)
                    }
                    SchemaPiece::Double => AvroValue::Double(n.as_f64().unwrap()),
                    _ => {
                        return Err(ParseSchemaError(format!(
                            "Unexpected number in default: {}",
                            n
                        )))
                    }
                }
            }
            (String(s), SchemaPiece::Bytes) => AvroValue::Bytes(s.clone().into_bytes()),
            (
                String(s),
                SchemaPiece::Decimal {
                    precision, scale, ..
                },
            ) => AvroValue::Decimal(DecimalValue {
                precision: *precision,
                scale: *scale,
                unscaled: s.clone().into_bytes(),
            }),
            (String(s), SchemaPiece::String) => AvroValue::String(s.clone()),
            (Object(map), SchemaPiece::Record { fields, .. }) => {
                let field_values = fields
                    .iter()
                    .map(|rf| {
                        let jval = map.get(&rf.name).ok_or_else(|| {
                            ParseSchemaError(format!(
                                "Field not found in default value: {}",
                                rf.name
                            ))
                        })?;
                        let value = self.step(&rf.schema).json_to_value(jval)?;
                        Ok((rf.name.clone(), value))
                    })
                    .collect::<Result<Vec<(std::string::String, AvroValue)>, ParseSchemaError>>()?;
                AvroValue::Record(field_values)
            }
            (String(s), SchemaPiece::Enum { symbols, ..
}) => { match symbols.iter().find_position(|sym| s == *sym) { Some((index, sym)) => AvroValue::Enum(index as i32, sym.clone()), None => return Err(ParseSchemaError(format!("Enum variant not found: {}", s))), } } (Array(vals), SchemaPiece::Array(inner)) => { let node = self.step(&**inner); let vals = vals .iter() .map(|val| node.json_to_value(val)) .collect::<Result<Vec<_>, ParseSchemaError>>()?; AvroValue::Array(vals) } (Object(map), SchemaPiece::Map(inner)) => { let node = self.step(&**inner); let map = map .iter() .map(|(k, v)| node.json_to_value(v).map(|v| (k.clone(), v))) .collect::<Result<HashMap<_, _>, ParseSchemaError>>()?; AvroValue::Map(map) } (String(s), SchemaPiece::Fixed { size }) if s.len() == *size => { AvroValue::Fixed(*size, s.clone().into_bytes()) } _ => { return Err(ParseSchemaError(format!( "Json default value {} does not match schema", json ))) } }; Ok(val) } } #[derive(Clone)] struct SchemaSerContext<'a> { node: SchemaNodeOrNamed<'a>, // This does not logically need Rc<RefCell<_>> semantics -- // it is only ever mutated in one stack frame at a time. // But AFAICT serde doesn't expose a way to // provide some mutable context to every node in the tree... seen_named: Rc<RefCell<HashMap<usize, String>>>, } #[derive(Clone)] struct RecordFieldSerContext<'a> { outer: &'a SchemaSerContext<'a>, inner: &'a RecordField, } impl<'a> SchemaSerContext<'a> { fn step(&'a self, next: SchemaPieceRefOrNamed<'a>) -> Self { Self { node: self.node.step_ref(next), seen_named: self.seen_named.clone(), } } } impl<'a> Serialize for SchemaSerContext<'a> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { match self.node.inner { SchemaPieceRefOrNamed::Piece(piece) => match piece { SchemaPiece::Null => serializer.serialize_str("null"), SchemaPiece::Boolean => serializer.serialize_str("boolean"), SchemaPiece::Int => serializer.serialize_str("int"), SchemaPiece::Long => serializer.serialize_str("long"), SchemaPiece::Float => serializer.serialize_str("float"), SchemaPiece::Double => serializer.serialize_str("double"), SchemaPiece::Date => { let mut map = serializer.serialize_map(Some(2))?; map.serialize_entry("type", "int")?; map.serialize_entry("logicalType", "date")?; map.end() } SchemaPiece::TimestampMilli | SchemaPiece::TimestampMicro => { let mut map = serializer.serialize_map(Some(2))?; map.serialize_entry("type", "long")?; if piece == &SchemaPiece::TimestampMilli { map.serialize_entry("logicalType", "timestamp-millis")?; } else { map.serialize_entry("logicalType", "timestamp-micros")?; } map.end() } SchemaPiece::Decimal { precision, scale, fixed_size, } => { let mut map = serializer.serialize_map(None)?; map.serialize_entry("type", "bytes")?; if let Some(fixed_size) = fixed_size { map.serialize_entry("size", fixed_size)?; } map.serialize_entry("precision", precision)?; map.serialize_entry("scale", scale)?; map.end() } SchemaPiece::Bytes => serializer.serialize_str("bytes"), SchemaPiece::String => serializer.serialize_str("string"), SchemaPiece::Array(inner) => { let mut map = serializer.serialize_map(Some(2))?; map.serialize_entry("type", "array")?; map.serialize_entry("items", &self.step(inner.as_ref().as_ref()))?; map.end() } SchemaPiece::Map(inner) => { let mut map = serializer.serialize_map(Some(2))?; map.serialize_entry("type", "map")?; map.serialize_entry("values", &self.step(inner.as_ref().as_ref()))?; map.end() } SchemaPiece::Union(inner) => { let variants = inner.variants(); let mut seq = serializer.serialize_seq(Some(variants.len()))?; for v in 
variants { seq.serialize_element(&self.step(v.as_ref()))?; } seq.end() } SchemaPiece::Json => { let mut map = serializer.serialize_map(Some(2))?; map.serialize_entry("type", "string")?; map.serialize_entry("connect.name", "io.debezium.data.Json")?; map.end() } SchemaPiece::Record { .. } | SchemaPiece::Enum { .. } | SchemaPiece::Fixed { .. } => { unreachable!("Unexpected named schema piece in anonymous schema position") } SchemaPiece::ResolveIntLong | SchemaPiece::ResolveIntFloat | SchemaPiece::ResolveIntDouble | SchemaPiece::ResolveLongFloat | SchemaPiece::ResolveLongDouble | SchemaPiece::ResolveFloatDouble | SchemaPiece::ResolveConcreteUnion { .. } | SchemaPiece::ResolveUnionUnion { .. } | SchemaPiece::ResolveUnionConcrete { .. } | SchemaPiece::ResolveRecord { .. } | SchemaPiece::ResolveEnum { .. } => { panic!("Attempted to serialize resolved schema") } }, SchemaPieceRefOrNamed::Named(index) => { let mut map = self.seen_named.borrow_mut(); let named_piece = match map.get(&index) { Some(name) => { return serializer.serialize_str(name.as_str()); } None => self.node.root.lookup(index), }; let name = named_piece.name.to_string(); map.insert(index, name.clone()); std::mem::drop(map); match &named_piece.piece { SchemaPiece::Record { doc, fields, .. } => { let mut map = serializer.serialize_map(None)?; map.serialize_entry("type", "record")?; map.serialize_entry("name", &name)?; if let Some(ref docstr) = doc { map.serialize_entry("doc", docstr)?; } // TODO (brennan) - serialize aliases map.serialize_entry( "fields", &fields .iter() .map(|f| RecordFieldSerContext { outer: self, inner: f, }) .collect::<Vec<_>>(), )?; map.end() } SchemaPiece::Enum { symbols, .. } => { let mut map = serializer.serialize_map(None)?; map.serialize_entry("type", "enum")?; map.serialize_entry("name", &name)?; map.serialize_entry("symbols", symbols)?; map.end() } SchemaPiece::Fixed { size } => { let mut map = serializer.serialize_map(None)?; map.serialize_entry("type", "fixed")?; map.serialize_entry("name", &name)?; map.serialize_entry("size", size)?; map.end() } SchemaPiece::Null | SchemaPiece::Boolean | SchemaPiece::Int | SchemaPiece::Long | SchemaPiece::Float | SchemaPiece::Double | SchemaPiece::Date | SchemaPiece::TimestampMilli | SchemaPiece::TimestampMicro | SchemaPiece::Decimal { .. } | SchemaPiece::Bytes | SchemaPiece::String | SchemaPiece::Array(_) | SchemaPiece::Map(_) | SchemaPiece::Union(_) | SchemaPiece::Json => { unreachable!("Unexpected anonymous schema piece in named schema position") } SchemaPiece::ResolveIntLong | SchemaPiece::ResolveIntFloat | SchemaPiece::ResolveIntDouble | SchemaPiece::ResolveLongFloat | SchemaPiece::ResolveLongDouble | SchemaPiece::ResolveFloatDouble | SchemaPiece::ResolveConcreteUnion { .. } | SchemaPiece::ResolveUnionUnion { .. } | SchemaPiece::ResolveUnionConcrete { .. } | SchemaPiece::ResolveRecord { .. } | SchemaPiece::ResolveEnum { .. 
} => { panic!("Attempted to serialize resolved schema") } } } } } } impl Serialize for Schema { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let ctx = SchemaSerContext { node: SchemaNodeOrNamed { root: self, inner: self.top.as_ref(), }, seen_named: Rc::new(RefCell::new(Default::default())), }; ctx.serialize(serializer) } } impl<'a> Serialize for RecordFieldSerContext<'a> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut map = serializer.serialize_map(None)?; map.serialize_entry("name", &self.inner.name)?; map.serialize_entry("type", &self.outer.step(self.inner.schema.as_ref()))?; if let Some(default) = &self.inner.default { map.serialize_entry("default", default)?; } map.end() } } /// Parses a **valid** avro schema into the Parsing Canonical Form. /// https://avro.apache.org/docs/1.8.2/spec.html#Parsing+Canonical+Form+for+Schemas fn parsing_canonical_form(schema: &serde_json::Value) -> String { match schema { serde_json::Value::Object(map) => pcf_map(map), serde_json::Value::String(s) => pcf_string(s), serde_json::Value::Array(v) => pcf_array(v), _ => unreachable!(), } } fn pcf_map(schema: &Map<String, serde_json::Value>) -> String { // Look for the namespace variant up front. let ns = schema.get("namespace").and_then(|v| v.as_str()); let mut fields = Vec::new(); for (k, v) in schema { // Reduce primitive types to their simple form. ([PRIMITIVE] rule) if schema.len() == 1 && k == "type" { // Invariant: function is only callable from a valid schema, so this is acceptable. if let serde_json::Value::String(s) = v { return pcf_string(s); } } // Strip out unused fields ([STRIP] rule) if field_ordering_position(k).is_none() { continue; } // Fully qualify the name, if it isn't already ([FULLNAMES] rule). if k == "name" { // Invariant: Only valid schemas. Must be a string. let name = v.as_str().unwrap(); let n = match ns { Some(namespace) if !name.contains('.') => { Cow::Owned(format!("{}.{}", namespace, name)) } _ => Cow::Borrowed(name), }; fields.push((k, format!("{}:{}", pcf_string(k), pcf_string(&*n)))); continue; } // Strip off quotes surrounding "size" type, if they exist ([INTEGERS] rule). if k == "size" { let i = match v.as_str() { Some(s) => s.parse::<i64>().expect("Only valid schemas are accepted!"), None => v.as_i64().unwrap(), }; fields.push((k, format!("{}:{}", pcf_string(k), i))); continue; } // For anything else, recursively process the result. fields.push(( k, format!("{}:{}", pcf_string(k), parsing_canonical_form(v)), )); } // Sort the fields by their canonical ordering ([ORDER] rule). fields.sort_unstable_by_key(|(k, _)| field_ordering_position(k).unwrap()); let inter = fields .into_iter() .map(|(_, v)| v) .collect::<Vec<_>>() .join(","); format!("{{{}}}", inter) } fn pcf_array(arr: &[serde_json::Value]) -> String { let inter = arr .iter() .map(parsing_canonical_form) .collect::<Vec<String>>() .join(","); format!("[{}]", inter) } fn pcf_string(s: &str) -> String { format!("\"{}\"", s) } // Used to define the ordering and inclusion of fields. fn
(field: &str) -> Option<usize> { let v = match field { "name" => 1, "type" => 2, "fields" => 3, "symbols" => 4, "items" => 5, "values" => 6, "size" => 7, _ => return None, }; Some(v) } #[cfg(test)] mod tests { use super::*; #[test] fn test_invalid_schema() { assert!(Schema::parse_str("invalid").is_err()); } #[test] fn test_primitive_schema() { assert_eq!( SchemaPieceOrNamed::Piece(SchemaPiece::Null), Schema::parse_str("\"null\"").unwrap().top ); assert_eq!( SchemaPieceOrNamed::Piece(SchemaPiece::Int), Schema::parse_str("\"int\"").unwrap().top ); assert_eq!( SchemaPieceOrNamed::Piece(SchemaPiece::Double), Schema::parse_str("\"double\"").unwrap().top ); } #[test] fn test_array_schema() { let schema = Schema::parse_str(r#"{"type": "array", "items": "string"}"#).unwrap(); assert_eq!( SchemaPieceOrNamed::Piece(SchemaPiece::Array(Box::new(SchemaPieceOrNamed::Piece( SchemaPiece::String )))), schema.top ); } #[test] fn test_map_schema() { let schema = Schema::parse_str(r#"{"type": "map", "values": "double"}"#).unwrap(); assert_eq!( SchemaPieceOrNamed::Piece(SchemaPiece::Map(Box::new(SchemaPieceOrNamed::Piece( SchemaPiece::Double )))), schema.top ); } #[test] fn test_union_schema() { let schema = Schema::parse_str(r#"["null", "int"]"#).unwrap(); assert_eq!( SchemaPieceOrNamed::Piece(SchemaPiece::Union( UnionSchema::new(vec![ SchemaPieceOrNamed::Piece(SchemaPiece::Null), SchemaPieceOrNamed::Piece(SchemaPiece::Int) ]) .unwrap() )), schema.top ); } #[test] fn test_union_unsupported_schema() { let schema = Schema::parse_str(r#"["null", ["null", "int"], "string"]"#); assert!(schema.is_err()); } #[test] fn test_multi_union_schema() { let schema = Schema::parse_str(r#"["null", "int", "float", "string", "bytes"]"#); assert!(schema.is_ok()); let schema = schema.unwrap(); let node = schema.top_node(); assert_eq!(SchemaKind::from(&schema), SchemaKind::Union); let union_schema = match node.inner { SchemaPiece::Union(u) => u, _ => unreachable!(), }; assert_eq!(union_schema.variants().len(), 5); let mut variants = union_schema.variants().iter(); assert_eq!( SchemaKind::from(node.step(variants.next().unwrap())), SchemaKind::Null ); assert_eq!( SchemaKind::from(node.step(variants.next().unwrap())), SchemaKind::Int ); assert_eq!( SchemaKind::from(node.step(variants.next().unwrap())), SchemaKind::Float ); assert_eq!( SchemaKind::from(node.step(variants.next().unwrap())), SchemaKind::String ); assert_eq!( SchemaKind::from(node.step(variants.next().unwrap())), SchemaKind::Bytes ); assert_eq!(variants.next(), None); } #[test] fn test_record_schema() { let schema = Schema::parse_str( r#" { "type": "record", "name": "test", "fields": [ {"name": "a", "type": "long", "default": 42}, {"name": "b", "type": "string"} ] } "#, ) .unwrap(); let mut lookup = HashMap::new(); lookup.insert("a".to_owned(), 0); lookup.insert("b".to_owned(), 1); let expected = SchemaPiece::Record { doc: None, fields: vec![ RecordField { name: "a".to_string(), doc: None, default: Some(Value::Number(42i64.into())), schema: SchemaPiece::Long.into(), order: RecordFieldOrder::Ascending, position: 0, }, RecordField { name: "b".to_string(), doc: None, default: None, schema: SchemaPiece::String.into(), order: RecordFieldOrder::Ascending, position: 1, }, ], lookup, }; assert_eq!(&expected, schema.top_node().inner); } #[test] fn test_enum_schema() { let schema = Schema::parse_str( r#"{"type": "enum", "name": "Suit", "symbols": ["diamonds", "spades", "clubs", "hearts"]}"#, ).unwrap(); let expected = SchemaPiece::Enum { doc: None, symbols: vec![ 
"diamonds".to_owned(), "spades".to_owned(), "clubs".to_owned(), "hearts".to_owned(), ], }; assert_eq!(&expected, schema.top_node().inner); } #[test] fn test_fixed_schema() { let schema = Schema::parse_str(r#"{"type": "fixed", "name": "test", "size": 16}"#).unwrap(); let expected = SchemaPiece::Fixed { size: 16usize }; assert_eq!(&expected, schema.top_node().inner); } #[test] fn test_date_schema() { let kinds = &[ r#"{ "type": "int", "name": "datish", "logicalType": "date" }"#, r#"{ "type": "int", "name": "datish", "connect.name": "io.debezium.time.Date" }"#, r#"{ "type": "int", "name": "datish", "connect.name": "org.apache.kafka.connect.data.Date" }"#, ]; for kind in kinds { let schema = Schema::parse_str(kind).unwrap(); assert_eq!(schema.top_node().inner, &SchemaPiece::Date); assert_eq!( serde_json::to_string(&schema).unwrap(), r#"{"type":"int","logicalType":"date"}"# ); } } #[test] fn test_decimal_schemas() { let schema = Schema::parse_str( r#"{ "type": "fixed", "name": "dec", "size": 8, "logicalType": "decimal", "precision": 12, "scale": 5 }"#, ) .unwrap(); let expected = SchemaPiece::Decimal { precision: 12, scale: 5, fixed_size: Some(8), }; assert_eq!(schema.top_node().inner, &expected); let schema = Schema::parse_str( r#"{ "type": "bytes", "logicalType": "decimal", "precision": 12, "scale": 5 }"#, ) .unwrap(); let expected = SchemaPiece::Decimal { precision: 12, scale: 5, fixed_size: None, }; assert_eq!(schema.top_node().inner, &expected); let res = Schema::parse_str( r#"{ "type": "bytes", "logicalType": "decimal", "precision": 12, "scale": 13 }"#, ); assert_eq!( res.unwrap_err().to_string(), "Failed to parse schema: Decimal scale is greater than precision" ); let res = Schema::parse_str( r#"{ "type": "bytes", "logicalType": "decimal", "precision": -12 }"#, ); assert_eq!( res.unwrap_err().to_string(), "Failed to parse schema: Decimal precision must be greater than zero" ); let res = Schema::parse_str( r#"{ "type": "bytes", "logicalType": "decimal", "precision": 12, "scale": -5 }"#, ); assert_eq!( res.unwrap_err().to_string(), "Failed to parse schema: Decimal scale must be greater than zero" ); let res = Schema::parse_str( r#"{ "type": "fixed", "name": "dec", "size": 5, "logicalType": "decimal", "precision": 12, "scale": 5 }"#, ); assert_eq!( res.unwrap_err().to_string(), "Failed to parse schema: Decimal precision 12 requires more than 5 bytes of space" ); } #[test] fn test_no_documentation() { let schema = Schema::parse_str(r#"{"type": "enum", "name": "Coin", "symbols": ["heads", "tails"]}"#) .unwrap(); let doc = match schema.top_node().inner { SchemaPiece::Enum { doc, .. } => doc.clone(), _ => panic!(), }; assert!(doc.is_none()); } #[test] fn test_documentation() { let schema = Schema::parse_str( r#"{"type": "enum", "name": "Coin", "doc": "Some documentation", "symbols": ["heads", "tails"]}"# ).unwrap(); let doc = match schema.top_node().inner { SchemaPiece::Enum { doc, .. } => doc.clone(), _ => None, }; assert_eq!("Some documentation".to_owned(), doc.unwrap()); } // Tests to ensure Schema is Send + Sync. These tests don't need to _do_ anything, if they can // compile, they pass. 
#[test] fn test_schema_is_send() { fn send<S: Send>(_s: S) {} let schema = Schema { named: vec![], indices: Default::default(), top: SchemaPiece::Null.into(), }; send(schema); } #[test] fn test_schema_is_sync() { fn sync<S: Sync>(_s: S) {} let schema = Schema { named: vec![], indices: Default::default(), top: SchemaPiece::Null.into(), }; sync(&schema); sync(schema); } #[test] fn test_schema_fingerprint() { use md5::Md5; use sha2::Sha256; let raw_schema = r#" { "type": "record", "name": "test", "fields": [ {"name": "a", "type": "long", "default": 42}, {"name": "b", "type": "string"} ] } "#; let schema = Schema::parse_str(raw_schema).unwrap(); assert_eq!( "5ecb2d1f0eaa647d409e6adbd5d70cd274d85802aa9167f5fe3b73ba70b32c76", format!("{}", schema.fingerprint::<Sha256>()) ); assert_eq!( "a2c99a3f40ea2eea32593d63b483e962", format!("{}", schema.fingerprint::<Md5>()) ); } }
field_ordering_position
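The pcf_* helpers above implement Avro's Parsing Canonical Form, and field_ordering_position (the middle of this row) supplies the [ORDER] table. As a cross-check, the same STRIP/FULLNAMES/INTEGERS/PRIMITIVE/ORDER rules can be restated as a rough Python sketch -- illustrative only, not this crate's API:

import json

ORDER = {"name": 1, "type": 2, "fields": 3, "symbols": 4,
         "items": 5, "values": 6, "size": 7}

def pcf(schema):
    if isinstance(schema, str):                       # primitive or name reference
        return json.dumps(schema)
    if isinstance(schema, list):                      # union
        return "[" + ",".join(pcf(s) for s in schema) + "]"
    ns = schema.get("namespace")
    if set(schema) == {"type"} and isinstance(schema["type"], str):
        return json.dumps(schema["type"])             # [PRIMITIVE]
    out = []
    for k, v in schema.items():
        if k not in ORDER:                            # [STRIP]
            continue
        if k == "name" and ns and "." not in v:       # [FULLNAMES]
            v = "{}.{}".format(ns, v)
        if k == "size":                               # [INTEGERS]
            out.append((k, '"size":{}'.format(int(v))))
        elif k == "name":
            out.append((k, '"name":{}'.format(json.dumps(v))))
        else:
            out.append((k, '"{}":{}'.format(k, pcf(v))))
    out.sort(key=lambda kv: ORDER[kv[0]])             # [ORDER]
    return "{" + ",".join(v for _, v in out) + "}"

assert pcf({"type": "record", "name": "test", "namespace": "ns",
            "fields": [{"name": "a", "type": "long", "default": 42}]}) \
    == '{"name":"ns.test","type":"record","fields":[{"name":"a","type":"long"}]}'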
generate_virtualwanvpnserverconfigurationvpnprofile.go
// Copyright (c) Microsoft and contributors. All rights reserved. // // This source code is licensed under the MIT license found in the // LICENSE file in the root directory of this source tree. package network import ( "context" "log" "time" "github.com/Azure-Samples/azure-sdk-for-go-samples/internal/config" "github.com/Azure/azure-sdk-for-go/sdk/armcore" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/network/armnetwork" ) func
() armnetwork.NetworkManagementClient { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { log.Fatalf("failed to obtain a credential: %v", err) } client := armnetwork.NewNetworkManagementClient(armcore.NewDefaultConnection(cred, nil), config.SubscriptionID()) return *client } // Generates a unique VPN profile for P2S clients for VirtualWan and associated VpnServerConfiguration // combination in the specified resource group. func Generatevirtualwanvpnserverconfigurationvpnprofile(ctx context.Context, virtualWANName string, vpnClientParams armnetwork.VirtualWanVPNProfileParameters) error { client := getBeginGeneratevirtualwanvpnserverconfigurationvpnprofilesClient() poller, err := client.BeginGeneratevirtualwanvpnserverconfigurationvpnprofile( ctx, config.GroupName(), virtualWANName, vpnClientParams, nil) if err != nil { return err } _, err = poller.PollUntilDone(ctx, 30*time.Second) if err != nil { return err } return nil }
getBeginGeneratevirtualwanvpnserverconfigurationvpnprofilesClient
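The Go sample above drives an Azure long-running operation: the Begin* call returns a poller, and PollUntilDone re-polls every 30 seconds until the service reports a terminal state. A minimal sketch of that loop in Python, with hypothetical poller.poll()/done()/result() helpers standing in for the SDK (this is not the armnetwork API):

import time

def poll_until_done(poller, interval_seconds=30):
    # Mirrors poller.PollUntilDone(ctx, 30*time.Second): keep re-polling
    # the operation until it reaches a terminal state, then return its result.
    while not poller.done():
        time.sleep(interval_seconds)
        poller.poll()
    return poller.result()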
verbs_qp.rs
// This file is part of mlnx-ofed. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/mlnx-ofed/master/COPYRIGHT. No part of mlnx-ofed, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2016 The developers of mlnx-ofed. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/mlnx-ofed/master/COPYRIGHT.

#[repr(C)]
pub struct verbs_qp {
    pub qp: ibv_qp,
    pub comp_mask: u32,
    pub xrcd: *mut verbs_xrcd,
}

impl Default for verbs_qp {
    #[inline(always)]
    fn default() -> Self {
        unsafe { zeroed() }
    }
}

impl Debug for verbs_qp {
    #[inline(always)]
    fn fmt(&self, f: &mut Formatter) -> Result {
}
write!(f, "verbs_qp {{ qp: {:?}, xrcd: {:?} }}", self.qp, self.xrcd) }
fortios_dnsfilter_profile.py
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_dnsfilter_profile short_description: Configure DNS domain filter profiles in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS by allowing the user to set and modify dnsfilter feature and profile category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.2 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate ip address. required: true username: description: - FortiOS or FortiGate username. required: true password: description: - FortiOS or FortiGate password. default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol type: bool default: true dnsfilter_profile: description: - Configure DNS domain filter profiles. default: null suboptions: state: description: - Indicates whether to create or remove the object choices: - present - absent block-action: description: - Action to take for blocked domains. choices: - block - redirect block-botnet: description: - Enable/disable blocking botnet C&C DNS lookups. choices: - disable - enable comment: description: - Comment. domain-filter: description: - Domain filter settings. suboptions: domain-filter-table: description: - DNS domain filter table ID. Source dnsfilter.domain-filter.id. external-ip-blocklist: description: - One or more external IP block lists. suboptions: name: description: - External domain block list name. Source system.external-resource.name. required: true ftgd-dns: description: - FortiGuard DNS Filter settings. suboptions: filters: description: - FortiGuard DNS domain filters. suboptions: action: description: - Action to take for DNS requests matching the category. choices: - block - monitor category: description: - Category number. id: description: - ID number. required: true log: description: - Enable/disable DNS filter logging for this DNS profile. choices: - enable - disable options: description: - FortiGuard DNS filter options. choices: - error-allow - ftgd-disable log-all-domain: description: - Enable/disable logging of all domains visited (detailed DNS logging). choices: - enable - disable name: description: - Profile name. 
required: true redirect-portal: description: - IP address of the SDNS redirect portal. safe-search: description: - Enable/disable Google, Bing, and YouTube safe search. choices: - disable - enable sdns-domain-log: description: - Enable/disable domain filtering and botnet domain logging. choices: - enable - disable sdns-ftgd-err-log: description: - Enable/disable FortiGuard SDNS rating error logging. choices: - enable - disable youtube-restrict: description: - Set safe search for YouTube restriction level. choices: - strict - moderate ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" tasks: - name: Configure DNS domain filter profiles. fortios_dnsfilter_profile: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" dnsfilter_profile: state: "present" block-action: "block" block-botnet: "disable" comment: "Comment." domain-filter: domain-filter-table: "7 (source dnsfilter.domain-filter.id)" external-ip-blocklist: - name: "default_name_9 (source system.external-resource.name)" ftgd-dns: filters: - action: "block" category: "13" id: "14" log: "enable" options: "error-allow" log-all-domain: "enable" name: "default_name_18" redirect-portal: "<your_own_value>" safe-search: "disable" sdns-domain-log: "enable" sdns-ftgd-err-log: "enable" youtube-restrict: "strict" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule def login(data, fos): host = data['host'] username = data['username'] password = data['password'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password) def filter_dnsfilter_profile_data(json): option_list = ['block-action', 'block-botnet', 'comment', 'domain-filter', 'external-ip-blocklist', 'ftgd-dns', 'log-all-domain', 'name', 'redirect-portal', 'safe-search', 'sdns-domain-log', 'sdns-ftgd-err-log', 'youtube-restrict'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def dnsfilter_profile(data, fos): vdom = data['vdom'] dnsfilter_profile_data = data['dnsfilter_profile'] filtered_data = filter_dnsfilter_profile_data(dnsfilter_profile_data) if dnsfilter_profile_data['state'] == "present": return fos.set('dnsfilter', 'profile', 
data=filtered_data, vdom=vdom) elif dnsfilter_profile_data['state'] == "absent": return fos.delete('dnsfilter', 'profile', mkey=filtered_data['name'], vdom=vdom) def
(status): return status['status'] == "success" or \ status['http_method'] == "DELETE" and status['http_status'] == 404 def fortios_dnsfilter(data, fos): login(data, fos) if data['dnsfilter_profile']: resp = dnsfilter_profile(data, fos) fos.logout() return not is_successful_status(resp), \ resp['status'] == "success", \ resp def main(): fields = { "host": {"required": True, "type": "str"}, "username": {"required": True, "type": "str"}, "password": {"required": False, "type": "str", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "dnsfilter_profile": { "required": False, "type": "dict", "options": { "state": {"required": True, "type": "str", "choices": ["present", "absent"]}, "block-action": {"required": False, "type": "str", "choices": ["block", "redirect"]}, "block-botnet": {"required": False, "type": "str", "choices": ["disable", "enable"]}, "comment": {"required": False, "type": "str"}, "domain-filter": {"required": False, "type": "dict", "options": { "domain-filter-table": {"required": False, "type": "int"} }}, "external-ip-blocklist": {"required": False, "type": "list", "options": { "name": {"required": True, "type": "str"} }}, "ftgd-dns": {"required": False, "type": "dict", "options": { "filters": {"required": False, "type": "list", "options": { "action": {"required": False, "type": "str", "choices": ["block", "monitor"]}, "category": {"required": False, "type": "int"}, "id": {"required": True, "type": "int"}, "log": {"required": False, "type": "str", "choices": ["enable", "disable"]} }}, "options": {"required": False, "type": "str", "choices": ["error-allow", "ftgd-disable"]} }}, "log-all-domain": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "name": {"required": True, "type": "str"}, "redirect-portal": {"required": False, "type": "str"}, "safe-search": {"required": False, "type": "str", "choices": ["disable", "enable"]}, "sdns-domain-log": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "sdns-ftgd-err-log": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "youtube-restrict": {"required": False, "type": "str", "choices": ["strict", "moderate"]} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") fos = FortiOSAPI() is_error, has_changed, result = fortios_dnsfilter(module.params, fos) if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
is_successful_status
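Note that is_successful_status (the middle of this row) leans on Python operator precedence: `and` binds tighter than `or`, so the expression reads as "success, or (DELETE and 404)" -- a DELETE answered with 404 counts as success because the object is already gone. A quick self-check:

# A or B and C  parses as  A or (B and C)
status = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
assert (status['status'] == "success"
        or status['http_method'] == "DELETE" and status['http_status'] == 404)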
deviceaddr.rs
#[doc = "Register `DEVICEADDR[%s]` reader"] pub struct R(crate::R<DEVICEADDR_SPEC>); impl core::ops::Deref for R { type Target = crate::R<DEVICEADDR_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<DEVICEADDR_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<DEVICEADDR_SPEC>) -> Self { R(reader) } } #[doc = "Field `DEVICEADDR` reader - 48 bit device address"] pub struct DEVICEADDR_R(crate::FieldReader<u32, u32>); impl DEVICEADDR_R { #[inline(always)] pub(crate) fn new(bits: u32) -> Self { DEVICEADDR_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for DEVICEADDR_R { type Target = crate::FieldReader<u32, u32>; #[inline(always)] fn
(&self) -> &Self::Target { &self.0 } } impl R { #[doc = "Bits 0:31 - 48 bit device address"] #[inline(always)] pub fn deviceaddr(&self) -> DEVICEADDR_R { DEVICEADDR_R::new(self.bits) } } #[doc = "Description collection\\[n\\]: Device address n\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [deviceaddr](index.html) module"] pub struct DEVICEADDR_SPEC; impl crate::RegisterSpec for DEVICEADDR_SPEC { type Ux = u32; } #[doc = "`read()` method returns [deviceaddr::R](R) reader structure"] impl crate::Readable for DEVICEADDR_SPEC { type Reader = R; } #[doc = "`reset()` method sets DEVICEADDR[%s] to value 0xffff_ffff"] impl crate::Resettable for DEVICEADDR_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0xffff_ffff } }
deref
RotateArrayTest.py
from unittest import TestCase from RotateArray import RotateArray
class TestRotateArray(TestCase): def test_rotate(self): ra = RotateArray() array0 = [1] ra.rotate(array0, 1) self.assertEqual(array0, [1]) array1 = [1, 2] ra.rotate(array1, 1) self.assertEqual(array1, [2, 1]) array2 = [1, 2, 3, 4, 5, 6, 7] ra.rotate(array2, 3) self.assertEqual(array2, [5, 6, 7, 1, 2, 3, 4])
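The RotateArray class under test is not included in this row. A minimal in-place implementation consistent with the assertions above (rotate right by k) might look like the following sketch; reducing k modulo the length is an added defensive assumption, not something the tests require:

class RotateArray(object):
    def rotate(self, nums, k):
        """Rotate nums right by k positions, in place."""
        k %= len(nums)  # defensive: allow k >= len(nums)
        if k:
            nums[:] = nums[-k:] + nums[:-k]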
train.py
from dataset import tiny_dataset from bbox_codec import bbox_encode from resnet50_base import Localization_net2 from torch.utils.data import DataLoader,random_split import torch as t import tqdm from torch.utils.tensorboard import SummaryWriter import torch.nn as nn import torch.optim as optim import argparse from loss import Loss_for_localization from evaluate import compute_three_acc import os def parser(): parser = argparse.ArgumentParser() parser.add_argument('--lr',help='learning rate',type=float,default=1e-2,dest='lr') parser.add_argument('--batch-size',help='batchsize',type=int,default=32,dest='batch_size') parser.add_argument('--lr-decay',help='the decay of lr',type=float,default=0.1,dest='lr_decay') parser.add_argument('--root',help='root directory of dataset',type=str, default=r'E:\BS_learning\4_1\CV_basis\experiment\2\tiny_vid',dest='root') parser.add_argument('--weight-decay',help='weight decay of optimizer',type=float, default=1e-5,dest='weight_decay') parser.add_argument('--epochs',help='set the num of epochs',type=int,default=100) parser.add_argument('--log-dir',help='tensorboard log dir',type=str,required=True) parser.add_argument('--save-file-name', help='the pth file name', type=str,required=True) parser.add_argument('--class-weight',help='the weight of classification of the loss',default=1,type=int) parser.add_argument('--regre-weight', help='the weight of regression of the loss', default=2,type=int) return parser def weight_init(net): for name,child in net.named_children(): if name == 'feature_extraction': continue if isinstance(child,nn.Conv2d): nn.init.kaiming_normal_(child.weight) if child.bias != None: nn.init.zeros_(child.bias) elif isinstance(child,nn.Linear): nn.init.kaiming_normal_(child.weight) if child.bias != None: nn.init.zeros_(child.bias) return net def
(): args = parser().parse_args() t.manual_seed(777) t.cuda.manual_seed(777) dataset = tiny_dataset(root=args.root) train_set,val_set = random_split(dataset=dataset,lengths=[150*5,30*5], generator=t.Generator().manual_seed(777)) train_loader = DataLoader(dataset=train_set,batch_size=args.batch_size,shuffle=True,num_workers=2) val_loader = DataLoader(dataset=val_set,batch_size=1,shuffle=False,num_workers=0) print('establish the net ...') net = Localization_net2(class_num=5).cuda() print('initialize the net') net = weight_init(net=net) high_lr_list = [] low_lr_list = [] for name,param in net.named_parameters(): if 'feature_extraction' in name: low_lr_list.append(param) else: high_lr_list.append(param) optimizer = optim.SGD([{'params':low_lr_list,'lr':0.1*args.lr},{'params':high_lr_list}], lr=args.lr,weight_decay=args.weight_decay,momentum=0.9) # scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, # mode='min', patience=2,factor=args.lr_decay) writer = SummaryWriter(log_dir=args.log_dir,comment='curves_log') criterion = Loss_for_localization().cuda() for i in tqdm.tqdm(range(args.epochs)): t_loss = 0. tc_acc = 0. tr_acc = 0. t_acc = 0. v_loss = 0. vc_acc = 0. vr_acc = 0. v_acc = 0. print('\n%dth epoch'%(i+1)) if i+1 == args.epochs//4: optimizer.param_groups[0]['lr'] *= args.lr_decay optimizer.param_groups[1]['lr'] *= args.lr_decay if i+1 == args.epochs//2: optimizer.param_groups[0]['lr'] *= args.lr_decay optimizer.param_groups[1]['lr'] *= args.lr_decay if i+1 == 3*args.epochs//4: optimizer.param_groups[0]['lr'] *= args.lr_decay optimizer.param_groups[1]['lr'] *= args.lr_decay for item in train_loader: tc_acc_num = 0 tr_acc_num = 0 t_acc_num = 0 net.train() img = item['img'].cuda() label = item['label'].cuda() bbox = item['bbox'].cuda() objects, scores, locs = net(img) gt = bbox_encode(bbox=bbox,feature_map_size=(4,4),img_size=(128,128)).cuda() loss = criterion(objects,scores,locs,label,gt,args.regre_weight,0.5,args.class_weight) t_loss += loss.item() optimizer.zero_grad() loss.backward() optimizer.step() for j in range(img.size()[0]): a,b,c = compute_three_acc(objects=objects[j].view(1,*objects[j].size()), score=scores[j].view(1,*scores[j].size()), loc=locs[j].view(1,*locs[j].size()), label=label[j].view(1,*label[j].size()), bbox=bbox[j].view(1,*bbox[j].size())) tc_acc_num += a tr_acc_num += b t_acc_num += c tc_acc += tc_acc_num/float(img.size()[0]) tr_acc += tr_acc_num / float(img.size()[0]) t_acc += t_acc_num / float(img.size()[0]) net.eval() with t.no_grad(): for item2 in val_loader: img = item2['img'].cuda() label = item2['label'].cuda() bbox = item2['bbox'].cuda() objects, scores, locs = net(img) class_acc,regression_acc,acc = compute_three_acc(objects=objects,score=scores, loc=locs,label=label,bbox=bbox) gt = bbox_encode(bbox=bbox, feature_map_size=(4, 4), img_size=(128, 128)).cuda() vc_acc += class_acc vr_acc += regression_acc v_acc += acc loss = criterion(objects, scores, locs,label, gt,args.regre_weight,0.5,args.class_weight) v_loss +=loss.item() v_loss /= len(val_loader) vc_acc /= len(val_loader) vr_acc /= len(val_loader) v_acc /= len(val_loader) # scheduler.step(v_loss) print('train_loss: %.5f val_loss : %.5f' % (t_loss/len(train_loader),v_loss)) writer.add_scalar('low_lr_curve', optimizer.param_groups[0]["lr"], i + 1) writer.add_scalar('high_lr_curve', optimizer.param_groups[1]["lr"], i + 1) writer.add_scalars('loss', {'Train':t_loss / len(train_loader)}, i+1) writer.add_scalars('loss', {'Val':v_loss}, i+1) writer.add_scalars('train_acc', {'class_acc': tc_acc/ 
len(train_loader)}, i + 1) writer.add_scalars('train_acc', {'regression_acc': tr_acc/ len(train_loader)}, i + 1) writer.add_scalars('train_acc', {'two_task_acc': t_acc/ len(train_loader)}, i + 1) writer.add_scalars('val_acc',{'class_acc':vc_acc},i+1) writer.add_scalars('val_acc', {'regression_acc': vr_acc}, i + 1) writer.add_scalars('val_acc', {'two_task_acc': v_acc}, i + 1) if optimizer.param_groups[0]['lr'] <= 1e-8: break t.save(net,os.path.join(args.log_dir,args.save_file_name + 'epoch%d.pth'%i)) if __name__ == '__main__': train()
train
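The loop above implements a step decay by hand: both parameter groups (the 0.1x-lr backbone and the full-lr head) are multiplied by lr_decay when the 1-based epoch count hits epochs//4, epochs//2, and 3*epochs//4. The same schedule expressed as a pure function, purely for illustration (not part of train.py):

def lr_at_epoch(base_lr, epoch, epochs, lr_decay=0.1):
    # The number of milestones already passed determines the decay exponent.
    milestones = (epochs // 4, epochs // 2, 3 * epochs // 4)
    return base_lr * lr_decay ** sum(epoch >= m for m in milestones)

# With epochs=100 and base_lr=1e-2: epochs 1-24 -> 1e-2, 25-49 -> 1e-3,
# 50-74 -> 1e-4, 75+ -> 1e-5, matching the three in-loop decays.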
model_200_response.go
/* * OpenAPI Petstore * * This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ * * API version: 1.0.0 * Generated by: OpenAPI Generator (https://openapi-generator.tech) */ package openapi import ( "bytes" "encoding/json" ) // Model200Response Model for testing model name starting with number type Model200Response struct { Name *int32 `json:"name,omitempty"` Class *string `json:"class,omitempty"` } // GetName returns the Name field value if set, zero value otherwise. func (o *Model200Response) GetName() int32 { if o == nil || o.Name == nil { var ret int32 return ret } return *o.Name } // GetNameOk returns a tuple with the Name field value if set, zero value otherwise // and a boolean to check if the value has been set. func (o *Model200Response) GetNameOk() (int32, bool) { if o == nil || o.Name == nil { var ret int32 return ret, false } return *o.Name, true } // HasName returns a boolean if a field has been set. func (o *Model200Response) HasName() bool { if o != nil && o.Name != nil { return true } return false } // SetName gets a reference to the given int32 and assigns it to the Name field. func (o *Model200Response) SetName(v int32) { o.Name = &v } // GetClass returns the Class field value if set, zero value otherwise. func (o *Model200Response) GetClass() string { if o == nil || o.Class == nil { var ret string return ret } return *o.Class } // GetClassOk returns a tuple with the Class field value if set, zero value otherwise // and a boolean to check if the value has been set. func (o *Model200Response) GetClassOk() (string, bool) { if o == nil || o.Class == nil { var ret string return ret, false } return *o.Class, true } // HasClass returns a boolean if a field has been set. func (o *Model200Response) HasClass() bool { if o != nil && o.Class != nil { return true } return false } // SetClass gets a reference to the given string and assigns it to the Class field. func (o *Model200Response) SetClass(v string) { o.Class = &v } type NullableModel200Response struct { Value Model200Response ExplicitNull bool } func (v NullableModel200Response) MarshalJSON() ([]byte, error) { switch { case v.ExplicitNull: return []byte("null"), nil default: return json.Marshal(v.Value) }
func (v *NullableModel200Response) UnmarshalJSON(src []byte) error { if bytes.Equal(src, []byte("null")) { v.ExplicitNull = true return nil } return json.Unmarshal(src, &v.Value) }
}
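NullableModel200Response exists so that an explicit JSON null can be told apart from an omitted field, which a bare pointer cannot express. A rough Python analogue of the MarshalJSON/UnmarshalJSON pair above -- a sketch, not generated client code:

import json

class NullableValue(object):
    def __init__(self, value=None, explicit_null=False):
        self.value = value
        self.explicit_null = explicit_null

    def marshal(self):
        # ExplicitNull wins: serialize a literal null regardless of value.
        return "null" if self.explicit_null else json.dumps(self.value)

    @classmethod
    def unmarshal(cls, src):
        # A literal null sets the flag instead of decoding a value.
        if src.strip() == "null":
            return cls(explicit_null=True)
        return cls(value=json.loads(src))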
radix_sort.py
def radsort(unslist):
    """Returns a sorted list. Accepts only a list containing positive integers."""
    # find max for iterative solution
    maxval = max(unslist)
    ntimes = len(str(maxval))
    slist = unslist[:]
    for n in range(ntimes):
        # Making radix bins
        bins = [[] for _ in range(10)]
        # Place each list item in appropriate bin
        for item in slist:
            digval = _get_nth_digit(item, n)
            bins[digval].append(item)
        slist = []
        # Flatten bins to list
        for bucket in bins:
            slist.extend(bucket)
    return slist


def _get_nth_digit(num, n):
if __name__ == "__main__": """Test time performance for best and worst cases""" import time size = 1000 # Best case: when all numbers in the list have the same number of digits. good_list = range(size + 1) start = time.time() for i in range(1000): radsort(good_list) stop = time.time() best_time = (stop - start) # Worst case: When there is one very large outlier. bad_list = [1 for _ in range(size)] + [10**10] start = time.time() for i in range(1000): radsort(bad_list) stop = time.time() worst_time = (stop - start) print "Best case is {} times better than worst for n=1000\n".format( worst_time/best_time) print "Best case: {0:.{1}f} ms\nWorst case: {2:.{3}f} ms".format( best_time, 5, worst_time, 5)
"""For a positive integer, get the value at the nth digit; indexing starts at 0""" return ((num % (10 ** (n + 1))) - (num % (10 ** n))) // 10 ** n
grasshopper_combat.py
"""Grasshopper - Terminal game combat function - Return remaining health after taking damage. # 1 Best Practices solution by ZozoFouchtra and others def combat(health, damage): return max(0, health-damage) """ def
(health, damage): """Find remaining health after taking damage.""" return 0 if health - damage < 0 else health - damage
combat
realm.rs
//! Conceptually, a realm consists of a set of intrinsic objects, an ECMAScript global environment, //! all of the ECMAScript code that is loaded within the scope of that global environment, //! and other associated state and resources. //! //! A realm is represented in this implementation as a Realm struct with the fields specified from the spec. use crate::{ environments::{CompileTimeEnvironmentStack, DeclarativeEnvironmentStack}, object::{GlobalPropertyMap, JsObject, ObjectData, PropertyMap}, BoaProfiler, }; /// Representation of a Realm. /// /// In the specification these are called Realm Records. #[derive(Debug)] pub struct Realm { global_object: JsObject, pub(crate) global_extensible: bool, pub(crate) global_property_map: PropertyMap, pub(crate) environments: DeclarativeEnvironmentStack, pub(crate) compile_env: CompileTimeEnvironmentStack, } impl Realm { #[inline] pub fn create() -> Self
#[inline] pub(crate) fn global_object(&self) -> &JsObject { &self.global_object } #[inline] pub(crate) fn global_bindings(&self) -> &GlobalPropertyMap { self.global_property_map.string_property_map() } #[inline] pub(crate) fn global_bindings_mut(&mut self) -> &mut GlobalPropertyMap { self.global_property_map.string_property_map_mut() } /// Set the number of bindings on the global environment. #[inline] pub(crate) fn set_global_binding_number(&mut self) { let binding_number = self.compile_env.get_binding_number(); self.environments.set_global_binding_number(binding_number); } }
{ let _timer = BoaProfiler::global().start_event("Realm::create", "realm"); // Create brand new global object // Global has no prototype to pass None to new_obj // Allow identification of the global object easily let global_object = JsObject::from_proto_and_data(None, ObjectData::global()); Self { global_object, global_extensible: true, global_property_map: PropertyMap::default(), environments: DeclarativeEnvironmentStack::new(), compile_env: CompileTimeEnvironmentStack::new(), } }
rxportal.go
package dilithium import ( "github.com/emirpasic/gods/trees/btree" "github.com/emirpasic/gods/utils" "github.com/openziti/dilithium/util" "github.com/pkg/errors" "github.com/sirupsen/logrus" "math" "time" ) type RxPortal struct { adapter Adapter sink Sink tree *btree.Tree accepted int32 rxs chan *WireMessage rxPortalSize int readPool *Pool ackPool *Pool txp *TxPortal seq *util.Sequence closer *Closer closed bool ii InstrumentInstance } type RxRead struct { Buf []byte Size int Eof bool } func NewRxPortal(adapter Adapter, sink Sink, txp *TxPortal, seq *util.Sequence, closer *Closer, ii InstrumentInstance) *RxPortal
func (rxp *RxPortal) SetAccepted(accepted int32) { rxp.accepted = accepted } func (rxp *RxPortal) Rx(wm *WireMessage) (err error) { defer func() { if r := recover(); r != nil { err = errors.Wrap(err, "send on closed rxs") } }() select { case rxp.rxs <- wm: default: logrus.Info("dropped") } return err } func (rxp *RxPortal) Close() { if !rxp.closed { rxp.sink.Close() rxp.closed = true close(rxp.rxs) } } func (rxp *RxPortal) run() { logrus.Info("started") defer logrus.Warn("exited") defer func() { if r := recover(); r != nil { logrus.Errorf("recovered (%v)", r) } }() for { var wm *WireMessage var ok bool select { case wm, ok = <-rxp.rxs: if !ok { return } //case <-time.After(time.Duration(rxp.txp.alg.Profile().ConnectionTimeout) * time.Millisecond): // rxp.Closer.timeout() //return } switch wm.messageType() { case DATA: _, found := rxp.tree.Get(wm.Seq) if !found && (wm.Seq > rxp.accepted || (wm.Seq == 0 && rxp.accepted == math.MaxInt32)) { if size, err := wm.asDataSize(); err == nil { rxp.tree.Put(wm.Seq, wm) rxp.rxPortalSize += int(size) rxp.ii.RxPortalSzChanged(rxp.rxPortalSize) } else { logrus.Errorf("unexpected as data size (%v)", err) } } var rtt *uint16 if wm.hasFlag(RTT) { if _, rttIn, err := wm.asData(); err == nil { rtt = rttIn } else { logrus.Errorf("unexpected as data (%v)", err) } } if ack, err := newAck([]Ack{{wm.Seq, wm.Seq}}, int32(rxp.rxPortalSize), rtt, rxp.ackPool); err == nil { if err := writeWireMessage(ack, rxp.adapter); err != nil { logrus.Errorf("error sending ack (%v)", err) rxp.ii.WriteError(err) } else { rxp.ii.WireMessageTx(ack) rxp.ii.TxAck(ack) } ack.buf.Unref() } if found { wm.buf.Unref() } if rxp.tree.Size() > 0 { startingRxPortalSize := rxp.rxPortalSize var next int32 if rxp.accepted < math.MaxInt32 { next = rxp.accepted + 1 } else { next = 0 } keys := rxp.tree.Keys() for _, key := range keys { if key.(int32) == next { v, _ := rxp.tree.Get(key) wm := v.(*WireMessage) if data, _, err := wm.asData(); err == nil { if err := rxp.sink.Accept(data); err != nil { logrus.WithError(err).Error("write to data sink failed, exiting rx loop") return } rxp.tree.Remove(key) rxp.rxPortalSize -= len(data) rxp.ii.RxPortalSzChanged(rxp.rxPortalSize) wm.buf.Unref() rxp.accepted = next if next < math.MaxInt32 { next++ } else { next = 0 } } else { logrus.Errorf("unexpected mt [%d]", wm.Mt) } } } // Send "pacing" KEEPALIVE? 
				if rxp.txp.alg.RxPortalPacing(startingRxPortalSize, rxp.rxPortalSize) {
					if keepalive, err := newKeepalive(rxp.rxPortalSize, rxp.ackPool); err == nil {
						if err := writeWireMessage(keepalive, rxp.adapter); err != nil {
							logrus.Errorf("error sending pacing keepalive (%v)", err)
							rxp.ii.WriteError(err)
						} else {
							rxp.ii.WireMessageTx(keepalive)
							rxp.ii.TxKeepalive(keepalive)
						}
						keepalive.buf.Unref()
					}
				}
			}

		case KEEPALIVE:
			wm.buf.Unref()

		case CLOSE:
			if closeAck, err := newAck([]Ack{{wm.Seq, wm.Seq}}, int32(rxp.rxPortalSize), nil, rxp.ackPool); err == nil {
				if err := writeWireMessage(closeAck, rxp.adapter); err != nil {
					logrus.Errorf("error writing close ack (%v)", err)
					rxp.ii.WriteError(err)
				} else {
					rxp.ii.WireMessageTx(closeAck)
					rxp.ii.TxAck(closeAck)
				}
				// release the pooled ack buffer, matching the DATA ack path
				closeAck.buf.Unref()
			} else {
				logrus.Errorf("error creating close ack (%v)", err)
			}
			wm.buf.Unref()

		default:
			logrus.Errorf("unexpected message type [%d]", wm.messageType())
			wm.buf.Unref()
		}
	}
}

func (rxp *RxPortal) rxer() {
	logrus.Info("started")
	defer logrus.Warn("exited")

	for {
		wm, err := readWireMessage(rxp.adapter, rxp.readPool)
		if err != nil {
			rxp.ii.ReadError(err)
			logrus.Errorf("error reading (%v)", err)
			rxp.closer.EmergencyStop()
			return
		}
		rxp.ii.WireMessageRx(wm)

		switch wm.messageType() {
		case DATA:
			if err := rxp.Rx(wm); err != nil {
				logrus.Errorf("error rx-ing (%v)", err)
				continue
			}

		case ACK:
			acks, rxPortalSz, rttTs, err := wm.asAck()
			if err != nil {
				logrus.Errorf("as ack error (%v)", err)
				continue
			}
			if rttTs != nil {
				now := time.Now().UnixNano()
				clockTs := uint16(now / int64(time.Millisecond))
				rttMs := clockTs - *rttTs
				rxp.txp.alg.UpdateRTT(int(rttMs))
			}
			rxp.txp.alg.UpdateRxPortalSize(int(rxPortalSz))
			if err := rxp.txp.ack(acks); err != nil {
				logrus.Errorf("error acking (%v)", err)
				continue
			}
			rxp.ii.RxAck(wm)
			wm.buf.Unref()

		case KEEPALIVE:
			rxPortalSz, err := wm.asKeepalive()
			if err != nil {
				logrus.Errorf("as keepalive error (%v)", err)
				continue
			}
			rxp.txp.alg.UpdateRxPortalSize(rxPortalSz)
			if err := rxp.Rx(wm); err != nil {
				logrus.Errorf("error forwarding keepalive to rxPortal (%v)", err)
				continue
			}
			rxp.ii.RxKeepalive(wm)
			wm.buf.Unref()

		case CLOSE:
			if err := rxp.Rx(wm); err != nil {
				logrus.Errorf("error rx-ing close (%v)", err)
			}

		default:
			logrus.Errorf("unexpected message type: %d", wm.messageType())
			wm.buf.Unref()
			rxp.ii.UnexpectedMessageType(wm.messageType())
		}
	}
}
{ rxp := &RxPortal{ adapter: adapter, sink: sink, tree: btree.NewWith(txp.alg.Profile().MaxTreeSize, utils.Int32Comparator), accepted: -1, rxs: make(chan *WireMessage, 4), readPool: NewPool("readPool", uint32(txp.alg.Profile().PoolBufferSize), ii), ackPool: NewPool("ackPool", uint32(txp.alg.Profile().PoolBufferSize), ii), txp: txp, seq: seq, closer: closer, ii: ii, } go rxp.run() go rxp.rxer() return rxp }
ReversalOfTransferOutConfirmationV07.go
package sese import ( "encoding/xml" "github.com/fgrid/iso20022" ) type Document00400107 struct { XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:sese.004.001.07 Document"` Message *ReversalOfTransferOutConfirmationV07 `xml:"RvslOfTrfOutConf"` } func (d *Document00400107) AddMessage() *ReversalOfTransferOutConfirmationV07 { d.Message = new(ReversalOfTransferOutConfirmationV07) return d.Message } // Scope // An executing party, for example, a transfer agent, sends the ReversalOfTransferOutConfirmation message to the instructing party, for example, an investment manager or its authorised representative, to cancel a previously sent TransferOutConfirmation message. // Usage // The ReversalOfTransferOutConfirmation message is used to reverse a previously sent TransferOutConfirmation. // There are two ways to specify the reversal of the transfer out confirmation. Either: // - the business references, for example, TransferReference, TransferConfirmationIdentification, of the transfer confirmation are quoted, or, // - all the details of the transfer confirmation (this includes TransferReference and TransferConfirmationIdentification) are quoted but this is not recommended. // The message identification of the TransferOutConfirmation message in which the transfer out confirmation was conveyed may also be quoted in PreviousReference. The message identification of the TransferOutInstruction message in which the transfer out instruction was conveyed may also be quoted in RelatedReference. type ReversalOfTransferOutConfirmationV07 struct { // Reference that uniquely identifies a message from a business application standpoint. MessageIdentification *iso20022.MessageIdentification1 `xml:"MsgId"` // Reference to the transaction identifier issued by the counterparty. Building block may also be used to reference a previous transaction, or tie a set of messages together. References []*iso20022.References20 `xml:"Refs,omitempty"` // Choice between reversal by reference or by reversal details. Reversal *iso20022.Reversal8Choice `xml:"Rvsl"` // Identifies the market practice to which the message conforms. MarketPracticeVersion *iso20022.MarketPracticeVersion1 `xml:"MktPrctcVrsn,omitempty"` // Information provided when the message is a copy of a previous message. CopyDetails *iso20022.CopyInformation4 `xml:"CpyDtls,omitempty"` } func (r *ReversalOfTransferOutConfirmationV07) AddMessageIdentification() *iso20022.MessageIdentification1 { r.MessageIdentification = new(iso20022.MessageIdentification1) return r.MessageIdentification } func (r *ReversalOfTransferOutConfirmationV07) AddReferences() *iso20022.References20 { newValue := new(iso20022.References20) r.References = append(r.References, newValue) return newValue } func (r *ReversalOfTransferOutConfirmationV07) AddReversal() *iso20022.Reversal8Choice {
r.Reversal = new(iso20022.Reversal8Choice) return r.Reversal } func (r *ReversalOfTransferOutConfirmationV07) AddMarketPracticeVersion() *iso20022.MarketPracticeVersion1 { r.MarketPracticeVersion = new(iso20022.MarketPracticeVersion1) return r.MarketPracticeVersion } func (r *ReversalOfTransferOutConfirmationV07) AddCopyDetails() *iso20022.CopyInformation4 { r.CopyDetails = new(iso20022.CopyInformation4) return r.CopyDetails }
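// The reversal flow described in the comments above can be exercised with the
// generated builders. A minimal sketch only: the import path is assumed, and
// no field values are filled in.
package main

import (
	"encoding/xml"
	"fmt"

	"github.com/fgrid/iso20022/sese" // assumed import path for the package above
)

func main() {
	doc := &sese.Document00400107{}
	msg := doc.AddMessage()
	msg.AddMessageIdentification() // MsgId: unique business reference for this message
	msg.AddReversal()              // Rvsl: reversal by reference, or by full details

	// Serialize to the sese.004.001.07 XML representation.
	out, err := xml.MarshalIndent(doc, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}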
eta.rs
#![allow( unused, clippy::no_effect, clippy::redundant_closure_call, clippy::many_single_char_names, clippy::needless_pass_by_value, clippy::option_map_unit_fn, clippy::trivially_copy_pass_by_ref )] #![warn(clippy::redundant_closure, clippy::needless_borrow)] use std::path::PathBuf; fn main() { let a = Some(1u8).map(|a| foo(a)); meta(|a| foo(a)); let c = Some(1u8).map(|a| {1+2; foo}(a)); let d = Some(1u8).map(|a| foo((|b| foo2(b))(a))); //is adjusted? all(&[1, 2, 3], &&2, |x, y| below(x, y)); //is adjusted unsafe { Some(1u8).map(|a| unsafe_fn(a)); // unsafe fn } // See #815 let e = Some(1u8).map(|a| divergent(a)); let e = Some(1u8).map(|a| generic(a)); let e = Some(1u8).map(generic); // See #515 let a: Option<Box<::std::ops::Deref<Target = [i32]>>> = Some(vec![1i32, 2]).map(|v| -> Box<::std::ops::Deref<Target = [i32]>> { Box::new(v) }); } trait TestTrait { fn trait_foo(self) -> bool; fn trait_foo_ref(&self) -> bool; } struct TestStruct<'a> { some_ref: &'a i32, }
fn foo(self) -> bool { false } unsafe fn foo_unsafe(self) -> bool { true } } impl<'a> TestTrait for TestStruct<'a> { fn trait_foo(self) -> bool { false } fn trait_foo_ref(&self) -> bool { false } } impl<'a> std::ops::Deref for TestStruct<'a> { type Target = char; fn deref(&self) -> &char { &'a' } } fn test_redundant_closures_containing_method_calls() { let i = 10; let e = Some(TestStruct { some_ref: &i }).map(|a| a.foo()); let e = Some(TestStruct { some_ref: &i }).map(TestStruct::foo); let e = Some(TestStruct { some_ref: &i }).map(|a| a.trait_foo()); let e = Some(TestStruct { some_ref: &i }).map(|a| a.trait_foo_ref()); let e = Some(TestStruct { some_ref: &i }).map(TestTrait::trait_foo); let e = Some(&mut vec![1, 2, 3]).map(|v| v.clear()); let e = Some(&mut vec![1, 2, 3]).map(std::vec::Vec::clear); unsafe { let e = Some(TestStruct { some_ref: &i }).map(|a| a.foo_unsafe()); } let e = Some("str").map(|s| s.to_string()); let e = Some("str").map(str::to_string); let e = Some('a').map(|s| s.to_uppercase()); let e = Some('a').map(char::to_uppercase); let e: std::vec::Vec<usize> = vec!['a', 'b', 'c'].iter().map(|c| c.len_utf8()).collect(); let e: std::vec::Vec<char> = vec!['a', 'b', 'c'].iter().map(|c| c.to_ascii_uppercase()).collect(); let e: std::vec::Vec<char> = vec!['a', 'b', 'c'].iter().map(char::to_ascii_uppercase).collect(); let p = Some(PathBuf::new()); let e = p.as_ref().and_then(|s| s.to_str()); let c = Some(TestStruct { some_ref: &i }) .as_ref() .map(|c| c.to_ascii_uppercase()); fn test_different_borrow_levels<T>(t: &[&T]) where T: TestTrait, { t.iter().filter(|x| x.trait_foo_ref()); t.iter().map(|x| x.trait_foo_ref()); } } fn meta<F>(f: F) where F: Fn(u8), { f(1u8) } fn foo(_: u8) {} fn foo2(_: u8) -> u8 { 1u8 } fn all<X, F>(x: &[X], y: &X, f: F) -> bool where F: Fn(&X, &X) -> bool, { x.iter().all(|e| f(e, y)) } fn below(x: &u8, y: &u8) -> bool { x < y } unsafe fn unsafe_fn(_: u8) {} fn divergent(_: u8) -> ! { unimplemented!() } fn generic<T>(_: T) -> u8 { 0 }
impl<'a> TestStruct<'a> {
buyer.guard.ts
import { Injectable, CanActivate, ExecutionContext, UnauthorizedException, } from '@nestjs/common'; import * as jwt from 'jsonwebtoken'; @Injectable() export class
implements CanActivate {
  async canActivate(context: ExecutionContext) {
    const request = context.switchToHttp().getRequest();
    const token = request?.headers?.authorization?.split(' ')[1];

    if (!token) {
      throw new UnauthorizedException('Authorization token missing');
    }

    let user: any = null;
    try {
      user = jwt.verify(token, process.env.ACCESS_TOKEN_JWT_SECRET);
    } catch (err) {
      console.log(err);
    }

    if (!user) {
      throw new UnauthorizedException('Invalid or expired token!');
    }

    if (user.role !== 'buyer') {
      throw new UnauthorizedException('You are not a buyer!');
    }

    return true;
  }
}
BuyerGuard
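// A guard like this is typically attached with Nest's @UseGuards() decorator.
// A sketch of wiring it to a route; the controller and route are made up:
import { Controller, Get, UseGuards } from '@nestjs/common';
import { BuyerGuard } from './buyer.guard';

@Controller('orders') // hypothetical controller
export class OrdersController {
  // Only requests bearing a valid JWT whose role claim is 'buyer' get through.
  @UseGuards(BuyerGuard)
  @Get()
  findMine() {
    return [];
  }
}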
results.py
#!/usr/bin/env python # coding=utf-8 import logging from typing import NamedTuple, List from dataclasses import dataclass from collections import OrderedDict as odict, defaultdict import numpy as np from ioos_qc.qartod import QartodFlags L = logging.getLogger(__name__) # noqa class CallResult(NamedTuple): package: str test: str function: callable results: np.ndarray def __repr__(self): return f'<CallResult package={self.package} test={self.test}>' class ContextResult(NamedTuple): stream_id: str results: List[CallResult] subset_indexes: np.ndarray data: np.ndarray = None tinp: np.ndarray = None zinp: np.ndarray = None lat: np.ndarray = None lon: np.ndarray = None def __repr__(self): return f'<ContextResult stream_id={self.stream_id}>' @dataclass class CollectedResult: stream_id: str package: str test: str function: callable results: np.ma.core.MaskedArray = None data: np.ndarray = None tinp: np.ndarray = None zinp: np.ndarray = None lat: np.ndarray = None lon: np.ndarray = None def __repr__(self): return f'<CollectedResult stream_id={self.stream_id} package={self.package} test={self.test}>' def function_name(self) -> str: return self.function.__name__ @property def hash_key(self) -> str: return f'{self.stream_id}:{self.package}.{self.test}' def
(results, how='list'):
    if how in ['list', list]:
        return collect_results_list(results)
    elif how in ['dict', dict]:
        return collect_results_dict(results)
    raise ValueError('Unknown output format for collected results: {}'.format(how))


def collect_results_list(results):
    """ Turns a list of ContextResult objects into a list of CollectedResult
    objects by combining the subset_index information in each ContextResult
    together into a single array of results.
    """
    collected = odict()

    # ContextResults
    for r in results:

        cr = None

        # Shortcut for CallResult objects when someone uses QcConfig.run() directly
        # and doesn't go through a Stream object
        if isinstance(r, CallResult):
            cr = CollectedResult(
                stream_id=None,
                package=r.package,
                test=r.test,
                function=r.function,
                results=r.results,
            )
            collected[cr.hash_key] = cr
            continue

        # CallResults
        for tr in r.results:

            cr = CollectedResult(
                stream_id=r.stream_id,
                package=tr.package,
                test=tr.test,
                function=tr.function
            )

            if cr.hash_key not in collected:
                # Set the initial values
                cr.results = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=tr.results.dtype)
                cr.data = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.data.dtype)
                cr.tinp = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.tinp.dtype)
                cr.zinp = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.zinp.dtype)
                cr.lat = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.lat.dtype)
                cr.lon = np.ma.masked_all(shape=r.subset_indexes.shape, dtype=r.lon.dtype)
                collected[cr.hash_key] = cr

            collected[cr.hash_key].results[r.subset_indexes] = tr.results

        if cr is not None:
            if r.subset_indexes.all():
                collected[cr.hash_key].data = r.data
                collected[cr.hash_key].tinp = r.tinp
                collected[cr.hash_key].zinp = r.zinp
                collected[cr.hash_key].lat = r.lat
                collected[cr.hash_key].lon = r.lon
            else:
                collected[cr.hash_key].data[r.subset_indexes] = r.data
                collected[cr.hash_key].tinp[r.subset_indexes] = r.tinp
                collected[cr.hash_key].zinp[r.subset_indexes] = r.zinp
                collected[cr.hash_key].lat[r.subset_indexes] = r.lat
                collected[cr.hash_key].lon[r.subset_indexes] = r.lon

    return list(collected.values())


def collect_results_dict(results):
    """ Turns a list of ContextResult objects into a dictionary of test results
    by combining the subset_index information in each ContextResult together into
    a single array of results. This is mostly here for historical purposes. Users
    should migrate to using the Result objects directly.
    """
    # Magic for nested key generation
    # https://stackoverflow.com/a/27809959
    collected = defaultdict(lambda: defaultdict(odict))

    # ContextResults
    for r in results:

        # Shortcut for CallResult objects when someone uses QcConfig.run() directly
        # and doesn't go through a Stream object
        if isinstance(r, CallResult):
            collected[r.package][r.test] = r.results
            continue

        flag_arr = np.ma.empty_like(r.subset_indexes, dtype='uint8')
        flag_arr.fill(QartodFlags.UNKNOWN)

        # iterate over the CallResults
        for tr in r.results:
            testpackage = tr.package
            testname = tr.test
            testresults = tr.results

            if testname not in collected[r.stream_id][testpackage]:
                collected[r.stream_id][testpackage][testname] = np.copy(flag_arr)
            collected[r.stream_id][testpackage][testname][r.subset_indexes] = testresults

    return collected
collect_results
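# For reference, the helpers above can be exercised directly with a hand-built
# CallResult, without running a full Stream. A minimal sketch; the import path
# and the QARTOD test name are assumptions, and the lambda is a stand-in for a
# real test callable:
import numpy as np
from ioos_qc.results import CallResult, collect_results  # assumed import path

fake = CallResult(
    package='qartod',
    test='gross_range_test',  # assumed test name, for illustration only
    function=lambda x: x,     # placeholder for the actual test function
    results=np.array([1, 1, 4, 9], dtype='uint8'),
)

collected = collect_results([fake], how='list')
print(collected[0].hash_key)  # -> 'None:qartod.gross_range_test' (no stream_id)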
timeseries.py
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

''' Timeseries related classes and functions '''

from __future__ import (print_function, division, absolute_import, unicode_literals)

from swat.cas.table import CASTable
from .utils import random_name, get_cas_host_type, char_to_double, int_to_double
from dlpy.utils import DLPyError
from swat.cas import datamsghandlers
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
import datetime
import numbers
import re
import swat


def plot_timeseries(tbl, timeid, timeseries, figure=None,
                    groupid=None, start_time=None, end_time=None, xlim=None,
                    ylim=None, xlabel=None, ylabel=None, xdate_format=None,
                    title=None, figsize=None,
                    fontsize_spec=None, **kwargs):
    '''
    Create a timeseries line plot from a CASTable or pandas DataFrame

    Parameters
    ----------
    tbl : :class:`CASTable` or :class:`pandas.DataFrame` or :class:`pandas.Series`
        The input table for the plot. If it is a CASTable,
        it will be fetched to the client.
        If it is a pandas.Series, the index name will become timeid,
        and the series name will become timeseries.
    timeid : str
        The name of the timeid variable. It provides the values
        used on the x-axis.
    timeseries : str
        The name of the column that contains the timeseries values.
        It provides the values used on the y-axis.
    figure : two-element-tuple, optional
        The tuple must be in the form (:class:`matplotlib.figure.Figure`,
        :class:`matplotlib.axes.Axes`). These are the figure and axes
        that the user wants to plot on. It can be used to plot a new
        timeseries plot on pre-existing figures.
        Default: None
    groupid : dict, optional
        It is in the format {column1 : value1, column2 : value2, ...}.
        It is used to plot the subset of the data where
        column1 = value1 and column2 = value2, etc.
        Default: None, which means do not subset the data.
    start_time : :class:`datetime.datetime` or :class:`datetime.date`, optional
        The start time of the plotted timeseries.
        Default: None, which means the plot starts
        at the beginning of the timeseries.
    end_time : :class:`datetime.datetime` or :class:`datetime.date`, optional
        The end time of the plotted timeseries.
        Default: None, which means the plot ends at the end of the timeseries.
    xlim : tuple, optional
        Set the data limits for the x-axis.
        Default: None
    ylim : tuple, optional
        Set the data limits for the y-axis.
        Default: None
    xlabel : string, optional
        Set the label for the x-axis.
    ylabel : string, optional
        Set the label for the y-axis.
    xdate_format : string, optional
        If the x-axis represents date or datetime, this is the
        date or datetime format string. (e.g. '%Y-%m-%d' is the format
        of 2000-03-10; refer to the documentation for
        :meth:`datetime.datetime.strftime`)
        Default: None
    title : string, optional
        Set the title of the figure.
        Default: None
    figsize : tuple, optional
        The size of the figure.
        Default: None
    fontsize_spec : dict, optional
        It specifies the fontsize for 'xlabel', 'ylabel', 'xtick',
        'ytick', 'legend' and 'title'. (e.g. {'xlabel':14, 'ylabel':14}).
        If None, and figure is specified, then it will take the fontsizes from
        the provided figure object. Otherwise, it will take the default
        fontsizes, which are {'xlabel':16, 'ylabel':16, 'xtick':14,
        'ytick':14, 'legend':14, 'title':20}
        Default: None
    `**kwargs` : keyword arguments, optional
        Options to pass to the matplotlib plotting method.

    Returns
    -------
    (:class:`matplotlib.figure.Figure`, :class:`matplotlib.axes.Axes`)

    '''
    default_fontsize_spec = {'xlabel': 16, 'ylabel': 16, 'xtick': 14,
                             'ytick': 14, 'legend': 14, 'title': 20}

    if figure is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)
        if fontsize_spec is not None:
            default_fontsize_spec.update(fontsize_spec)
        fontsize_spec = default_fontsize_spec
    else:
        fig, ax = figure
        if fontsize_spec is None:
            fontsize_spec = {}
        if 'legend' not in fontsize_spec.keys():
            fontsize_spec['legend'] = default_fontsize_spec['legend']

    if isinstance(tbl, CASTable):
        if groupid is None:
            tbl = tbl.to_frame()
        else:
            where_clause_list = []
            for gid in groupid.keys():
                where_clause_list.append(gid + '=' + str(groupid[gid]))
            where_clause = ' and '.join(where_clause_list)
            tbl = tbl.query(where_clause)
            tbl = tbl.to_frame()
    else:
        if isinstance(tbl, pd.Series):
            timeseries = tbl.name
            tbl = tbl.reset_index()
            timeid = [colname for colname in tbl.columns
                      if colname != timeseries][0]
        if groupid is not None:
            for gid in groupid.keys():
                tbl = tbl.loc[tbl[gid] == groupid[gid]]

    if not (np.issubdtype(tbl[timeid].dtype, np.integer)
            or np.issubdtype(tbl[timeid].dtype, np.floating)):
        tbl[timeid] = pd.to_datetime(tbl[timeid])
        fig.autofmt_xdate()
        if xdate_format is not None:
            import matplotlib.dates as mdates
            xfmt = mdates.DateFormatter(xdate_format)
            ax.xaxis.set_major_formatter(xfmt)

    if start_time is not None:
        if isinstance(start_time, datetime.date):
            start_time = pd.Timestamp(start_time)
        tbl = tbl.loc[tbl[timeid] >= start_time]

    if end_time is not None:
        if isinstance(end_time, datetime.date):
            end_time = pd.Timestamp(end_time)
        tbl = tbl.loc[tbl[timeid] <= end_time]

    tbl = tbl.sort_values(timeid)

    ax.plot(tbl[timeid], tbl[timeseries], **kwargs)

    if xlabel is not None:
        if 'xlabel' in fontsize_spec.keys():
            ax.set_xlabel(xlabel, fontsize=fontsize_spec['xlabel'])
        else:
            ax.set_xlabel(xlabel)
    elif figure is not None:
        if 'xlabel' in fontsize_spec.keys():
            ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize_spec['xlabel'])
    else:
        ax.set_xlabel(timeid, fontsize=fontsize_spec['xlabel'])

    if ylabel is not None:
        if 'ylabel' in fontsize_spec.keys():
            ax.set_ylabel(ylabel, fontsize=fontsize_spec['ylabel'])
        else:
            ax.set_ylabel(ylabel)
    elif figure is not None:
        if 'ylabel' in fontsize_spec.keys():
            ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize_spec['ylabel'])
    else:
        ax.set_ylabel(timeseries, fontsize=fontsize_spec['ylabel'])

    if xlim is not None:
        ax.set_xlim(xlim)

    if ylim is not None:
        ax.set_ylim(ylim)

    if title is not None:
        if 'title' in fontsize_spec.keys():
            ax.set_title(title, fontsize=fontsize_spec['title'])
        else:
            ax.set_title(title)
    elif figure is not None:
        if 'title' in fontsize_spec.keys():
            ax.set_title(ax.get_title(), fontsize=fontsize_spec['title'])

    ax.legend(loc='best', bbox_to_anchor=(1, 1),
              prop={'size': fontsize_spec['legend']})

    if 'xtick' in fontsize_spec.keys():
        ax.get_xaxis().set_tick_params(direction='out',
                                       labelsize=fontsize_spec['xtick'])
    else:
        ax.get_xaxis().set_tick_params(direction='out')

    if 'ytick' in fontsize_spec.keys():
        ax.get_yaxis().set_tick_params(direction='out',
                                       labelsize=fontsize_spec['ytick'])
    else:
        ax.get_yaxis().set_tick_params(direction='out')

    return (fig, ax)


class TimeseriesTable(CASTable):
    '''
    Table for
    preprocessing timeseries.

    It creates an instance of :class:`TimeseriesTable` by loading from
    files on the server side, or files on the client side, or an in-memory
    :class:`CASTable`, :class:`pandas.DataFrame` or :class:`pandas.Series`.
    It then performs in-place timeseries formatting, timeseries accumulation,
    timeseries subsequence generation, and timeseries partitioning to prepare
    the timeseries into a format that can be consumed by subsequent
    deep learning models.

    Parameters
    ----------
    name : string, optional
        Name of the CAS table
    timeid : string, optional
        Specifies the column name for the timeid.
        Default: None
    groupby_var : string or list-of-strings, optional
        The groupby variables.
        Default: None.
    sequence_opt : dict, optional
        Dictionary with keys: 'input_length', 'target_length' and 'token_size'.
        It will be created by the prepare_subsequences method.
        Default: None
    inputs_target : dict, optional
        Dictionary with keys: 'inputs', 'target'.
        It will be created by the prepare_subsequences method.
        Default: None

    Returns
    -------
    :class:`TimeseriesTable`

    '''
    running_caslib = None

    def __init__(self, name, timeid=None, groupby_var=None,
                 sequence_opt=None, inputs_target=None, **table_params):
        CASTable.__init__(self, name, **table_params)
        self.timeid = timeid
        self.groupby_var = groupby_var
        self.sequence_opt = sequence_opt
        self.inputs_target = inputs_target

    @classmethod
    def from_table(cls, tbl, columns=None, casout=None):
        '''
        Create a TimeseriesTable from a CASTable

        Parameters
        ----------
        tbl : :class:`CASTable`
            The CASTable object to use as the source.
        columns : list-of-strings, optional
            Columns to keep when loading the data.
            None means it will include all the columns from the source.
            Empty list means include no column, which will generate empty data.
            Default: None
        casout : dict or :class:`CASTable`, optional
            If it is dict, it specifies the output CASTable parameters.
            If it is CASTable, it is the CASTable that will be overwritten.
            None means a new CASTable with random name will be generated.
            Default: None

        Returns
        -------
        :class:`TimeseriesTable`

        '''
        input_tbl_params = tbl.to_outtable_params()
        input_tbl_name = input_tbl_params['name']

        conn = tbl.get_connection()

        if casout is None:
            casout_params = {}
        elif isinstance(casout, CASTable):
            casout_params = casout.to_outtable_params()
        elif isinstance(casout, dict):
            casout_params = casout

        if 'name' not in casout_params:
            casout_params['name'] = random_name('Timeseries', 6)

        output_tbl_name = casout_params['name']

        if columns is None:
            keep_col_sascode = '''
            data {0};
            set {1};
            run;
            '''.format(output_tbl_name, input_tbl_name)

            conn.retrieve('dataStep.runCode', _messagelevel='error',
                          code=keep_col_sascode)
        else:
            if not isinstance(columns, list):
                columns = [columns]

            keepcol = ' '.join(columns)

            keep_col_sascode = '''
            data {0};
            set {1};
            keep {2};
            run;
            '''.format(output_tbl_name, input_tbl_name, keepcol)

            conn.retrieve('dataStep.runCode', _messagelevel='error',
                          code=keep_col_sascode)

        out = cls(**casout_params)
        out.set_connection(conn)

        return out

    @classmethod
    def from_pandas(cls, conn, pandas_df, casout=None):
        '''
        Create a TimeseriesTable from a pandas DataFrame or Series

        Parameters
        ----------
        conn : CAS
            The CAS connection object
        pandas_df : :class:`pandas.DataFrame` or :class:`pandas.Series`
            The pandas dataframe or series to use as the source.
        casout : dict or :class:`CASTable`, optional
            If it is dict, it specifies the output CASTable parameters.
            If it is CASTable, it is the CASTable that will be overwritten.
            None means a new CASTable with random name will be generated.
            Default: None

        Returns
        -------
        :class:`TimeseriesTable`

        '''
        if isinstance(pandas_df, pd.Series):
            pandas_df = pandas_df.reset_index()

        if casout is None:
            casout_params = {}
        elif isinstance(casout, CASTable):
            casout_params = casout.to_outtable_params()
        elif isinstance(casout, dict):
            casout_params = casout

        if 'name' not in casout_params:
            casout_params['name'] = random_name('Timeseries', 6)

        output_tbl_name = casout_params['name']

        handler = datamsghandlers.PandasDataFrame(pandas_df)

        conn.addtable(table=output_tbl_name, replace=True, **handler.args.addtable)

        tbl = conn.CASTable(name=output_tbl_name)

        return cls.from_table(tbl, columns=None, casout=casout_params)

    @classmethod
    def from_localfile(cls, conn, path, columns=None, importoptions=None,
                       casout=None):
        '''
        Create a TimeseriesTable from a file on the client side.

        Parameters
        ----------
        conn : CAS
            The CAS connection object
        path : string
            The full path to the local file that will be uploaded to the server.
        columns : list-of-strings, optional
            Columns to keep when loading the data.
            None means it will include all the columns from the source.
            Empty list means to include no column, which will generate empty data.
            Default: None
        importoptions : dict, optional
            Options to import data and upload to the server, such as filetype,
            delimiter, etc. None means use the default 'auto' method in the
            importoptions from CAS.upload.
            Default: None
        casout : dict or :class:`CASTable`, optional
            If it is dict, it specifies the output CASTable parameters.
            If it is CASTable, it is the CASTable that will be overwritten.
            None means a new CASTable with random name will be generated.
            Default: None

        Returns
        -------
        :class:`TimeseriesTable`

        '''
        if casout is None:
            casout_params = {}
        elif isinstance(casout, CASTable):
            casout_params = casout.to_outtable_params()
        elif isinstance(casout, dict):
            casout_params = casout

        if 'name' not in casout_params:
            casout_params['name'] = random_name('Timeseries', 6)

        if importoptions is None:
            importoptions = {}

        upload_result = conn.upload(path,
                                    importoptions=importoptions,
                                    casout=casout_params)

        tbl = conn.CASTable(**casout_params)

        return cls.from_table(tbl, columns=columns, casout=casout_params)

    @classmethod
    def from_serverfile(cls, conn, path, columns=None, caslib=None,
                        importoptions=None, casout=None):
        '''
        Create a TimeseriesTable from a file on the server side

        Parameters
        ----------
        conn : CAS
            The CAS connection object
        path : string
            The path that the server can access. If the caslib is specified,
            it is the relative path to the file with respect to the caslib.
            Otherwise, it is the full path to the file.
        columns : list-of-strings, optional
            Columns to keep when loading the data.
            None means it will include all the columns from the source.
            Empty list means include no column, which will generate empty data.
            Default: None
        caslib : string, optional
            The name of the caslib which contains the file to be uploaded.
            Default: None
        importoptions : dict, optional
            Options to import data and upload to the server, such as filetype,
            delimiter, etc. None means use the default 'auto' method in the
            importoptions from CAS.upload.
            Default: None
        casout : dict or :class:`CASTable`, optional
            If it is dict, it specifies the output CASTable parameters.
            If it is CASTable, it is the CASTable that will be overwritten.
            None means a new CASTable with random name will be generated.
            Default: None

        Returns
        -------
        :class:`TimeseriesTable`

        '''
        if casout is None:
            casout_params = {}
        elif isinstance(casout, CASTable):
            casout_params = casout.to_outtable_params()
        elif isinstance(casout, dict):
            casout_params = casout

        if 'name' not in casout_params:
            casout_params['name'] = random_name('Timeseries', 6)

        if importoptions is None:
            importoptions = {}

        if caslib is None:
            caslib, rest_path = cls.find_file_caslib(conn, path)
            if caslib is None:
                server_type = get_cas_host_type(conn).lower()
                if server_type.startswith("lin") or server_type.startswith("osx"):
                    path_split = path.rsplit("/", 1)
                else:
                    path_split = path.rsplit("\\", 1)

                caslib = random_name('Caslib', 6)
                rt1 = conn.retrieve('addcaslib', _messagelevel='error',
                                    name=caslib, path=path_split[0],
                                    activeonadd=False, subdirectories=False,
                                    datasource={'srctype': 'path'})

                if rt1.severity < 2:
                    rt2 = conn.retrieve('table.loadTable',
                                        _messagelevel='error',
                                        casout=casout_params,
                                        caslib=caslib,
                                        importoptions=importoptions,
                                        path=path_split[1])

                    if rt2.severity > 1:
                        for msg in rt2.messages:
                            print(msg)
                        raise DLPyError('cannot load files, something is wrong!')
                else:
                    for msg in rt1.messages:
                        print(msg)
                    raise DLPyError('''cannot create caslib with path:{},
                    something is wrong!'''.format(path_split[0]))
            else:
                rt3 = conn.retrieve('table.loadTable', _messagelevel='error',
                                    casout=casout_params, caslib=caslib,
                                    importoptions=importoptions, path=rest_path)

                if rt3.severity > 1:
                    for msg in rt3.messages:
                        print(msg)
                    raise DLPyError('cannot load files, something is wrong!')
        else:
            rt4 = conn.retrieve('table.loadTable', _messagelevel='error',
                                casout=casout_params, caslib=caslib,
                                importoptions=importoptions, path=path)

            if rt4.severity > 1:
                for msg in rt4.messages:
                    print(msg)
                raise DLPyError('cannot load files, something is wrong!')

        tbl = conn.CASTable(**casout_params)

        return cls.from_table(tbl, columns=columns, casout=casout_params)

    def timeseries_formatting(self, timeid, timeseries,
                              timeid_informat=None, timeid_format=None,
                              extra_columns=None):
        '''
        Format the TimeseriesTable

        Format timeid into the appropriate format, and check and convert
        timeseries columns into numeric columns.

        Parameters
        ----------
        timeid : string
            Specifies the column name for the timeid.
        timeseries : string or list-of-strings
            Specifies the column name for the timeseries, that will be part of
            the input or output of the RNN. If str, then it is a univariate
            time series. If list of strings, then it is a multivariate timeseries.
        timeid_informat : string, optional
            If timeid is in the string format, this is required to parse the
            timeid column.
            Default: None
        timeid_format : string, optional
            Specifies the SAS format that the timeid column will be stored in
            after parsing.
            None means it will be stored in numeric form, not a specific date
            or datetime format.
            Default: None
        extra_columns : string or list-of-strings, optional
            Specifies the additional columns to be included.
            Empty list means to include no extra columns other than timeid and
            timeseries.
            If None, all columns are included.
            Default: None

        '''
        self.timeid = timeid
        self.timeseries = timeseries
        self.timeid_format = timeid_format
        self.timeid_informat = timeid_informat
        self.extra_columns = extra_columns

        input_tbl_params = self.to_outtable_params()
        input_tbl_name = input_tbl_params['name']

        conn = self.get_connection()

        tbl_colinfo = self.columninfo().ColumnInfo

        if self.timeid_format is None:
            if self.timeid_informat is None:
                self.timeid_format = self.timeid_informat
            elif self.timeid_informat.lower().startswith('anydtdtm'):
                self.timeid_format = 'DATETIME19.'
            else:
                self.timeid_format = self.timeid_informat

        if (((self.timeid_type not in ['double', 'date', 'datetime'])
             and (not self.timeid_type.startswith('int')))
                and (self.timeid_informat is not None)):
            fmt_code = '''
            data {0};
            set {0}(rename=({1}=c_{1}));
            {1} = input(c_{1},{2});
            drop c_{1};
            format {1} {3};
            run;
            '''.format(input_tbl_name, self.timeid,
                       self.timeid_informat, self.timeid_format)

            conn.retrieve('dataStep.runCode', _messagelevel='error',
                          code=fmt_code)
        elif (((self.timeid_type not in ['double', 'date', 'datetime'])
               and (not self.timeid_type.startswith('int')))
                and (self.timeid_informat is None)):
            raise ValueError('''timeid variable is not in the numeric format,
            so timeid_informat is required for parsing the timeid variable.
            ''')
        elif self.timeid_format is not None:
            fmt_code = '''
            data {0};
            set {0};
            format {1} {2};
            run;
            '''.format(input_tbl_name, self.timeid, self.timeid_format)

            conn.retrieve('dataStep.runCode', _messagelevel='error',
                          code=fmt_code)
        else:
            fmt_code = '''
            data {0};
            set {0};
            run;
            '''.format(input_tbl_name)

            conn.retrieve('dataStep.runCode', _messagelevel='error',
                          code=fmt_code)

        tbl_colinfo = self.columninfo().ColumnInfo

        if not isinstance(self.timeseries, list):
            self.timeseries = [self.timeseries]

        if set(self.timeseries).issubset(tbl_colinfo.Column):
            char_to_double(conn, tbl_colinfo, input_tbl_name,
                           input_tbl_name, self.timeseries)
        else:
            raise ValueError('''One or more variables specified in 'timeseries'
            do not exist in the input table.
            ''')

        if self.extra_columns is not None:
            if not isinstance(self.extra_columns, list):
                self.extra_columns = [self.extra_columns]

            keepcol = [self.timeid]
            keepcol.extend(self.timeseries + self.extra_columns)
            keepcol = ' '.join(keepcol)

            keep_col_sascode = '''
            data {0};
            set {0};
            keep {1};
            run;
            '''.format(input_tbl_name, keepcol)

            conn.retrieve('dataStep.runCode', _messagelevel='error',
                          code=keep_col_sascode)

        print('NOTE: Timeseries formatting is completed.')

    def timeseries_accumlation(self, acc_interval='day', timeid=None,
                               timeseries=None, groupby=None,
                               extra_num_columns=None, default_ts_acc='sum',
                               default_col_acc='avg', acc_method_byvar=None):
    def prepare_subsequences(self, seq_len, target, predictor_timeseries=None,
                             timeid=None, groupby=None,
                             input_length_name='xlen', target_length_name='ylen',
                             missing_handling='drop'):
        '''
        Prepare the subsequences that will be passed into the RNN

        Parameters
        ----------
        seq_len : int
            Subsequence length that will be passed to the RNN.
        target : string
            The target variable for the RNN. Currently only a univariate
            target is supported, so only a string is accepted here, not a
            list of strings.
        predictor_timeseries : string or list-of-strings, optional
            Timeseries that will be used to predict the target. They will be
            preprocessed into subsequences as well.
            If None, it will take the target timeseries as the predictor,
            which corresponds to auto-regressive models.
            Default: None
        timeid : string, optional
            Specifies the column name for the timeid.
            If None, it will take the timeid specified in timeseries_accumlation.
            Default: None
        groupby : string or list-of-strings, optional
            The groupby variables. If None, it will take the groupby specified
            in timeseries_accumlation.
            Default: None
        input_length_name : string, optional
            The column name in the CASTable specifying the input sequence length.
            Default: xlen
        target_length_name : string, optional
            The column name in the CASTable specifying the target sequence length.
            Currently only target length 1 is supported for numeric sequences.
            Default: ylen
        missing_handling : string, optional
            How to handle missing values in the subsequences.
            Default: drop

        '''
        tbl_colinfo = self.columninfo().ColumnInfo

        input_tbl_params = self.to_outtable_params()
        input_tbl_name = input_tbl_params['name']

        conn = self.get_connection()

        if timeid is not None:
            self.timeid = timeid
        elif self.timeid is None:
            raise ValueError('''timeid is not specified''')

        if self.timeid not in tbl_colinfo.Column.values:
            raise ValueError('''timeid does not exist in the input table''')

        if groupby is not None:
            self.groupby_var = groupby

        if self.groupby_var is None:
            self.groupby_var = []
        elif not isinstance(self.groupby_var, list):
            self.groupby_var = [self.groupby_var]

        if set(self.groupby_var).issubset(tbl_colinfo.Column):
            int_to_double(conn, tbl_colinfo, input_tbl_name,
                          input_tbl_name, self.groupby_var)
        else:
            raise ValueError('''One or more variables specified in 'groupby'
            do not exist in the input table.
            ''')

        if isinstance(target, list):
            if len(target) > 1:
                raise DLPyError('''currently only univariate targets are supported''')
        else:
            target = [target]

        if predictor_timeseries is None:
            predictor_timeseries = target
        elif not isinstance(predictor_timeseries, list):
            predictor_timeseries = [predictor_timeseries]

        if set(target).issubset(predictor_timeseries):
            independent_pred = [var for var in predictor_timeseries
                                if var not in target]
            self.auto_regressive = True
        else:
            independent_pred = predictor_timeseries
            self.auto_regressive = False

        if not set(target).issubset(tbl_colinfo.Column):
            raise ValueError('''invalid target variable''')

        if len(independent_pred) > 0:
            if not set(independent_pred).issubset(tbl_colinfo.Column):
                raise ValueError('''columns in predictor_timeseries are absent
                from the accumulated timeseries table.''')

        if self.timeseries is None:
            warnings.warn('''timeseries has not been formatted by
            timeseries_formatting, consider reloading the data and using
            timeseries_formatting to format the data, unless the data has
            already been pre-formatted.''')
        else:
            if not set(target).issubset(self.timeseries):
                warnings.warn('''target is not in the pre-formatted timeseries,
                consider reloading the data and using timeseries_formatting
                to format the data, unless the data has already been
                pre-formatted.''')

            if len(independent_pred) > 0:
                if not set(independent_pred).issubset(self.timeseries):
                    warnings.warn('''
                    some of predictor_timeseries are not in the pre-accumulated timeseries,
                    consider reloading the data and using timeseries_accumulation
                    to accumulate the data, unless the data has already been
                    pre-formatted.
                    ''')

        self.target = target[0]
        self.independent_pred = independent_pred
        self.seq_len = seq_len

        if self.seq_len < 1:
            raise ValueError('''RNN sequence length needs to be at least 1''')

        sasCode = 'data {0}; set {0}; by {1} {2};'.format(
            input_tbl_name, ' '.join(self.groupby_var), self.timeid)

        if self.seq_len > 1:
            for var in self.independent_pred:
                sasCode += self.create_lags(var, self.seq_len - 1, self.groupby_var)

        if self.auto_regressive:
            sasCode += self.create_lags(self.target, self.seq_len, self.groupby_var)

        sasCode += '{0} = {1};'.format(input_length_name, self.seq_len)
        sasCode += '{} = 1;'.format(target_length_name)  # Currently only support one timestep numeric output.

        if missing_handling == 'drop':
            sasCode += 'if not cmiss(of _all_) then output {};'.format(input_tbl_name)

        sasCode += 'run;'

        if len(self.groupby_var) == 0:
            conn.retrieve('dataStep.runCode', _messagelevel='error',
                          code=sasCode, single='Yes')
        else:
            conn.retrieve('dataStep.runCode', _messagelevel='error',
                          code=sasCode)

        self.input_vars = []

        for i in range(self.seq_len):
            if self.auto_regressive:
                self.input_vars.append('{0}_lag{1}'.format(self.target, i + 1))

            for var in self.independent_pred:
                if i == 0:
                    self.input_vars.append(var)
                else:
                    self.input_vars.append('{0}_lag{1}'.format(var, i))

        self.input_vars.reverse()

        self.tokensize = len(predictor_timeseries)

        self.sequence_opt = dict(input_length=input_length_name,
                                 target_length=target_length_name,
                                 token_size=self.tokensize)

        self.inputs_target = dict(inputs=self.input_vars, target=self.target)

        print('NOTE: timeseries subsequences are prepared with subsequence length = {}'.format(seq_len))

    @property
    def timeid_type(self):
        tbl_colinfo = self.columninfo().ColumnInfo
        timeid_type = self.identify_coltype(self.timeid, tbl_colinfo)
        return timeid_type

    @staticmethod
    def identify_coltype(col, tbl_colinfo):
        if col not in tbl_colinfo.Column.values:
            raise ValueError('''variable {} does not exist in input table.
            '''.format(col))

        if 'Format' in tbl_colinfo.columns:
            cas_timeid_fmt = tbl_colinfo.Format[tbl_colinfo.Column == col].values[0]
        else:
            cas_timeid_fmt = None

        col_type = tbl_colinfo.Type[tbl_colinfo.Column == col].values[0]

        if cas_timeid_fmt:
            for pattern in swat.options.cas.dataset.date_formats:
                if re.match(r'{}\Z'.format(pattern), cas_timeid_fmt):
                    col_type = 'date'
                    break

            for pattern in swat.options.cas.dataset.datetime_formats:
                if re.match(r'{}\Z'.format(pattern), cas_timeid_fmt):
                    if col_type == 'date':
                        raise DLPyError('''{} format in CASTable is ambiguous,
                        and can match both the SAS date and SAS datetime formats'''.format(col))
                    else:
                        col_type = 'datetime'
                        break

        return col_type

    def timeseries_partition(self, training_start=None, validation_start=None,
                             testing_start=None, end_time=None,
                             partition_var_name='split_id',
                             traintbl_suffix='train',
                             validtbl_suffix='valid',
                             testtbl_suffix='test'):
        '''
        Split the dataset into training, validation and testing sets

        Parameters
        ----------
        training_start : float or :class:`datetime.datetime` or :class:`datetime.date`, optional
            The training set starting time stamp. If None, the training set
            starts at the earliest observation record in the table.
            Default: None
        validation_start : float or :class:`datetime.datetime` or :class:`datetime.date`, optional
            The validation set starting time stamp. The training set
            ends right before it. If None, there is no validation set,
            and the training set ends right before the start of
            the testing set.
            Default: None
        testing_start : float or :class:`datetime.datetime` or :class:`datetime.date`, optional
            The testing set starting time stamp. The validation set
            (or training set if the validation set is not specified) ends
            right before it. If None, there is no testing set, and
            the validation set (or training set if the validation set is
            not set) ends at the end_time.
            Default: None
        end_time : float or :class:`datetime.datetime` or :class:`datetime.date`, optional
            The end time for the table.
        partition_var_name : string, optional
            The name of the indicator column that indicates training,
            testing and validation.
            Default: 'split_id'.
        traintbl_suffix : string, optional
            The suffix name of the CASTable for the training set.
            Default: 'train'
        validtbl_suffix : string, optional
            The suffix name of the CASTable for the validation set.
            Default: 'valid'
        testtbl_suffix : string, optional
            The suffix name of the CASTable for the testing set.
Default: 'test' Returns ------- ( training TimeseriesTable, validation TimeseriesTable, testing TimeseriesTable ) ''' self.partition_var_name = partition_var_name conn = self.get_connection() training_start = self.convert_to_sas_time_format(training_start, self.timeid_type) validation_start = self.convert_to_sas_time_format(validation_start, self.timeid_type) testing_start = self.convert_to_sas_time_format(testing_start, self.timeid_type) end_time = self.convert_to_sas_time_format(end_time, self.timeid_type) if testing_start is None: testing_start = end_time test_statement = ';' else: test_statement = self.generate_splitting_code( self.timeid, testing_start, end_time, True, self.partition_var_name, 'test') if validation_start is None: validation_start = testing_start valid_statement = ';' else: if testing_start == end_time: valid_statement = self.generate_splitting_code( self.timeid, validation_start, testing_start, True, self.partition_var_name, 'valid') else: valid_statement = self.generate_splitting_code( self.timeid, validation_start, testing_start, False, self.partition_var_name, 'valid') if validation_start == end_time: train_statement = self.generate_splitting_code( self.timeid, training_start, validation_start, True, self.partition_var_name, 'train') else: train_statement = self.generate_splitting_code( self.timeid, training_start, validation_start, False, self.partition_var_name, 'train') input_tbl_params = self.to_outtable_params() input_tbl_name = input_tbl_params['name'] traintbl_name = '_'.join([input_tbl_name, traintbl_suffix]) validtbl_name = '_'.join([input_tbl_name, validtbl_suffix]) testtbl_name = '_'.join([input_tbl_name, testtbl_suffix]) splitting_code = ''' data {4} {5} {6}; set {0}; {1} {2} {3} if {7} = 'train' then output {4}; if {7} = 'valid' then output {5}; if {7} = 'test' then output {6}; run; '''.format(input_tbl_name, train_statement, valid_statement, test_statement, traintbl_name, validtbl_name, testtbl_name, self.partition_var_name) conn.retrieve('dataStep.runCode', _messagelevel='error', code=splitting_code) train_out = dict(name=traintbl_name, timeid=self.timeid, groupby_var=self.groupby_var, sequence_opt=self.sequence_opt, inputs_target=self.inputs_target) valid_out = dict(name=validtbl_name, timeid=self.timeid, groupby_var=self.groupby_var, sequence_opt=self.sequence_opt, inputs_target=self.inputs_target) test_out = dict(name=testtbl_name, timeid=self.timeid, groupby_var=self.groupby_var, sequence_opt=self.sequence_opt, inputs_target=self.inputs_target) train_out_tbl = TimeseriesTable(**train_out) train_out_tbl.set_connection(conn) valid_out_tbl = TimeseriesTable(**valid_out) valid_out_tbl.set_connection(conn) test_out_tbl = TimeseriesTable(**test_out) test_out_tbl.set_connection(conn) print('NOTE: Training set has {} observations'.format(train_out_tbl.shape[0])) print('NOTE: Validation set has {} observations'.format(valid_out_tbl.shape[0])) print('NOTE: Testing set has {} observations'.format(test_out_tbl.shape[0])) return train_out_tbl, valid_out_tbl, test_out_tbl @staticmethod def generate_splitting_code(timeid, start, end, right_inclusive, partition_var_name, partition_val): if (start is None) and (end is not None): if right_inclusive: statement = '''if {0} <= {1} then {2} = '{3}';'''.format( timeid, end, partition_var_name, partition_val) else: statement = '''if {0} < {1} then {2} = '{3}';'''.format( timeid, end, partition_var_name, partition_val) elif (start is not None) and (end is None): statement = '''if {0} >= {1} then {2} = '{3}';'''.format( 
                timeid, start, partition_var_name, partition_val)
        elif (start is not None) and (end is not None):
            if right_inclusive:
                statement = '''if {0} >= {1} and {0} <= {2} then {3} = '{4}';'''.format(
                    timeid, start, end, partition_var_name, partition_val)
            else:
                statement = '''if {0} >= {1} and {0} < {2} then {3} = '{4}';'''.format(
                    timeid, start, end, partition_var_name, partition_val)
        else:
            statement = '''{0} = '{1}';'''.format(partition_var_name, partition_val)

        return statement

    @staticmethod
    def convert_to_sas_time_format(python_time, sas_format_type):
        if sas_format_type == 'date':
            if isinstance(python_time, datetime.date):
                sas_time_str = 'mdy({0},{1},{2})'.format(python_time.month,
                                                         python_time.day,
                                                         python_time.year)
                return sas_time_str
            elif python_time is None:
                return None
            else:
                raise ValueError('''The timeid type is the date format, so the
                input python time variable should be in date or datetime format''')
        elif sas_format_type == 'datetime':
            if isinstance(python_time, datetime.datetime):
                sas_time_str = 'dhms(mdy({0},{1},{2}), {3}, {4}, {5})'.format(
                    python_time.month, python_time.day, python_time.year,
                    python_time.hour, python_time.minute, python_time.second)
                return sas_time_str
            elif isinstance(python_time, datetime.date):
                sas_time_str = 'dhms(mdy({0},{1},{2}), 0, 0, 0)'.format(
                    python_time.month, python_time.day, python_time.year)
                return sas_time_str
            elif python_time is None:
                return None
            else:
                raise ValueError('''The timeid type is the datetime format, so the
                input python time variable should be in date or datetime format''')
        elif sas_format_type == 'double':
            if isinstance(python_time, numbers.Real):
                return python_time
            elif python_time is None:
                return None
            else:
                raise ValueError('''The timeid type is double, so the input
                python time variable should be int or float''')
        else:
            raise DLPyError('''timeid format in CASTable is wrong, consider
            reloading the table and formatting it with timeseries_formatting''')

    @staticmethod
    def create_lags(varname, nlags, byvar):
        if not isinstance(byvar, list):
            byvar = [byvar]

        byvar_strlist = ['first.{}'.format(var) for var in byvar]

        sasCode = ''

        for i in range(nlags):
            if i == 0:
                sasCode += '{0}_lag{1} = lag({0});'.format(varname, i + 1)
            else:
                sasCode += '{0}_lag{1} = lag({0}_lag{2});'.format(varname, i + 1, i)

            if len(byvar) > 0:
                sasCode += 'if ' + ' or '.join(byvar_strlist)
                sasCode += ' then {0}_lag{1} = .;'.format(varname, i + 1)

        return sasCode

    @staticmethod
    def find_file_caslib(conn, path):
        '''
        Check whether the specified path is in the caslibs of the current session

        Parameters
        ----------
        conn : CAS
            Specifies the CAS connection object
        path : string
            Specifies the name of the path.

        Returns
        -------
        ( caslib_name, rest_path )

        caslib_name is the name of the caslib that contains the path
        (None if no matching caslib is found). rest_path is the remainder
        of the path, relative to that caslib.

        '''
        paths = conn.caslibinfo().CASLibInfo.Path.tolist()
        caslibs = conn.caslibinfo().CASLibInfo.Name.tolist()
        subdirs = conn.caslibinfo().CASLibInfo.Subdirs.tolist()

        server_type = get_cas_host_type(conn).lower()

        if server_type.startswith("lin") or server_type.startswith("osx"):
            sep = '/'
        else:
            sep = '\\'

        for i, directory in enumerate(paths):
            if path.startswith(directory) and (subdirs[i] == 1):
                rest_path = path[len(directory):]
                caslibname = caslibs[i]
                return (caslibname, rest_path)
            elif path.startswith(directory) and (subdirs[i] == 0):
                rest_path = path[len(directory):]
                if sep in rest_path:
                    continue
                else:
                    caslibname = caslibs[i]
                    return (caslibname, rest_path)

        return (None, None)
        '''
        Accumulate the TimeseriesTable into regular consecutive intervals

        Parameters
        ----------
        acc_interval : string, optional
            The accumulation interval, such as 'year', 'qtr', 'month', 'week',
            'day', 'hour', 'minute', 'second'.
        timeid : string, optional
            Specifies the column name for the timeid.
            If None, it will take the timeid specified in timeseries_formatting.
            Default: None
        timeseries : string or list-of-strings, optional
            Specifies the column name for the timeseries, that will be part of
            the input or output of the RNN. If str, then it is a univariate
            time series. If list of strings, then it is a multivariate timeseries.
            If None, it will take the timeseries specified in
            timeseries_formatting.
            Default: None
        groupby : string or list-of-strings, optional
            The groupby variables.
            Default: None
        extra_num_columns : string or list-of-strings, optional
            Specifies the additional numeric columns to be included for
            accumulation. These columns can include static features, and
            might be accumulated differently than the timeseries that will
            be used in the RNN. If None, it means no additional numeric
            columns will be accumulated for later processing and modeling.
            Default: None
        default_ts_acc : string, optional
            Default accumulation method for timeseries.
            Default: sum
        default_col_acc : string, optional
            Default accumulation method for additional numeric columns.
            Default: avg
        acc_method_byvar : dict, optional
            It specifies the accumulation method for individual columns,
            if the method is different from the default. It has the
            following structure:
            {'column1 name': 'accumulation method1',
             'column2 name': 'accumulation method2', ...}
            Default: None

        '''
        if (timeid is None) and (self.timeid is None):
            raise DLPyError('''timeid is not specified, consider specifying
            and formatting it with timeseries_formatting''')
        elif (timeid is not None) and (timeid != self.timeid):
            warnings.warn('''timeid has not been formatted by
            timeseries_formatting, consider reloading the data and using
            timeseries_formatting to format the data, unless the data has
            already been pre-formatted.''')
            self.timeid = timeid

        if timeseries is None:
            if ((hasattr(self, 'timeseries') and self.timeseries is None)
                    or (not hasattr(self, 'timeseries'))):
                raise DLPyError('''timeseries is not specified, consider
                specifying and formatting it with timeseries_formatting''')
        else:
            if not isinstance(timeseries, list):
                timeseries = [timeseries]

            if ((hasattr(self, 'timeseries') and (self.timeseries is None))
                    or (not hasattr(self, 'timeseries'))):
                warnings.warn('''timeseries has not been formatted by
                timeseries_formatting, consider reloading the data and using
                timeseries_formatting to format the data, unless the data
                has already been pre-formatted.''')
            elif not set(timeseries).issubset(self.timeseries):
                warnings.warn('''timeseries contains variable(s) that have not
                been formatted by timeseries_formatting, consider reloading
                the data and using timeseries_formatting to format the data,
                unless the data has already been pre-formatted.''')

            self.timeseries = timeseries

        self.groupby_var = groupby
        self.extra_num_columns = extra_num_columns

        input_tbl_params = self.to_outtable_params()
        input_tbl_name = input_tbl_params['name']

        conn = self.get_connection()
        conn.loadactionset('timeData')

        tbl_colinfo = self.columninfo().ColumnInfo

        if self.groupby_var is None:
            self.groupby_var = []
        elif not isinstance(self.groupby_var, list):
            self.groupby_var = [self.groupby_var]

        if set(self.groupby_var).issubset(tbl_colinfo.Column):
            int_to_double(conn, tbl_colinfo, input_tbl_name,
                          input_tbl_name, self.groupby_var)
        else:
            raise
ValueError('''One or more variables specified in 'groupby' do not exist in the input table. ''') tbl_colinfo = self.columninfo().ColumnInfo #Check timeid is in the input columns if self.timeid not in tbl_colinfo.Column.values: raise ValueError('''variable 'timeid' does not exist in input table. ''') #Check timeseries is in the input columns if not isinstance(self.timeseries, list): self.timeseries = [self.timeseries] if not set(self.timeseries).issubset(tbl_colinfo.Column): raise ValueError('''One or more variables specified in 'timeseries' do not exist in the input table. ''') #Check extra_num_columns is in the input columns if self.extra_num_columns is None: self.extra_num_columns = [] elif not isinstance(self.extra_num_columns, list): self.extra_num_columns = [self.extra_num_columns] if not set(self.extra_num_columns).issubset(tbl_colinfo.Column): raise ValueError('''One or more variables specified in 'extra_num_columns' do not exist in the input table. ''') if self.timeid_type == 'datetime': acc_interval = 'dt' + acc_interval elif ((self.timeid_type == 'date') and (acc_interval.lower() in ['hour', 'minute', 'second'])): raise ValueError('''the acc_interval has higher frequency than day, yet the timeid variable is in the date format. ''') if acc_method_byvar is None: acc_method_byvar = {} serieslist = [] for ts in self.timeseries: if ts in acc_method_byvar.keys(): method_dict = {'acc':acc_method_byvar[ts],'name':ts} serieslist.append(method_dict) else: method_dict = {'acc':default_ts_acc,'name':ts} serieslist.append(method_dict) for extra_col in self.extra_num_columns: if extra_col in self.timeseries: warnings.warn(''' columns in extra_num_columns are also found in timeseries, and will be ignored. ''') continue elif extra_col in acc_method_byvar.keys(): method_dict = {'acc':acc_method_byvar[extra_col],'name':extra_col} serieslist.append(method_dict) else: method_dict = {'acc':default_col_acc,'name':extra_col} serieslist.append(method_dict) acc_result = conn.retrieve('timedata.timeseries', _messagelevel='error', table={'groupby':self.groupby_var,'name': input_tbl_name}, series=serieslist, timeid=self.timeid, interval=acc_interval, trimid='BOTH', sumout=dict(name=input_tbl_name + '_summary', replace=True), casout=dict(name=input_tbl_name, replace=True)) if acc_interval.startswith('dt'): print('NOTE: Timeseries are accumulated to the frequency of {}'.format(acc_interval[2:])) else: print('NOTE: Timeseries are accumulated to the frequency of {}'.format(acc_interval))
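# As a quick illustration of plot_timeseries above: it also accepts a plain
# pandas DataFrame, in which case no CAS session is required. The column
# names here are invented:
import pandas as pd
from dlpy.timeseries import plot_timeseries  # assumed import path

df = pd.DataFrame({
    'date': pd.date_range('2020-01-01', periods=30, freq='D'),
    'sales': range(30),
})
fig, ax = plot_timeseries(df, 'date', 'sales',
                          xlabel='Date', ylabel='Sales',
                          title='Daily sales', figsize=(8, 4))
fig.savefig('sales.png')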
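# The intended end-to-end flow of TimeseriesTable (format, accumulate, build
# subsequences, partition) then looks roughly like this. A sketch only: it
# assumes a reachable CAS server, and the host, port, and column names are
# placeholders:
import datetime

import pandas as pd
import swat
from dlpy.timeseries import TimeseriesTable  # assumed import path

conn = swat.CAS('cas-host.example.com', 5570)  # hypothetical server

raw = pd.DataFrame({
    'date': pd.date_range('2020-01-01', periods=120, freq='D').strftime('%m/%d/%Y'),
    'sales': range(120),
})

tbl = TimeseriesTable.from_pandas(conn, raw)
tbl.timeseries_formatting(timeid='date', timeseries='sales',
                          timeid_informat='MMDDYY10.', timeid_format='MMDDYY10.')
tbl.timeseries_accumlation(acc_interval='week')  # method name as defined above
tbl.prepare_subsequences(seq_len=4, target='sales')

train, valid, test = tbl.timeseries_partition(
    validation_start=datetime.date(2020, 3, 1),
    testing_start=datetime.date(2020, 4, 1))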
kbdi_legacy.rs
use kbdi::*; use structopt::StructOpt; #[derive(StructOpt)] #[structopt( about = "Configure Windows registry values for keyboards", author = "Brendan Molloy <[email protected]>" )] enum Opt { #[structopt( name = "keyboard_install", about = "Installs a keyboard layout to the registry" )] KeyboardInstall { /// Language tag in BCP 47 format (eg: sma-Latn-NO) #[structopt(short, long)] tag: String, /// Layout name (eg: Skolt Sami (Norway)) #[structopt(short = "n", long)] layout: String, /// Product code GUID (eg: {42c3de12-28...}) #[structopt(short, long)] guid: String, /// Name of keyboard DLL (eg: kbdfoo01.dll) #[structopt(short, long)] dll: String, /// Native language name, if required (eg: Norsk) #[structopt(short, long)] lang: Option<String>, /// Enable keyboard immediately after installing #[structopt(short, long)] enable: bool, }, #[structopt( name = "keyboard_uninstall", about = "Uninstalls a keyboard layout from the registry" )] KeyboardUninstall { /// Product code GUID (eg: {42c3de12-28...}) guid: String, }, #[structopt(name = "keyboard_enable", about = "Enables a keyboard for a user")] KeyboardEnable { /// Language tag in BCP 47 format (eg: sma-Latn-NO) #[structopt(short, long)] tag: String, /// Product code GUID (eg: {42c3de12-28...}) #[structopt(short, long)] guid: String, }, #[structopt(name = "language_query", about = "Get data about language tag")] LanguageQuery { /// Language tag in BCP 47 format (eg: sma-Latn-NO) tag: String, }, #[structopt( name = "keyboard_list", about = "Lists all keyboards installed on the system" )] KeyboardList, #[structopt(about = "Remove empty languages and invalid keyboards")] Clean, } fn
() {
    let opt = Opt::from_args();

    match opt {
        Opt::KeyboardInstall {
            tag,
            layout,
            guid,
            dll,
            lang,
            enable,
        } => {
            println!("Installing keyboard...");
            match keyboard::install(&tag, &layout, &guid, &dll, lang.as_deref()) {
                Ok(_) => (),
                Err(err) => match err {
                    keyboard::Error::AlreadyExists => {
                        println!("Keyboard already installed.");
                    }
                    // Format the error: panicking with a non-string payload is
                    // deprecated and rejected in the 2021 edition.
                    _ => panic!("{:?}", err),
                },
            }
            if enable {
                println!("Enabling keyboard...");
                keyboard::enable(&tag, &guid).unwrap();
            }
        }
        Opt::KeyboardUninstall { guid } => {
            keyboard::uninstall(&guid).unwrap();
        }
        Opt::KeyboardEnable { tag, guid } => {
            keyboard::enable(&tag, &guid).unwrap();
        }
        Opt::LanguageQuery { tag } => {
            println!("{}", query_language(&tag));
        }
        Opt::KeyboardList => {
            for k in keyboard::installed().iter() {
                println!("{}", k);
            }
        }
        Opt::Clean => {
            clean().unwrap();
        }
    }
}
main
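For illustration, a hedged sketch of invoking this CLI from Python; the binary name kbdi-legacy and the GUID value are placeholders, while the subcommand and flag names follow the structopt definitions above:

import subprocess

# Hypothetical binary name for this crate's CLI; substitute the real artifact.
KBDI = "kbdi-legacy"

# Long flags are derived from the field names in the structopt attributes above.
subprocess.run([
    KBDI, "keyboard_install",
    "--tag", "sma-Latn-NO",                  # BCP 47 language tag
    "--layout", "Skolt Sami (Norway)",       # layout display name
    "--guid", "{00000000-0000-0000-0000-000000000000}",  # placeholder product GUID
    "--dll", "kbdfoo01.dll",
    "--enable",                              # enable immediately after install
], check=True)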
uva_701.py
import sys
import math
        pow(2, i)


if __name__ == '__main__':
    main()
def main():
    for i in range(100000):
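The visible halves of uva_701.py only iterate over pow(2, i) and discard the result; a hedged sketch of what UVa 701 (The Archeologists' Dilemma) presumably needs, assuming the goal is the smallest exponent E such that 2**E starts with the given digits and strictly more digits were lost than given:

def solve(prefix: str) -> int:
    """Smallest E with str(2**E) starting with `prefix` and strictly more
    lost digits than given ones (assumed UVa 701 condition)."""
    min_len = 2 * len(prefix) + 1
    e, value = 0, 1
    while True:
        s = str(value)
        if len(s) >= min_len and s.startswith(prefix):
            return e
        e += 1
        value *= 2

print(solve("1"))  # 7, since 2**7 = 128: one digit given, two digits lost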
eval.rs
//! Evaluation of a Nickel term. //! The implementation of the Nickel abstract machine which evaluates a term. Note that this //! machine is not currently formalized somewhere and is just a convenient name to designate the //! current implementation. //! //! # The Nickel Abstract Machine //! The abstract machine is a stack machine composed of the following elements: //! - The term being currently evaluated //! - The main stack, storing arguments, thunks and pending computations //! - A pair of [environments](type.Environment.html), mapping identifiers to [closures](type.Closure.html): //! * The global environment contains builtin functions accessible from anywhere, and alive //! during the whole evaluation //! * The local environment contains the variables in scope of the current term and is subject //! to garbage collection (currently reference counting based) //! - A [callstack](type.CallStack.html), mainly for error reporting purpose //! //! Depending on the shape of the current term, the following actions are preformed: //! //! ## Core calculus //! - **Var(id)**: the term bound to `id` in the environment is fetched, and an update thunk is //! pushed on the stack to indicate that once this term has been evaluated, the content of the //! variable must be updated //! - **App(func, arg)**: a closure containing the argument and the current environment is pushed //! on the stack, and the applied term `func` is evaluated //! - **Let(id, term, body)**: `term` is bound to `id` in the environment, and the machine proceeds with the evaluation of the body //! - **Fun(id, body)**: Try to pop an argument from the stack. If there is some, we bound it to //! `id` in the environment, and proceed with the body of the function. Otherwise, we are done: the //! end result is an unapplied function //! - **Thunk on stack**: If the evaluation of the current term is done, and there is one (or //! several) thunk on the stack, this means we have to perform an update. Consecutive thunks are //! popped from the stack and are updated to point to the current evaluated term. //! - **Import**: Import must have been resolved before the evaluation starts. An unresolved import //! causes an [`InternalError`](../error/enum.EvalError.html#variant.InternalError). A resolved //! import, identified by a `FileId`, is retrieved from the import resolver and evaluation proceeds. //! //! ## Contracts //! //! - **`Assume(type, label, term)`** (or `Promise(type, label, term)`): replace the current term //! with the contract corresponding to `types`, applied to label and term (`contract label term`). //! //! ## Operators //! //! Operators are strict by definition. To evaluate say `exp1 + exp2`, the following steps //! have to be performed: //! - `exp1` needs to be evaluated. The result must be saved somewhere, together with the resulting //! environment //! - `exp2`: same thing for `exp2` //! - Finally, the implementation of `+` can proceed with the computation //! //! We detail the case of binary operators, as the case of unary ones is similar and simpler. //! //! - **Op(op, first, second)**: push an `OpFirst` element on the stack, which saves the operator //! `op`, the second argument `second` and the current environment, and proceed with the evaluation //! of `first` //! - **OpFirst on stack**: if the evaluation of the current term is done and there is an `OpFirst` //! marker on the stack, then: //! 1. Extract the saved operator, the second argument and the environment `env2` from the marker //! 2. 
Push an `OpSecond` marker, saving the operator and the evaluated form of the first //! argument with its environment //! 3. Proceed with the evaluation of the second argument in environment `env2` //! - **OpSecond on stack**: once the second term is evaluated, we can get back the operator and //! the first term evaluated, and forward all both arguments evaluated and their respective //! environment to the specific implementation of the operator (located in //! [operation](../operation/index.html), or in [merge](../merge/index.html) for `merge`). //! //! ## Enriched values //! //! The evaluation of enriched values is controlled by the parameter `enriched_strict`. If it is //! set to true (which is usually the case), the machine tries to extract a simple value from it: //! - **Contract**: raise an error. This usually means that an access to a field was attempted, //! and that this field had a contract to satisfy, but it was never defined. //! - **Default(value)**: an access to a field which has a default value. Proceed with the //! evaluation of this value //! - **ContractDefault(type, label, value)**: same as above, but the field also has an attached //! contract. Proceed with the evaluation of `Assume(type, label, value)` to ensure that the //! default value satisfies this contract. //! //! If `enriched_strict` is set to false, as it is when evaluating `merge`, the machine does not //! evaluate enriched values further, and consider the term evaluated. //! //! # Garbage collection //! //! Currently the machine relies on Rust's reference counting to manage memory. Precisely, the //! environment stores `Rc<RefCell<Closure>>` objects, which are reference-counted pointers to a //! mutable memory cell. This means that we do not deep copy everything everywhere, but this is //! probably suboptimal for a functional language and is unable to collect cyclic data, which may //! appear inside recursive records in the future. An adapted garbage collector is probably //! something to consider at some point. use crate::cache::ImportResolver; use crate::environment::Environment as GenericEnvironment; use crate::error::EvalError; use crate::identifier::Ident; use crate::mk_app; use crate::operation::{continuate_operation, OperationCont}; use crate::position::TermPos; use crate::stack::Stack; use crate::term::{make as mk_term, BinaryOp, MetaValue, RichTerm, StrChunk, Term, UnaryOp}; use std::cell::{Ref, RefCell, RefMut}; use std::rc::{Rc, Weak}; /// The state of a thunk. /// /// When created, a thunk is flagged as suspended. When accessed for the first time, a corresponding /// [`ThunkUpdateFrame`](./struct.ThunkUpdateFrame.html) is pushed on the stack and the thunk is /// flagged as black-hole. This prevents direct infinite recursions, since if a thunk is /// re-accessed while still in a black-hole state, we are sure that the evaluation will loop, and /// we can thus error out before overflowing the stack or looping forever. Finally, once the /// content of a thunk has been evaluated, the thunk is updated with the new value and flagged as /// evaluated, so that future accesses won't even push an update frame on the stack. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum ThunkState { Blackholed, Suspended, Evaluated, } /// The mutable data stored inside a thunk. #[derive(Clone, Debug, PartialEq)] pub struct ThunkData { closure: Closure, state: ThunkState, } impl ThunkData { pub fn new(closure: Closure) -> Self { ThunkData { closure, state: ThunkState::Suspended, } } } /// A thunk. 
/// /// A thunk is a shared suspended computation. It is the primary device for the implementation of /// lazy evaluation. #[derive(Clone, Debug, PartialEq)] pub struct Thunk { data: Rc<RefCell<ThunkData>>, ident_kind: IdentKind, } /// A black-holed thunk was accessed, which would lead to infinite recursion. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct BlackholedError; impl Thunk { pub fn new(closure: Closure, ident_kind: IdentKind) -> Self { Thunk { data: Rc::new(RefCell::new(ThunkData::new(closure))), ident_kind, } } pub fn state(&self) -> ThunkState { self.data.borrow().state } /// Set the state to evaluated. pub fn set_evaluated(&mut self) { self.data.borrow_mut().state = ThunkState::Evaluated; } /// Generate an update frame from this thunk and set the state to `Blackholed`. Return an /// error if the thunk was already black-holed. pub fn mk_update_frame(&mut self) -> Result<ThunkUpdateFrame, BlackholedError> { if self.data.borrow().state == ThunkState::Blackholed { return Err(BlackholedError); } self.data.borrow_mut().state = ThunkState::Blackholed; Ok(ThunkUpdateFrame { data: Rc::downgrade(&self.data), ident_kind: self.ident_kind, }) } /// Immutably borrow the inner closure. Panic if there is another active mutable borrow. pub fn borrow(&self) -> Ref<'_, Closure> { let (closure, _) = Ref::map_split(self.data.borrow(), |data| { let ThunkData { ref closure, ref state, } = data; (closure, state) }); closure } /// Mutably borrow the inner closure. Panic if there is any other active borrow. pub fn borrow_mut(&mut self) -> RefMut<'_, Closure> { let (closure, _) = RefMut::map_split(self.data.borrow_mut(), |data| { let ThunkData { ref mut closure, ref mut state, } = data; (closure, state) }); closure } /// Get an owned clone of the inner closure. pub fn get_owned(&self) -> Closure { self.data.borrow().closure.clone() } pub fn ident_kind(&self) -> IdentKind { self.ident_kind } /// Consume the thunk and return an owned closure. Avoid cloning if this thunk is the only /// reference to the inner closure. pub fn into_closure(self) -> Closure { match Rc::try_unwrap(self.data) { Ok(inner) => inner.into_inner().closure, Err(rc) => rc.borrow().clone().closure, } } } /// A thunk update frame. /// /// A thunk update frame is put on the stack whenever a variable is entered, such that once this /// variable is evaluated, the corresponding thunk can be updated. It is similar to a thunk but it /// holds a weak reference to the inner closure, to avoid unnecessarily keeping the underlying /// closure alive. #[derive(Clone, Debug)] pub struct ThunkUpdateFrame { data: Weak<RefCell<ThunkData>>, ident_kind: IdentKind, } impl ThunkUpdateFrame { /// Update the corresponding thunk with a closure. Set the state to `Evaluated` /// /// # Return /// /// - `true` if the thunk was successfully updated /// - `false` if the corresponding closure has been dropped since pub fn update(self, closure: Closure) -> bool { if let Some(data) = Weak::upgrade(&self.data) { *data.borrow_mut() = ThunkData { closure, state: ThunkState::Evaluated, }; true } else { false } } } /// A call stack, saving the history of function calls. /// /// In a lazy language as Nickel, there are no well delimited stack frames due to how function /// application is evaluated. This can make things hard to debug for the user, hence additional /// information about the history of function calls is stored in the call stack, for error /// reporting and debugging purposes. pub type CallStack = Vec<StackElem>; /// A call stack element. 
#[derive(Debug, Clone, Eq, PartialEq)] pub enum StackElem { /// A function body was entered. The position is the position of the original application. App(TermPos), /// A variable was entered. Var(IdentKind, Ident, TermPos), } /// Kind of an identifier. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum IdentKind { Let(), Lam(), Record(), } /// A closure, a term together with an environment. #[derive(Clone, Debug, PartialEq)] pub struct Closure { pub body: RichTerm, pub env: Environment, } impl Closure { pub fn atomic_closure(body: RichTerm) -> Closure { Closure { body, env: Environment::new(), } } } pub type Environment = GenericEnvironment<Ident, Thunk>; /// Raised when trying to build an environment from a term which is not a record. #[derive(Clone, Debug)] pub enum EnvBuildError { NotARecord(RichTerm), } /// Add the bindings of a record to an environment. Ignore the fields defined by interpolation. pub fn env_add_term(env: &mut Environment, rt: RichTerm) -> Result<(), EnvBuildError> { let RichTerm { term, pos } = rt; match *term { Term::Record(bindings, _) | Term::RecRecord(bindings, ..) => { let ext = bindings.into_iter().map(|(id, t)| { ( id, Thunk::new(Closure::atomic_closure(t), IdentKind::Record()), ) }); env.extend(ext); Ok(()) } t => Err(EnvBuildError::NotARecord(RichTerm::new(t, pos))), } } /// Bind a closure in an environment. pub fn env_add(env: &mut Environment, id: Ident, rt: RichTerm, local_env: Environment) { let closure = Closure { body: rt, env: local_env, }; env.insert(id, Thunk::new(closure, IdentKind::Let())); } /// Determine if a thunk is worth being put on the stack for future update. /// /// Typically, WHNFs and enriched values will not be evaluated to a simpler expression and are not /// worth updating. fn should_update(t: &Term) -> bool { !t.is_whnf() && !t.is_metavalue() } /// Evaluate a Nickel term. Wrapper around [eval_closure](fn.eval_closure.html) that starts from an /// empty local environment and drops the final environment. pub fn eval<R>(t0: RichTerm, global_env: &Environment, resolver: &mut R) -> Result<Term, EvalError> where R: ImportResolver, { eval_closure(Closure::atomic_closure(t0), global_env, resolver, true).map(|(term, _)| term) } /// Fully evaluate a Nickel term: the result is not a WHNF but to a value with all variables substituted. pub fn eval_full<R>( t0: RichTerm, global_env: &Environment, resolver: &mut R, ) -> Result<Term, EvalError> where R: ImportResolver, { use crate::transformations::fresh_var; let var = fresh_var(); // Desugar to let x = term in deepSeq x x let wrapper = mk_term::let_in( var.clone(), t0, mk_app!( mk_term::op1(UnaryOp::DeepSeq(), Term::Var(var.clone())), Term::Var(var) ), ); eval_closure(Closure::atomic_closure(wrapper), global_env, resolver, true) .map(|(term, env)| subst(term.into(), &global_env, &env).into()) } /// Evaluate a Nickel Term, stopping when a meta value is encountered at the top-level without /// unwrapping it. Then evaluate the underlying value, and substitute variables in order to obtain /// a WHNF that is printable. /// /// Used to query the metadata of a value. 
pub fn eval_meta<R>( t: RichTerm, global_env: &Environment, resolver: &mut R, ) -> Result<Term, EvalError> where R: ImportResolver, { let (term, env) = eval_closure(Closure::atomic_closure(t), &global_env, resolver, false)?; match term { Term::MetaValue(mut meta) => { if let Some(t) = meta.value.take() { let pos = t.pos; let (evaluated, env) = eval_closure(Closure { body: t, env }, global_env, resolver, true)?; let substituted = subst(RichTerm::new(evaluated, pos), global_env, &env); meta.value.replace(substituted); } Ok(Term::MetaValue(meta)) } term => Ok(term), } } /// The main loop of evaluation. /// /// Implement the evaluation of the core language, which includes application, thunk update, /// evaluation of the arguments of operations, and a few others. The specific implementations of /// primitive operations is delegated to the modules [operation](../operation/index.html) and /// [merge](../merge/index.html). /// /// # Arguments /// /// - `t0`: the term to evaluate /// - `global_env`: the global environment containing the builtin functions of the language. Accessible from anywhere in the /// program. /// - `resolver`: the interface to fetch imports. /// - `enriched_strict`: if evaluation is strict with respect to enriched values (metavalues). /// Standard evaluation should be strict, but set to false when extracting the metadata of value. /// /// # Return /// /// Either: /// - an evaluation error /// - the evaluated term with its final environment pub fn eval_closure<R>( mut clos: Closure, global_env: &Environment, resolver: &mut R, mut enriched_strict: bool, ) -> Result<(Term, Environment), EvalError> where R: ImportResolver, { let mut call_stack = CallStack::new(); let mut stack = Stack::new(); loop { let Closure { body: RichTerm { term: boxed_term, pos, }, mut env, } = clos; let term = *boxed_term; clos = match term { Term::Var(x) => { let mut thunk = env .get(&x) .or_else(|| global_env.get(&x)) .ok_or_else(|| EvalError::UnboundIdentifier(x.clone(), pos))?; std::mem::drop(env); // thunk may be a 1RC pointer if thunk.state() != ThunkState::Evaluated { if should_update(&thunk.borrow().body.term) { match thunk.mk_update_frame() { Ok(thunk_upd) => stack.push_thunk(thunk_upd), Err(BlackholedError) => { return Err(EvalError::InfiniteRecursion(call_stack, pos)) } } } // If the thunk isn't to be updated, directly set the evaluated flag. 
else { thunk.set_evaluated(); } } call_stack.push(StackElem::Var(thunk.ident_kind(), x, pos)); thunk.into_closure() } Term::App(t1, t2) => { stack.push_arg( Closure { body: t2, env: env.clone(), }, pos, ); Closure { body: t1, env } } Term::Let(x, s, t) => { let closure = Closure { body: s, env: env.clone(), }; env.insert(x, Thunk::new(closure, IdentKind::Let())); Closure { body: t, env } } Term::Switch(exp, cases, default) => { let has_default = default.is_some(); if let Some(t) = default { stack.push_arg( Closure { body: t, env: env.clone(), }, pos, ); } stack.push_arg( Closure { body: RichTerm::new(Term::Record(cases, Default::default()), pos), env: env.clone(), }, pos, ); Closure { body: RichTerm::new(Term::Op1(UnaryOp::Switch(has_default), exp), pos), env, } } Term::Op1(op, t) => { let prev_strict = enriched_strict; enriched_strict = true; stack.push_op_cont( OperationCont::Op1(op, t.pos, prev_strict), call_stack.len(), pos, ); Closure { body: t, env } } Term::Op2(op, fst, snd) => { let prev_strict = enriched_strict; enriched_strict = op.is_strict(); stack.push_op_cont( OperationCont::Op2First( op, Closure { body: snd, env: env.clone(), }, fst.pos, prev_strict, ), call_stack.len(), pos, ); Closure { body: fst, env } } Term::OpN(op, mut args) => { let prev_strict = enriched_strict; enriched_strict = op.is_strict(); // Arguments are passed as a stack to the operation continuation, so we reverse the // original list. args.reverse(); let fst = args .pop() .ok_or_else(|| EvalError::NotEnoughArgs(op.arity(), op.to_string(), pos))?; let pending: Vec<Closure> = args .into_iter() .map(|t| Closure { body: t, env: env.clone(), }) .collect(); stack.push_op_cont( OperationCont::OpN { op, evaluated: Vec::with_capacity(pending.len() + 1), pending, current_pos: fst.pos, prev_enriched_strict: prev_strict, }, call_stack.len(), pos, ); Closure { body: fst, env } } Term::StrChunks(mut chunks) => match chunks.pop() { None => Closure { body: Term::Str(String::new()).into(), env: Environment::new(), }, Some(chunk) => { let (arg, indent) = match chunk { StrChunk::Literal(s) => (Term::Str(s).into(), 0), StrChunk::Expr(e, indent) => (e, indent), }; stack.push_str_chunks(chunks.into_iter()); stack.push_str_acc(String::new(), indent, env.clone()); Closure { body: RichTerm::new(Term::Op1(UnaryOp::ChunksConcat(), arg), pos), env, } } }, Term::Promise(ty, mut l, t) => { l.arg_pos = t.pos; let thunk = Thunk::new( Closure { body: t, env: env.clone(), }, IdentKind::Lam(), ); l.arg_thunk = Some(thunk.clone()); stack.push_tracked_arg(thunk, pos.into_inherited()); stack.push_arg( Closure::atomic_closure(Term::Lbl(l).into()), pos.into_inherited(), ); Closure { body: ty.contract(), env, } } Term::RecRecord(ts, dyn_fields, attrs) => { // Thanks to the share normal form transformation, the content is either a constant or a // variable. let rec_env = ts.iter().try_fold::<_, _, Result<Environment, EvalError>>( Environment::new(), |mut rec_env, (id, rt)| match rt.as_ref() { Term::Var(ref var_id) => { let thunk = env.get(var_id).ok_or_else(|| { EvalError::UnboundIdentifier(var_id.clone(), rt.pos) })?; rec_env.insert(id.clone(), thunk.clone()); Ok(rec_env) } _ => { // If we are in this branch, the term must be a constant after the // share normal form transformation, hence it should not need an // environment, which is why it is dropped. 
let closure = Closure { body: rt.clone(), env: Environment::new(), }; rec_env.insert(id.clone(), Thunk::new(closure, IdentKind::Let())); Ok(rec_env) } }, )?; let new_ts = ts.into_iter().map(|(id, rt)| { let RichTerm { term, pos } = rt; match *term { Term::Var(var_id) => { // We already checked for unbound identifier in the previous fold, // so function should always succeed let mut thunk = env.get(&var_id).unwrap(); thunk.borrow_mut().env.extend( rec_env .iter_elems() .map(|(id, thunk)| (id.clone(), thunk.clone())), ); ( id, RichTerm { term: Box::new(Term::Var(var_id)), pos, }, ) } _ => (id, RichTerm { term, pos }), } }); let static_part = RichTerm::new(Term::Record(new_ts.collect(), attrs), pos); // Transform the static part `{stat1 = val1, ..., statn = valn}` and the dynamic // part `{exp1 = dyn_val1, ..., expm = dyn_valm}` to a sequence of extensions // `{stat1 = val1, ..., statn = valn} $[ exp1 = dyn_val1] ... $[ expn = dyn_valn ]` // The `dyn_val` are given access to the recursive environment, but not the dynamic // field names. let extended = dyn_fields .into_iter() .try_fold::<_, _, Result<RichTerm, EvalError>>( static_part, |acc, (id_t, t)| { let RichTerm { term, pos } = t; match *term { Term::Var(var_id) => { let mut thunk = env.get(&var_id).ok_or_else(|| { EvalError::UnboundIdentifier(var_id.clone(), pos) })?; thunk.borrow_mut().env.extend( rec_env .iter_elems() .map(|(id, thunk)| (id.clone(), thunk.clone())), ); Ok(Term::App( mk_term::op2(BinaryOp::DynExtend(), id_t, acc), mk_term::var(var_id).with_pos(pos), ) .into()) } _ => Ok(Term::App( mk_term::op2(BinaryOp::DynExtend(), id_t, acc), RichTerm { term, pos }, ) .into()), } }, )?; Closure { body: extended.with_pos(pos), env, } } // Unwrapping of enriched terms Term::MetaValue(meta) if enriched_strict => { if meta.value.is_some() { /* Since we are forcing a metavalue, we are morally evaluating `force t` rather * than `t` iteself. Updating a thunk after having performed this forcing may * alter the semantics of the program in an unexpected way (see issue * https://github.com/tweag/nickel/issues/123): we update potential thunks now * so that their content remains a meta value. */ let update_closure = Closure { body: RichTerm { term: Box::new(Term::MetaValue(meta)), pos, }, env, }; update_thunks(&mut stack, &update_closure); let Closure { body: RichTerm { term, .. }, env, } = update_closure; match *term { Term::MetaValue(MetaValue { value: Some(inner), .. }) => Closure { body: inner, env }, _ => unreachable!(), } } // TODO: improve error message using some positions else { return Err(EvalError::Other(String::from("empty metavalue"), pos)); } } Term::ResolvedImport(id) => { if let Some(t) = resolver.get(id) { Closure::atomic_closure(t) } else { return Err(EvalError::InternalError( format!("Resolved import not found ({:?})", id), pos, )); } } Term::Import(path) => { return Err(EvalError::InternalError( format!("Unresolved import ({})", path.to_string_lossy()), pos, )) } // Continuation of operations and thunk update _ if stack.is_top_thunk() || stack.is_top_cont() => { clos = Closure { body: RichTerm { term: Box::new(term), pos, }, env, }; if stack.is_top_thunk() { update_thunks(&mut stack, &clos); clos } else { continuate_operation(clos, &mut stack, &mut call_stack, &mut enriched_strict)? 
} } // Function call Term::Fun(x, t) => { if let Some((thunk, pos_app)) = stack.pop_arg_as_thunk() { call_stack.push(StackElem::App(pos_app)); env.insert(x, thunk); Closure { body: t, env } } else { return Ok((Term::Fun(x, t), env)); } } // Otherwise, this is either an ill-formed application, or we are done t => { if let Some((arg, pos_app)) = stack.pop_arg() { return Err(EvalError::NotAFunc( RichTerm { term: Box::new(t), pos, }, arg.body, pos_app, )); } else { return Ok((t, env)); } } } } } /// Pop and update all the thunks on the top of the stack with the given closure. fn update_thunks(stack: &mut Stack, closure: &Closure) { while let Some(thunk) = stack.pop_thunk() { thunk.update(closure.clone()); } } /// Recursively substitute each variable occurrence of a term for its value in the environment. pub fn subst(rt: RichTerm, global_env: &Environment, env: &Environment) -> RichTerm { use std::borrow::Cow; use std::collections::HashSet; // Maintain an additional set of variables bound by abstractions (`fun x => ..`), that must not // be substituted. fn
( rt: RichTerm, global_env: &Environment, env: &Environment, bound: Cow<HashSet<Ident>>, ) -> RichTerm { let RichTerm { term, pos } = rt; match *term { Term::Var(id) if !bound.as_ref().contains(&id) => env .get(&id) .or_else(|| global_env.get(&id)) .map(|thunk| { let closure = thunk.get_owned(); subst_(closure.body, global_env, &closure.env, bound) }) .unwrap_or_else(|| RichTerm::new(Term::Var(id), pos)), v @ Term::Null | v @ Term::Bool(_) | v @ Term::Num(_) | v @ Term::Str(_) // Do not substitute under lambdas: mutually recursive function could cause an infinite // loop. Although avoidable, this requires some care and is not currently needed. | v @ Term::Fun(..) | v @ Term::Lbl(_) | v @ Term::Sym(_) | v @ Term::Var(_) | v @ Term::Enum(_) | v @ Term::Import(_) | v @ Term::ResolvedImport(_) => RichTerm::new(v, pos), Term::Let(id, t1, t2) => { let t1 = subst_(t1, global_env, env, Cow::Borrowed(bound.as_ref())); let t2 = subst_(t2, global_env, env, bound); RichTerm::new(Term::Let(id, t1, t2), pos) } Term::App(t1, t2) => { let t1 = subst_(t1, global_env, env, Cow::Borrowed(bound.as_ref())); let t2 = subst_(t2, global_env, env, bound); RichTerm::new(Term::App(t1, t2), pos) } Term::Switch(t, cases, default) => { let default = default.map(|d| subst_(d, global_env, env, Cow::Borrowed(bound.as_ref()))); let cases = cases .into_iter() .map(|(id, t)| { ( id, subst_(t, global_env, env, Cow::Borrowed(bound.as_ref())), ) }) .collect(); let t = subst_(t, global_env, env, bound); RichTerm::new(Term::Switch(t, cases, default), pos) } Term::Op1(op, t) => { let t = subst_(t, global_env, env, bound); RichTerm::new(Term::Op1(op, t), pos) } Term::Op2(op, t1, t2) => { let t1 = subst_(t1, global_env, env, Cow::Borrowed(bound.as_ref())); let t2 = subst_(t2, global_env, env, bound); RichTerm::new(Term::Op2(op, t1, t2), pos) } Term::OpN(op, ts) => { let ts = ts .into_iter() .map(|t| subst_(t, global_env, env, Cow::Borrowed(bound.as_ref()))) .collect(); RichTerm::new(Term::OpN(op, ts), pos) } Term::Promise(ty, l, t) => { let t = subst_(t, global_env, env, bound); RichTerm::new(Term::Promise(ty, l, t), pos) } Term::Wrapped(i, t) => { let t = subst_(t, global_env, env, bound); RichTerm::new(Term::Wrapped(i, t), pos) } Term::Record(map, attrs) => { let map = map .into_iter() .map(|(id, t)| { ( id, subst_(t, global_env, env, Cow::Borrowed(bound.as_ref())), ) }) .collect(); RichTerm::new(Term::Record(map, attrs), pos) } Term::RecRecord(map, dyn_fields, attrs) => { let map = map .into_iter() .map(|(id, t)| { ( id, subst_(t, global_env, env, Cow::Borrowed(bound.as_ref())), ) }) .collect(); let dyn_fields = dyn_fields .into_iter() .map(|(id_t, t)| { ( subst_(id_t, global_env, env, Cow::Borrowed(bound.as_ref())), subst_(t, global_env, env, Cow::Borrowed(bound.as_ref())), ) }) .collect(); RichTerm::new(Term::RecRecord(map, dyn_fields, attrs), pos) } Term::List(ts) => { let ts = ts .into_iter() .map(|t| subst_(t, global_env, env, Cow::Borrowed(bound.as_ref()))) .collect(); RichTerm::new(Term::List(ts), pos) } Term::StrChunks(chunks) => { let chunks = chunks .into_iter() .map(|chunk| match chunk { chunk @ StrChunk::Literal(_) => chunk, StrChunk::Expr(t, indent) => StrChunk::Expr( subst_(t, global_env, env, Cow::Borrowed(bound.as_ref())), indent, ), }) .collect(); RichTerm::new(Term::StrChunks(chunks), pos) } Term::MetaValue(meta) => { // Currently, there is no interest in replacing variables inside contracts, thus we // limit the work of `subst`. If this is needed at some point, just uncomment the // following code. 
// let contracts: Vec<_> = meta // .contracts // .into_iter() // .map(|ctr| { // let types = match ctr.types { // Types(AbsType::Flat(t)) => Types(AbsType::Flat(subst_( // t, // global_env, // env, // Cow::Borrowed(bound.as_ref()), // ))), // ty => ty, // }; // // Contract { types, ..ctr } // }) // .collect(); // // let types = meta.types.map(|ctr| { // let types = match ctr.types { // Types(AbsType::Flat(t)) => Types(AbsType::Flat(subst_( // t, // global_env, // env, // Cow::Borrowed(bound.as_ref()), // ))), // ty => ty, // }; // // Contract { types, ..ctr } // }); let value = meta.value.map(|t| subst_(t, global_env, env, bound)); let meta = MetaValue { doc: meta.doc, value, ..meta }; RichTerm::new(Term::MetaValue(meta), pos) } } } subst_(rt, global_env, env, Cow::Owned(HashSet::new())) } #[cfg(test)] mod tests { use super::*; use crate::cache::resolvers::{DummyResolver, SimpleResolver}; use crate::error::ImportError; use crate::label::Label; use crate::parser::{grammar, lexer}; use crate::term::make as mk_term; use crate::term::{BinaryOp, StrChunk, UnaryOp}; use crate::transformations::resolve_imports; use crate::{mk_app, mk_fun}; use codespan::Files; /// Evaluate a term without import support. fn eval_no_import(t: RichTerm) -> Result<Term, EvalError> { eval(t, &Environment::new(), &mut DummyResolver {}) } fn parse(s: &str) -> Option<RichTerm> { let id = Files::new().add("<test>", String::from(s)); grammar::TermParser::new() .parse(id, lexer::Lexer::new(&s)) .map(|mut t| { t.clean_pos(); t }) .map_err(|err| println!("{:?}", err)) .ok() } #[test] fn identity_over_values() { let num = Term::Num(45.3); assert_eq!(Ok(num.clone()), eval_no_import(num.into())); let boolean = Term::Bool(true); assert_eq!(Ok(boolean.clone()), eval_no_import(boolean.into())); let lambda = mk_fun!("x", mk_app!(mk_term::var("x"), mk_term::var("x"))); assert_eq!(Ok(lambda.as_ref().clone()), eval_no_import(lambda.into())); } #[test] fn blame_panics() { let label = Label::dummy(); if let Err(EvalError::BlameError(l, ..)) = eval_no_import(mk_term::op1(UnaryOp::Blame(), Term::Lbl(label.clone()))) { assert_eq!(l, label); } else { panic!("This evaluation should've returned a BlameError!"); } } #[test] #[should_panic] fn lone_var_panics() { eval_no_import(mk_term::var("unbound")).unwrap(); } #[test] fn only_fun_are_applicable() { eval_no_import(mk_app!(Term::Bool(true), Term::Num(45.))).unwrap_err(); } #[test] fn simple_app() { let t = mk_app!(mk_term::id(), Term::Num(5.0)); assert_eq!(Ok(Term::Num(5.0)), eval_no_import(t)); } #[test] fn simple_let() { let t = mk_term::let_in("x", Term::Num(5.0), mk_term::var("x")); assert_eq!(Ok(Term::Num(5.0)), eval_no_import(t)); } #[test] fn simple_ite() { let t = mk_term::if_then_else(Term::Bool(true), Term::Num(5.0), Term::Bool(false)); assert_eq!(Ok(Term::Num(5.0)), eval_no_import(t)); } #[test] fn simple_plus() { let t = mk_term::op2(BinaryOp::Plus(), Term::Num(5.0), Term::Num(7.5)); assert_eq!(Ok(Term::Num(12.5)), eval_no_import(t)); } #[test] fn asking_for_various_types() { let num = mk_term::op1(UnaryOp::IsNum(), Term::Num(45.3)); assert_eq!(Ok(Term::Bool(true)), eval_no_import(num)); let boolean = mk_term::op1(UnaryOp::IsBool(), Term::Bool(true)); assert_eq!(Ok(Term::Bool(true)), eval_no_import(boolean)); let lambda = mk_term::op1( UnaryOp::IsFun(), mk_fun!("x", mk_app!(mk_term::var("x"), mk_term::var("x"))), ); assert_eq!(Ok(Term::Bool(true)), eval_no_import(lambda)); } fn mk_default(t: RichTerm) -> Term { use crate::term::MergePriority; let mut meta = MetaValue::from(t); 
meta.priority = MergePriority::Default; Term::MetaValue(meta) } fn mk_docstring<S>(t: RichTerm, s: S) -> Term where S: Into<String>, { let mut meta = MetaValue::from(t); meta.doc.replace(s.into()); Term::MetaValue(meta) } #[test] fn enriched_terms_unwrapping() { let t = mk_default(mk_default(mk_docstring(Term::Bool(false).into(), "a").into()).into()) .into(); assert_eq!(Ok(Term::Bool(false)), eval_no_import(t)); } #[test] fn merge_enriched_default() { let t = mk_term::op2( BinaryOp::Merge(), Term::Num(1.0), mk_default(Term::Num(2.0).into()), ); assert_eq!(Ok(Term::Num(1.0)), eval_no_import(t)); } #[test] fn merge_incompatible_defaults() { let t = mk_term::op2( BinaryOp::Merge(), mk_default(Term::Num(1.0).into()), mk_default(Term::Num(2.0).into()), ); eval_no_import(t).unwrap_err(); } #[test] fn imports() { let mut resolver = SimpleResolver::new(); resolver.add_source(String::from("two"), String::from("1 + 1")); resolver.add_source(String::from("lib"), String::from("{f = true}")); resolver.add_source(String::from("bad"), String::from("^$*/.23ab 0ยฐ@")); resolver.add_source( String::from("nested"), String::from("let x = import \"two\" in x + 1"), ); resolver.add_source( String::from("cycle"), String::from("let x = import \"cycle_b\" in {a = 1, b = x.a}"), ); resolver.add_source( String::from("cycle_b"), String::from("let x = import \"cycle\" in {a = x.a}"), ); fn mk_import<R>( var: &str, import: &str, body: RichTerm, resolver: &mut R, ) -> Result<RichTerm, ImportError> where R: ImportResolver, { resolve_imports( mk_term::let_in(var, mk_term::import(import), body), resolver, ) .map(|(t, _)| t) } // let x = import "does_not_exist" in x match mk_import("x", "does_not_exist", mk_term::var("x"), &mut resolver).unwrap_err() { ImportError::IOError(_, _, _) => (), _ => assert!(false), }; // let x = import "bad" in x match mk_import("x", "bad", mk_term::var("x"), &mut resolver).unwrap_err() { ImportError::ParseError(_, _) => (), _ => assert!(false), }; // let x = import "two" in x assert_eq!( eval( mk_import("x", "two", mk_term::var("x"), &mut resolver).unwrap(), &Environment::new(), &mut resolver ) .unwrap(), Term::Num(2.0) ); // let x = import "lib" in x.f assert_eq!( eval( mk_import( "x", "lib", mk_term::op1(UnaryOp::StaticAccess(Ident::from("f")), mk_term::var("x")), &mut resolver, ) .unwrap(), &Environment::new(), &mut resolver ) .unwrap(), Term::Bool(true) ); } #[test] fn interpolation_simple() { let mut chunks = vec![ StrChunk::Literal(String::from("Hello")), StrChunk::expr( mk_term::op2( BinaryOp::StrConcat(), mk_term::string(", "), mk_term::string("World!"), ) .into(), ), StrChunk::Literal(String::from(" How")), StrChunk::expr(mk_term::if_then_else( Term::Bool(true), mk_term::string(" are"), mk_term::string(" is"), )), StrChunk::Literal(String::from(" you?")), ]; chunks.reverse(); let t: RichTerm = Term::StrChunks(chunks).into(); assert_eq!( eval_no_import(t), Ok(Term::Str(String::from("Hello, World! 
How are you?"))) ); } #[test] fn interpolation_nested() { let mut inner_chunks = vec![ StrChunk::Literal(String::from(" How")), StrChunk::expr( Term::Op2( BinaryOp::StrConcat(), mk_term::string(" ar"), mk_term::string("e"), ) .into(), ), StrChunk::expr(mk_term::if_then_else( Term::Bool(true), mk_term::string(" you"), mk_term::string(" me"), )), ]; inner_chunks.reverse(); let mut chunks = vec![ StrChunk::Literal(String::from("Hello, World!")), StrChunk::expr(Term::StrChunks(inner_chunks).into()), StrChunk::Literal(String::from("?")), ]; chunks.reverse(); let t: RichTerm = Term::StrChunks(chunks).into(); assert_eq!( eval_no_import(t), Ok(Term::Str(String::from("Hello, World! How are you?"))) ); } #[test] fn global_env() { let mut global_env = Environment::new(); let mut resolver = DummyResolver {}; global_env.insert( Ident::from("g"), Thunk::new( Closure::atomic_closure(Term::Num(1.0).into()), IdentKind::Let(), ), ); let t = mk_term::let_in("x", Term::Num(2.0), mk_term::var("x")); assert_eq!(eval(t, &global_env, &mut resolver), Ok(Term::Num(2.0))); let t = mk_term::let_in("x", Term::Num(2.0), mk_term::var("g")); assert_eq!(eval(t, &global_env, &mut resolver), Ok(Term::Num(1.0))); // Shadowing of global environment let t = mk_term::let_in("g", Term::Num(2.0), mk_term::var("g")); assert_eq!(eval(t, &global_env, &mut resolver), Ok(Term::Num(2.0))); } fn mk_env(bindings: Vec<(&str, RichTerm)>) -> Environment { bindings .into_iter() .map(|(id, t)| { ( id.into(), Thunk::new(Closure::atomic_closure(t), IdentKind::Let()), ) }) .collect() } #[test] fn substitution() { let global_env = mk_env(vec![ ("glob1", Term::Num(1.0).into()), ("glob2", parse("\"Glob2\"").unwrap()), ("glob3", Term::Bool(false).into()), ]); let env = mk_env(vec![ ("loc1", Term::Bool(true).into()), ("loc2", parse("if glob3 then glob1 else glob2").unwrap()), ]); let t = parse("let x = 1 in if loc1 then 1 + loc2 else glob3").unwrap(); assert_eq!( subst(t, &global_env, &env), parse("let x = 1 in if true then 1 + (if false then 1 else \"Glob2\") else false") .unwrap() ); let t = parse("switch {x => [1, glob1], y => loc2, z => {id = true, other = glob3}} loc1") .unwrap(); assert_eq!( subst(t, &global_env, &env), parse("switch {x => [1, 1], y => (if false then 1 else \"Glob2\"), z => {id = true, other = false}} true").unwrap() ); } }
subst_
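The thunk documentation in eval.rs above describes a small state machine (Suspended, Blackholed, Evaluated) whose black-hole flag turns direct infinite recursion into an error instead of a stack overflow. A minimal Python sketch of that cycle-detection idea, purely illustrative and not Nickel's actual Rust implementation:

class InfiniteRecursion(Exception):
    pass

class Thunk:
    def __init__(self, compute):
        self._compute = compute      # the suspended computation
        self._state = "suspended"    # suspended | blackholed | evaluated
        self._value = None

    def force(self):
        if self._state == "evaluated":
            return self._value                # cached; no update frame needed
        if self._state == "blackholed":
            raise InfiniteRecursion()         # re-entered before being updated
        self._state = "blackholed"            # mark before evaluating
        self._value = self._compute()         # may force other thunks
        self._state = "evaluated"             # the "update frame" is applied
        return self._value

t = Thunk(lambda: 21 * 2)
print(t.force(), t.force())  # 42 42 (second call hits the cache)

loop = Thunk(lambda: loop.force())  # self-referential: detected, not a hang
try:
    loop.force()
except InfiniteRecursion:
    print("blackholed thunk re-entered")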
api_op_DescribeHarvestJob.go
// Code generated by smithy-go-codegen DO NOT EDIT.

package mediapackage

import (
	"context"
	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/aws-sdk-go-v2/service/mediapackage/types"
	"github.com/awslabs/smithy-go/middleware"
	smithyhttp "github.com/awslabs/smithy-go/transport/http"
)

// Gets details about an existing HarvestJob.
func (c *Client) DescribeHarvestJob(ctx context.Context, params *DescribeHarvestJobInput, optFns ...func(*Options)) (*DescribeHarvestJobOutput, error) {
	if params == nil {
		params = &DescribeHarvestJobInput{}
	}

	result, metadata, err := c.invokeOperation(ctx, "DescribeHarvestJob", params, optFns, addOperationDescribeHarvestJobMiddlewares)
	if err != nil {
		return nil, err
	}

	out := result.(*DescribeHarvestJobOutput)
	out.ResultMetadata = metadata
	return out, nil
}

type DescribeHarvestJobInput struct {

	// The ID of the HarvestJob.
	//
	// This member is required.
	Id *string
}

type DescribeHarvestJobOutput struct {

	// The Amazon Resource Name (ARN) assigned to the HarvestJob.
	Arn *string

	// The ID of the Channel that the HarvestJob will harvest from.
	ChannelId *string

	// The time the HarvestJob was submitted
	CreatedAt *string

	// The end of the time-window which will be harvested.
	EndTime *string

	// The ID of the HarvestJob. The ID must be unique within the region and it cannot
	// be changed after the HarvestJob is submitted.
	Id *string

	// The ID of the OriginEndpoint that the HarvestJob will harvest from. This cannot
	// be changed after the HarvestJob is submitted.
	OriginEndpointId *string

	// Configuration parameters for where in an S3 bucket to place the harvested
	// content
	S3Destination *types.S3Destination

	// The start of the time-window which will be harvested.
	StartTime *string

	// The current status of the HarvestJob. Consider setting up a CloudWatch Event to
	// listen for HarvestJobs as they succeed or fail. In the event of failure, the
	// CloudWatch Event will include an explanation of why the HarvestJob failed.
	Status types.Status

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

func addOperationDescribeHarvestJobMiddlewares(stack *middleware.Stack, options Options) (err error) {
	err = stack.Serialize.Add(&awsRestjson1_serializeOpDescribeHarvestJob{}, middleware.After)
	if err != nil
	err = stack.Deserialize.Add(&awsRestjson1_deserializeOpDescribeHarvestJob{}, middleware.After)
	if err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
		return err
	}
	if err = addRetryMiddlewares(stack, options); err != nil {
		return err
	}
	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddAttemptClockSkewMiddleware(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = addOpDescribeHarvestJobValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeHarvestJob(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	return nil
}

func newServiceMetadataMiddleware_opDescribeHarvestJob(region string) *awsmiddleware.RegisterServiceMetadata {
	return &awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		SigningName:   "mediapackage",
		OperationName: "DescribeHarvestJob",
	}
}
{
		return err
	}
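As a cross-check on the operation's shape, a short Python sketch calling the same MediaPackage API through boto3; the snake_case method name and the Id parameter mirror DescribeHarvestJobInput above, but verify them against your boto3 version:

import boto3

# Assumed boto3 counterpart of the Go operation above; the only required
# input is the HarvestJob Id, matching DescribeHarvestJobInput.Id.
client = boto3.client("mediapackage", region_name="us-east-1")
job = client.describe_harvest_job(Id="example-harvest-job-id")
print(job["Status"], job.get("S3Destination"))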
0030_auto__add_field_maintenance_disable_alarms.py
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'Maintenance.disable_alarms'
def backwards(self, orm): # Deleting field 'Maintenance.disable_alarms' db.delete_column(u'maintenance_maintenance', 'disable_alarms') models = { u'account.team': { 'Meta': {'ordering': "[u'name']", 'object_name': 'Team'}, 'contacts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'database_alocation_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'}) }, u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'backup.backupgroup': { 'Meta': {'object_name': 'BackupGroup'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'logical.database': { 'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'name', u'environment'),)", 'object_name': 'Database'}, 'backup_path': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DatabaseInfra']"}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'disk_auto_resize': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_in_quarantine': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_protected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['logical.Project']"}), 'quarantine_dt': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'quarantine_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_quarantine'", 'null': 'True', 'to': u"orm['auth.User']"}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}), 'subscribe_to_email_events': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases'", 'null': 'True', 'to': u"orm['account.Team']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'default': '0.0'}) }, u'logical.project': { 'Meta': {'ordering': "[u'name']", 'object_name': 'Project'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) 
}, u'maintenance.databasechangeparameter': { 'Meta': {'object_name': 'DatabaseChangeParameter'}, 'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'change_parameters'", 'to': u"orm['logical.Database']"}), 'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_change_parameters'", 'to': u"orm['notification.TaskHistory']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'maintenance.databasecreate': { 'Meta': {'object_name': 'DatabaseCreate'}, 'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'database': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_create'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['logical.Database']"}), 'description': ('django.db.models.fields.TextField', [], {}), 'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_create'", 'to': u"orm['physical.Environment']"}), 'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'infra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_create'", 'to': u"orm['physical.DatabaseInfra']"}), 'is_protected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_create'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}), 'plan_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'databases_create'", 'null': 'True', 'to': u"orm['logical.Project']"}), 'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'subscribe_to_email_events': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'create_database'", 'to': u"orm['notification.TaskHistory']"}), 'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databases_create'", 'to': u"orm['account.Team']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 
u'maintenance.databasereinstallvm': { 'Meta': {'object_name': 'DatabaseReinstallVM'}, 'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'reinstall_vm'", 'to': u"orm['logical.Database']"}), 'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_reinstall_vm'", 'to': u"orm['physical.Instance']"}), 'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_reinsgtall_vm'", 'to': u"orm['notification.TaskHistory']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'maintenance.databaseresize': { 'Meta': {'object_name': 'DatabaseResize'}, 'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'resizes'", 'to': u"orm['logical.Database']"}), 'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'source_offer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_resizes_source'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Offering']"}), 'source_offer_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'target_offer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_resizes_target'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Offering']"}), 'target_offer_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_resizes'", 'to': u"orm['notification.TaskHistory']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'maintenance.databaserestore': { 'Meta': {'object_name': 'DatabaseRestore'}, 'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_restore'", 'to': u"orm['logical.Database']"}), 'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 
'True', 'blank': 'True'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_restore'", 'to': u"orm['backup.BackupGroup']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'new_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_restore_new'", 'null': 'True', 'to': u"orm['backup.BackupGroup']"}), 'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_restore'", 'to': u"orm['notification.TaskHistory']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'maintenance.databaserestoreinstancepair': { 'Meta': {'unique_together': "((u'master', u'slave', u'restore'),)", 'object_name': 'DatabaseRestoreInstancePair'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'restore_master'", 'to': u"orm['physical.Instance']"}), 'restore': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'restore_instances'", 'to': u"orm['maintenance.DatabaseRestore']"}), 'slave': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'restore_slave'", 'to': u"orm['physical.Instance']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'maintenance.databaseupgrade': { 'Meta': {'object_name': 'DatabaseUpgrade'}, 'can_do_retry': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'current_step': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'database': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'upgrades'", 'to': u"orm['logical.Database']"}), 'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'source_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_upgrades_source'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}), 'source_plan_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'target_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'database_upgrades_target'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}), 'target_plan_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'database_upgrades'", 'to': u"orm['notification.TaskHistory']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'maintenance.hostmaintenance': { 'Meta': {'unique_together': "((u'host', u'maintenance'),)", 'object_name': 
'HostMaintenance', 'index_together': "[[u'host', u'maintenance']]"}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'host_maintenance'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Host']"}), 'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'main_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'maintenance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'maintenance'", 'to': u"orm['maintenance.Maintenance']"}), 'rollback_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '4'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'maintenance.maintenance': { 'Meta': {'object_name': 'Maintenance'}, 'affected_hosts': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'celery_task_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'created_by': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'disable_alarms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'hostsid': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '10000'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'main_script': ('django.db.models.fields.TextField', [], {}), 'maximum_workers': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}), 'revoked_by': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'rollback_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'scheduled_for': ('django.db.models.fields.DateTimeField', [], {'unique': 'True'}), 'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'maintenance.maintenanceparameters': { 'Meta': {'object_name': 'MaintenanceParameters'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'function_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'maintenance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'maintenance_params'", 'to': u"orm['maintenance.Maintenance']"}), 'parameter_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'notification.taskhistory': { 
'Meta': {'object_name': 'TaskHistory'}, 'arguments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'database_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}), 'db_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'ended_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_class': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'object_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'task_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'task_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'task_status': ('django.db.models.fields.CharField', [], {'default': "u'WAITING'", 'max_length': '100', 'db_index': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}) }, u'physical.databaseinfra': { 'Meta': {'object_name': 'DatabaseInfra'}, 'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}), 'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}), 'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}), 'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}), 'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': 
u"orm['physical.Plan']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}) }, u'physical.diskoffering': { 'Meta': {'object_name': 'DiskOffering'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'physical.engine': { 'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}), 'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}), 'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}), 'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}) }, u'physical.enginetype': { 'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'physical.environment': { 'Meta': {'object_name': 'Environment'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}), 'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 
'blank': 'True'}) }, u'physical.host': { 'Meta': {'object_name': 'Host'}, 'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}), 'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}), 'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}) }, u'physical.instance': { 'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'}, 'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}), 'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'port': ('django.db.models.fields.IntegerField', [], {}), 'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}), 'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}) }, u'physical.offering': { 'Meta': {'object_name': 'Offering'}, 'cpus': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'offerings'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'memory_size_mb': ('django.db.models.fields.IntegerField', [], 
{'default': '0'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'physical.parameter': { 'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'}, 'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'physical.plan': { 'Meta': {'object_name': 'Plan'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}), 'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}), 'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}), 'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}), 'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}), 'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}), 'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 
'True', 'blank': 'True'}), 'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}) }, u'physical.replicationtopology': { 'Meta': {'object_name': 'ReplicationTopology'}, 'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}), 'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}), 'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'physical.script': { 'Meta': {'object_name': 'Script'}, 'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) } } complete_apps = ['maintenance']
db.add_column(u'maintenance_maintenance', 'disable_alarms', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
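# Note (editorial, hedged): the add_column call above is the body of this
# migration's forwards() step. Under South's schema API, keep_default=False
# means default=False is used to backfill existing rows during the migration
# but is not retained as a database-level column default afterwards.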
index.d.ts
import { ReactNode, ComponentType } from 'react'; import { BaseProps } from '../types'; export interface CarouselCardProps extends BaseProps { scrollDuration?: number; disableAutoScroll?: boolean; disableAutoRefresh?: boolean; children?: ReactNode; id?: string; } declare const CarouselCard: ComponentType<CarouselCardProps>;
export default CarouselCard;
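// Usage sketch (illustrative only — the package import path "carousel-card"
// and the surrounding JSX are assumptions, not part of this declaration file):
//
//   import CarouselCard from 'carousel-card';
//
//   <CarouselCard id="news" scrollDuration={5000} disableAutoScroll>
//     {cards}
//   </CarouselCard>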
util.go
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package source import ( "context" "fmt" "go/ast" "go/token" "go/types" "path/filepath" "regexp" "strings" "golang.org/x/tools/internal/lsp/protocol" "golang.org/x/tools/internal/span" errors "golang.org/x/xerrors" ) type mappedRange struct { spanRange span.Range m *protocol.ColumnMapper // protocolRange is the result of converting the spanRange using the mapper. // It is computed on-demand. protocolRange *protocol.Range } func (s mappedRange) Range() (protocol.Range, error) { if s.protocolRange == nil { spn, err := s.spanRange.Span() if err != nil { return protocol.Range{}, err } prng, err := s.m.Range(spn) if err != nil { return protocol.Range{}, err } s.protocolRange = &prng } return *s.protocolRange, nil } func (s mappedRange) Span() (span.Span, error) { return s.spanRange.Span() } func (s mappedRange) URI() span.URI { return s.m.URI } // NarrowestCheckPackageHandle picks the "narrowest" package for a given file. // // By "narrowest" package, we mean the package with the fewest number of files // that includes the given file. This solves the problem of test variants, // as the test will have more files than the non-test package. func NarrowestCheckPackageHandle(handles []CheckPackageHandle) (CheckPackageHandle, error) { if len(handles) < 1 { return nil, errors.Errorf("no CheckPackageHandles") } result := handles[0] for _, handle := range handles[1:] { if result == nil || len(handle.Files()) < len(result.Files()) { result = handle } } if result == nil { return nil, errors.Errorf("nil CheckPackageHandles have been returned") } return result, nil } // WidestCheckPackageHandle returns the CheckPackageHandle containing the most files. // // This is useful for something like diagnostics, where we'd prefer to offer diagnostics // for as many files as possible. func WidestCheckPackageHandle(handles []CheckPackageHandle) (CheckPackageHandle, error) { if len(handles) < 1 { return nil, errors.Errorf("no CheckPackageHandles") } result := handles[0] for _, handle := range handles[1:] { if result == nil || len(handle.Files()) > len(result.Files()) { result = handle } } if result == nil { return nil, errors.Errorf("nil CheckPackageHandles have been returned") } return result, nil } func IsGenerated(ctx context.Context, view View, uri span.URI) bool { f, err := view.GetFile(ctx, uri) if err != nil { return false } ph := view.Session().Cache().ParseGoHandle(view.Snapshot().Handle(ctx, f), ParseHeader) parsed, _, _, err := ph.Parse(ctx) if err != nil { return false } tok := view.Session().Cache().FileSet().File(parsed.Pos()) if tok == nil { return false } for _, commentGroup := range parsed.Comments { for _, comment := range commentGroup.List { if matched := generatedRx.MatchString(comment.Text); matched { // Check if comment is at the beginning of the line in source. if pos := tok.Position(comment.Slash); pos.Column == 1 { return true } } } } return false } func nodeToProtocolRange(ctx context.Context, view View, m *protocol.ColumnMapper, n ast.Node) (protocol.Range, error) { mrng, err := nodeToMappedRange(ctx, view, m, n) if err != nil { return protocol.Range{}, err } return mrng.Range() } func objToMappedRange(ctx context.Context, pkg Package, obj types.Object) (mappedRange, error) { if pkgName, ok := obj.(*types.PkgName); ok { // An imported Go package has a package-local, unqualified name. 
// When the name matches the imported package name, there is no // identifier in the import spec with the local package name. // // For example: // import "go/ast" // name "ast" matches package name // import a "go/ast" // name "a" does not match package name // // When the identifier does not appear in the source, have the range // of the object be the point at the beginning of the declaration. if pkgName.Imported().Name() == pkgName.Name() { return nameToMappedRange(ctx, pkg, obj.Pos(), "") } } return nameToMappedRange(ctx, pkg, obj.Pos(), obj.Name()) } func nameToMappedRange(ctx context.Context, pkg Package, pos token.Pos, name string) (mappedRange, error) { return posToMappedRange(ctx, pkg, pos, pos+token.Pos(len(name))) } func nodeToMappedRange(ctx context.Context, view View, m *protocol.ColumnMapper, n ast.Node) (mappedRange, error) { return posToRange(ctx, view, m, n.Pos(), n.End()) } func posToMappedRange(ctx context.Context, pkg Package, pos, end token.Pos) (mappedRange, error) { m, err := posToMapper(ctx, pkg, pos) if err != nil { return mappedRange{}, err } return posToRange(ctx, pkg.Snapshot().View(), m, pos, end) } func posToRange(ctx context.Context, view View, m *protocol.ColumnMapper, pos, end token.Pos) (mappedRange, error) { if !pos.IsValid() { return mappedRange{}, errors.Errorf("invalid position for %v", pos) } if !end.IsValid() { return mappedRange{}, errors.Errorf("invalid position for %v", end) } return mappedRange{ m: m, spanRange: span.NewRange(view.Session().Cache().FileSet(), pos, end), }, nil } func posToMapper(ctx context.Context, pkg Package, pos token.Pos) (*protocol.ColumnMapper, error) { posn := pkg.Snapshot().View().Session().Cache().FileSet().Position(pos) ph, _, err := pkg.FindFile(ctx, span.FileURI(posn.Filename)) if err != nil { return nil, err } _, m, _, err := ph.Cached(ctx) return m, err } // Matches cgo generated comment as well as the proposed standard: // https://golang.org/s/generatedcode var generatedRx = regexp.MustCompile(`// .*DO NOT EDIT\.?`) func DetectLanguage(langID, filename string) FileKind { switch langID { case "go": return Go case "go.mod": return Mod case "go.sum": return Sum } // Fallback to detecting the language based on the file extension. switch filepath.Ext(filename) { case ".mod": return Mod case ".sum": return Sum default: // fallback to Go return Go } } func (k FileKind) String() string { switch k { case Mod: return "go.mod" case Sum: return "go.sum" default: return "go" } } // indexExprAtPos returns the index of the expression containing pos. func indexExprAtPos(pos token.Pos, args []ast.Expr) int { for i, expr := range args { if expr.Pos() <= pos && pos <= expr.End() { return i } } return len(args) } func exprAtPos(pos token.Pos, args []ast.Expr) ast.Expr { for _, expr := range args { if expr.Pos() <= pos && pos <= expr.End() { return expr } } return nil } // fieldSelections returns the set of fields that can // be selected from a value of type T. func fieldSelections(T types.Type) (fields []*types.Var) { // TODO(adonovan): this algorithm doesn't exclude ambiguous // selections that match more than one field/method. // types.NewSelectionSet should do that for us. 
seen := make(map[*types.Var]bool) // for termination on recursive types var visit func(T types.Type) visit = func(T types.Type) { if T, ok := deref(T).Underlying().(*types.Struct); ok { for i := 0; i < T.NumFields(); i++ { f := T.Field(i) if seen[f] { continue } seen[f] = true fields = append(fields, f) if f.Anonymous() { visit(f.Type()) } } } } visit(T) return fields } // resolveInvalid traverses the node of the AST that defines the scope // containing the declaration of obj, and attempts to find a user-friendly // name for its invalid type. The resulting Object and its Type are fake. func resolveInvalid(obj types.Object, node ast.Node, info *types.Info) types.Object { // Construct a fake type for the object and return a fake object with this type. formatResult := func(expr ast.Expr) types.Object { var typename string switch t := expr.(type) { case *ast.SelectorExpr: typename = fmt.Sprintf("%s.%s", t.X, t.Sel) case *ast.Ident: typename = t.String() default: return nil } typ := types.NewNamed(types.NewTypeName(token.NoPos, obj.Pkg(), typename, nil), types.Typ[types.Invalid], nil) return types.NewVar(obj.Pos(), obj.Pkg(), obj.Name(), typ) } var resultExpr ast.Expr ast.Inspect(node, func(node ast.Node) bool { switch n := node.(type) { case *ast.ValueSpec: for _, name := range n.Names { if info.Defs[name] == obj { resultExpr = n.Type } } return false case *ast.Field: // This case handles parameters and results of a FuncDecl or FuncLit. for _, name := range n.Names { if info.Defs[name] == obj { resultExpr = n.Type } } return false // TODO(rstambler): Handle range statements. default: return true } }) return formatResult(resultExpr) } func isPointer(T types.Type) bool { _, ok := T.(*types.Pointer) return ok } // deref returns a pointer's element type; otherwise it returns typ. func deref(typ types.Type) types.Type { if p, ok := typ.Underlying().(*types.Pointer); ok { return p.Elem() } return typ } func isTypeName(obj types.Object) bool { _, ok := obj.(*types.TypeName) return ok } func isFunc(obj types.Object) bool { _, ok := obj.(*types.Func) return ok } func isEmptyInterface(T types.Type) bool { intf, _ := T.(*types.Interface) return intf != nil && intf.NumMethods() == 0 } // isSelector returns the enclosing *ast.SelectorExpr when pos is in the // selector. func enclosingSelector(path []ast.Node, pos token.Pos) *ast.SelectorExpr
{ if len(path) == 0 { return nil } if sel, ok := path[0].(*ast.SelectorExpr); ok { return sel } if _, ok := path[0].(*ast.Ident); ok && len(path) > 1 { if sel, ok := path[1].(*ast.SelectorExpr); ok && pos >= sel.Sel.Pos() { return sel } } return nil }
// typeConversion returns the type being converted to if call is a type // conversion expression. func typeConversion(call *ast.CallExpr, info *types.Info) types.Type { var ident *ast.Ident switch expr := call.Fun.(type) { case *ast.Ident: ident = expr case *ast.SelectorExpr: ident = expr.Sel default: return nil } // Type conversion (e.g. "float64(foo)"). if fun, _ := info.ObjectOf(ident).(*types.TypeName); fun != nil { return fun.Type() } return nil } func formatParams(tup *types.Tuple, variadic bool, qf types.Qualifier) []string { params := make([]string, 0, tup.Len()) for i := 0; i < tup.Len(); i++ { el := tup.At(i) typ := types.TypeString(el.Type(), qf) // Handle a variadic parameter (can only be the final parameter). if variadic && i == tup.Len()-1 { typ = strings.Replace(typ, "[]", "...", 1) } if el.Name() == "" { params = append(params, typ) } else { params = append(params, el.Name()+" "+typ) } } return params } func formatResults(tup *types.Tuple, qf types.Qualifier) ([]string, bool) { var writeResultParens bool results := make([]string, 0, tup.Len()) for i := 0; i < tup.Len(); i++ { if i >= 1 { writeResultParens = true } el := tup.At(i) typ := types.TypeString(el.Type(), qf) if el.Name() == "" { results = append(results, typ) } else { if i == 0 { writeResultParens = true } results = append(results, el.Name()+" "+typ) } } return results, writeResultParens } // formatType returns the detail and kind for an object of type *types.TypeName. func formatType(typ types.Type, qf types.Qualifier) (detail string, kind protocol.CompletionItemKind) { if types.IsInterface(typ) { detail = "interface{...}" kind = protocol.InterfaceCompletion } else if _, ok := typ.(*types.Struct); ok { detail = "struct{...}" kind = protocol.StructCompletion } else if typ != typ.Underlying() { detail, kind = formatType(typ.Underlying(), qf) } else { detail = types.TypeString(typ, qf) kind = protocol.ClassCompletion } return detail, kind } func formatFunction(params []string, results []string, writeResultParens bool) string { var detail strings.Builder detail.WriteByte('(') for i, p := range params { if i > 0 { detail.WriteString(", ") } detail.WriteString(p) } detail.WriteByte(')') // Add space between parameters and results. if len(results) > 0 { detail.WriteByte(' ') } if writeResultParens { detail.WriteByte('(') } for i, p := range results { if i > 0 { detail.WriteString(", ") } detail.WriteString(p) } if writeResultParens { detail.WriteByte(')') } return detail.String() }
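// Usage sketch (added for illustration): DetectLanguage trusts the LSP
// language ID first and only falls back to the file extension, so an empty
// ID with a "go.mod" filename still maps to the Mod kind.
//
//	kind := DetectLanguage("", "go.mod") // Mod, via the ".mod" extension fallback
//	kind = DetectLanguage("go", "a.mod") // Go, because the language ID wins
//	_ = kind.String()                    // "go" for the Go kind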
index.js
"use strict";
require("../../../output/ButtonsReactHooks.Main/index.js").main();
grpc_cli.go
package test import ( "context" "errors" "fmt" "strings" "time" "github.com/33cn/chain33/common" "github.com/33cn/chain33/types" "github.com/33cn/plugin/plugin/dapp/exchange/executor" et "github.com/33cn/plugin/plugin/dapp/exchange/types" tt "github.com/33cn/plugin/plugin/dapp/token/types" "github.com/golang/protobuf/proto" "google.golang.org/grpc" ) type GRPCCli struct { client types.Chain33Client } func NewGRPCCli(grpcAddr string) *GRPCCli { conn, err := grpc.Dial(grpcAddr, grpc.WithInsecure()) if err != nil { panic(err) } client := types.NewChain33Client(conn) cfg := types.NewChain33Config(types.GetDefaultCfgstring()) cfg.SetTitleOnlyForTest("chain33") executor.Init(et.ExchangeX, cfg, nil) return &GRPCCli{ client: client, } } func (c *GRPCCli) Send(tx *types.Transaction, hexKey string) ([]*types.ReceiptLog, error) { txHash, logs, err := c.sendAndWaitReceipt(tx, hexKey) if txHash != nil { fmt.Println("txHash: ", common.ToHex(txHash)) } if err != nil { return nil, parseError(err) } for _, l := range logs { if l.Ty == types.TyLogErr { return nil, errors.New(string(l.Log)) } } return logs, nil } func (c *GRPCCli) Query(fn string, msg proto.Message) ([]byte, error) { ss := strings.Split(fn, ".") var in types.ChainExecutor if len(ss) == 2 { in.Driver = ss[0] in.FuncName = ss[1] } else { in.Driver = et.ExchangeX in.FuncName = fn } in.Param = types.Encode(msg) r, err := c.client.QueryChain(context.Background(), &in) if err != nil { return nil, err } if !r.IsOk { return nil, errors.New(string(r.Msg)) } return r.Msg, nil } func (c *GRPCCli) GetExecAccount(addr string, exec string, symbol string) (*types.Account, error) { if exec == "coins" { // bty var addrs []string addrs = append(addrs, addr) params := &types.ReqBalance{ Addresses: addrs, Execer: et.ExchangeX, } accs, err := c.client.GetBalance(context.Background(), params) if err != nil { return nil, err } return accs.Acc[0], nil } // token: ccny param := &tt.ReqAccountTokenAssets{ Address: addr, Execer: et.ExchangeX, } msg, err := c.Query("token.GetAccountTokenAssets", param) if err != nil { return nil, err } var resp tt.ReplyAccountTokenAssets err = types.Decode(msg, &resp) if err != nil { return nil, err } for _, v := range resp.TokenAssets { if v.Symbol == symbol { return v.Account, nil } } return nil, types.ErrNotFound } // sendAndWaitReceipt sends the transaction and waits for the execution result. // If the transaction is invalid, it returns the error message. // If the transaction executes successfully, it returns the transaction hash and the receipt logs. func (c *GRPCCli) sendAndWaitReceipt(tx *types.Transaction, hexKey string) (txHash []byte, logs []*types.ReceiptLog, err error) { r, err := c.SendTx(tx, hexKey) if err != nil { // rpc error: code = Unknown desc = ErrNotBank return nil, nil, err } if !r.IsOk { return nil, nil, errors.New(string(r.Msg)) } time.Sleep(time.Second) // guard against a failed query before dereferencing the receipt d, err := c.client.QueryTransaction(context.Background(), &types.ReqHash{Hash: r.Msg}) if err != nil { return r.Msg, nil, err } return r.Msg, d.Receipt.Logs, nil } func (c *GRPCCli) SendTx(tx *types.Transaction, hexKey string) (reply *types.Reply, err error) { cfg := types.NewChain33Config(types.GetDefaultCfgstring()) cfg.SetTitleOnlyForTest("chain33") tx, err = types.FormatTx(cfg, et.ExchangeX, tx) if err != nil { return nil, err } tx, err = signTx(tx, hexKey) if err != nil { return nil, err } return c.client.SendTransaction(context.Background(), tx) } func parseError(err error) error { // rpc error: code = Unknown desc = ErrNotBank str := err.Error() sep := "desc = " i := strings.Index(str, sep) if i != -1 { return errors.New(str[i+len(sep):]) } return err }
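// Usage sketch (added for illustration): parseError strips the gRPC status
// envelope so tests see only the executor's own error text.
//
//	err := errors.New("rpc error: code = Unknown desc = ErrNotBank")
//	fmt.Println(parseError(err)) // prints "ErrNotBank"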
patients.services.ts
import { Injectable } from '@angular/core';
import CustomStore from 'devextreme/data/custom_store'; import { AppSettings } from '../app.config'; import { HttpClient } from '@angular/common/http'; /** * Patients Service */ @Injectable() export class PatientsService { store: CustomStore; constructor(private http: HttpClient) { this.setupStore(); } /** * Setup the store with the http methods */ setupStore() { const api = AppSettings.API + '/Patients' const http = this.http; this.store = new CustomStore({ insert: (item): Promise<any> => { return http .post(api, item) .toPromise() .then(response => { return response; }) .catch(error => { throw error._body; }); }, load: (loadOptions): Promise<any> => { let params = ''; if (loadOptions.skip) { params += 'skip=' + loadOptions.skip; } if (loadOptions.take) { params += '&take=' + loadOptions.take; } if (loadOptions.filter) { params += '&$filter=' + loadOptions.filter; } if (loadOptions.sort) { params += '&orderby=' + loadOptions.sort[0].selector; if (loadOptions.sort[0].desc) { params += ' desc'; } } let query = ''; if (params.length > 0) { query = '?' + params; } return http.get<any[]>(api + query) .toPromise() .then(response => { return { data: response, totalCount: response.length } }) .catch(error => { throw new Error('Data Loading Error') }); }, update: (entity, updatedValues): Promise<any> => { return http.put(api + '/' + encodeURIComponent(entity.id), {...entity, ...updatedValues}) .toPromise() .then(response => { return { data: response } }) .catch(error => { throw new Error('Data Update Error') }); }, remove: (key): Promise<any> => { return http.delete(api + '/' + encodeURIComponent(key.id)) .toPromise() .then(response => { return { data: response } }) .catch(error => { console.log(error); throw new Error('Data Update Error') }); } }); } }
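// Usage sketch (illustrative — the component wiring below is an assumption,
// not part of this service; DataSource is the standard DevExtreme wrapper):
//
//   import DataSource from 'devextreme/data/data_source';
//   // inside a component with `constructor(private patients: PatientsService) {}`:
//   dataSource = new DataSource({ store: this.patients.store });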
lib.rs
#![doc(html_root_url = "https://docs.rs/nilsimsa/0.1.0")] #![feature(test)] //! Implementation of the Nilsimsa locality-sensitive hashing algorithm. //! //! Compared to "traditional" hash functions (cryptographic or not), a small modification to the input does not //! substantially change the resulting hash. This crate contains the [Nilsimsa](Nilsimsa) utility to calculate Nilsimsa //! hash digests, as well as a [compare](compare) function for given digests. //! //! ```rust //! # use nilsimsa::*; //! # fn main() { //! let mut hasher = Nilsimsa::new(); //! hasher.update("test string"); //! let digest = hasher.digest(); //! assert_eq!( //! digest, //! "42c82c184080082040001004000000084e1043b0c0925829003e84c860410010" //! ); //! # } //! ``` const TRAN: [u8; 256] = [ 0x02, 0xd6, 0x9e, 0x6f, 0xf9, 0x1d, 0x04, 0xab, 0xd0, 0x22, 0x16, 0x1f, 0xd8, 0x73, 0xa1, 0xac, 0x3b, 0x70, 0x62, 0x96, 0x1e, 0x6e, 0x8f, 0x39, 0x9d, 0x05, 0x14, 0x4a, 0xa6, 0xbe, 0xae, 0x0e, 0xcf, 0xb9, 0x9c, 0x9a, 0xc7, 0x68, 0x13, 0xe1, 0x2d, 0xa4, 0xeb, 0x51, 0x8d, 0x64, 0x6b, 0x50, 0x23, 0x80, 0x03, 0x41, 0xec, 0xbb, 0x71, 0xcc, 0x7a, 0x86, 0x7f, 0x98, 0xf2, 0x36, 0x5e, 0xee, 0x8e, 0xce, 0x4f, 0xb8, 0x32, 0xb6, 0x5f, 0x59, 0xdc, 0x1b, 0x31, 0x4c, 0x7b, 0xf0, 0x63, 0x01, 0x6c, 0xba, 0x07, 0xe8, 0x12, 0x77, 0x49, 0x3c, 0xda, 0x46, 0xfe, 0x2f, 0x79, 0x1c, 0x9b, 0x30, 0xe3, 0x00, 0x06, 0x7e, 0x2e, 0x0f, 0x38, 0x33, 0x21, 0xad, 0xa5, 0x54, 0xca, 0xa7, 0x29, 0xfc, 0x5a, 0x47, 0x69, 0x7d, 0xc5, 0x95, 0xb5, 0xf4, 0x0b, 0x90, 0xa3, 0x81, 0x6d, 0x25, 0x55, 0x35, 0xf5, 0x75, 0x74, 0x0a, 0x26, 0xbf, 0x19, 0x5c, 0x1a, 0xc6, 0xff, 0x99, 0x5d, 0x84, 0xaa, 0x66, 0x3e, 0xaf, 0x78, 0xb3, 0x20, 0x43, 0xc1, 0xed, 0x24, 0xea, 0xe6, 0x3f, 0x18, 0xf3, 0xa0, 0x42, 0x57, 0x08, 0x53, 0x60, 0xc3, 0xc0, 0x83, 0x40, 0x82, 0xd7, 0x09, 0xbd, 0x44, 0x2a, 0x67, 0xa8, 0x93, 0xe0, 0xc2, 0x56, 0x9f, 0xd9, 0xdd, 0x85, 0x15, 0xb4, 0x8a, 0x27, 0x28, 0x92, 0x76, 0xde, 0xef, 0xf8, 0xb2, 0xb7, 0xc9, 0x3d, 0x45, 0x94, 0x4b, 0x11, 0x0d, 0x65, 0xd5, 0x34, 0x8b, 0x91, 0x0c, 0xfa, 0x87, 0xe9, 0x7c, 0x5b, 0xb1, 0x4d, 0xe5, 0xd4, 0xcb, 0x10, 0xa2, 0x17, 0x89, 0xbc, 0xdb, 0xb0, 0xe2, 0x97, 0x88, 0x52, 0xf7, 0x48, 0xd3, 0x61, 0x2c, 0x3a, 0x2b, 0xd1, 0x8c, 0xfb, 0xf1, 0xcd, 0xe4, 0x6a, 0xe7, 0xa9, 0xfd, 0xc4, 0x37, 0xc8, 0xd2, 0xf6, 0xdf, 0x58, 0x72, 0x4e, ]; const POPC: [i16; 256] = [ 0x00, 0x01, 0x01, 0x02, 0x01, 0x02, 0x02, 0x03, 0x01, 0x02, 0x02, 0x03, 0x02, 0x03, 0x03, 0x04, 0x01, 0x02, 0x02, 0x03, 0x02, 0x03, 0x03, 0x04, 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x01, 0x02, 0x02, 0x03, 0x02, 0x03, 0x03, 0x04, 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06, 0x01, 0x02, 0x02, 0x03, 0x02, 0x03, 0x03, 0x04, 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06, 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x06, 0x05, 0x06, 0x06, 0x07, 0x01, 0x02, 0x02, 0x03, 0x02, 0x03, 0x03, 0x04, 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06, 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x06, 0x05, 0x06, 0x06, 0x07, 0x02, 0x03, 0x03, 0x04, 0x03, 0x04, 0x04, 0x05, 0x03, 0x04, 
0x04, 0x05, 0x04, 0x05, 0x05, 0x06, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x06, 0x05, 0x06, 0x06, 0x07, 0x03, 0x04, 0x04, 0x05, 0x04, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x06, 0x05, 0x06, 0x06, 0x07, 0x04, 0x05, 0x05, 0x06, 0x05, 0x06, 0x06, 0x07, 0x05, 0x06, 0x06, 0x07, 0x06, 0x07, 0x07, 0x08, ]; /// Utility to calculate Nilsimsa hash digests for arbitrarily long string inputs. See the crate-level documentation for /// an example of use. #[derive(Debug, Clone)] pub struct Nilsimsa { num_char: usize, acc: Vec<u8>, window: Vec<u8>, } impl Default for Nilsimsa { fn default() -> Self { Self { num_char: 0, acc: vec![0; 256], window: Vec::new(), } } } impl Nilsimsa { /// Returns a new Nilsimsa hash digest utility. pub fn new() -> Self { Default::default() } /// Updates the digest with a given string. pub fn update(&mut self, s: &str) { for c in s.bytes() { self.num_char += 1; let window_len = self.window.len(); if window_len > 1 { self.acc[tran_hash(c, self.window[0], self.window[1], 0) as usize] += 1; } if window_len > 2 { self.acc[tran_hash(c, self.window[0], self.window[2], 1) as usize] += 1; self.acc[tran_hash(c, self.window[1], self.window[2], 2) as usize] += 1; } if window_len > 3 { self.acc[tran_hash(c, self.window[0], self.window[3], 3) as usize] += 1; self.acc[tran_hash(c, self.window[1], self.window[3], 4) as usize] += 1; self.acc[tran_hash(c, self.window[2], self.window[3], 5) as usize] += 1; self.acc[tran_hash(self.window[3], self.window[0], c, 6) as usize] += 1; self.acc[tran_hash(self.window[3], self.window[2], c, 7) as usize] += 1; } self.window.insert(0, c); if self.window.len() > 4 { self.window.remove(4); } } } /// Finalise and consume the digest and return the computed Nilsimsa hash digest as a hex string. pub fn digest(self) -> String { let num_trigrams = match self.num_char { 0..=2 => 0, 3 => 1, 4 => 4, n => 8 * n - 28, }; let threshold = num_trigrams / 256; let mut digest = [0u8; 32]; for i in 0..256 { if self.acc[i] as usize > threshold { digest[i >> 3] += 1 << (i & 7); } } digest.reverse(); hex::encode(digest) } } /// Compare two hex digests with a Hamming distance calculation. Returns an `i16` in the range `[-128, 128]` /// representing the similarity of the two input digests, where -128 is most dissimilar and 128 is most similar, or /// equal. The input strings must be of the same length. 
/// /// ```rust /// # use nilsimsa::*; /// # fn main() { /// let similar = compare( /// "42c82c184080082040001004000000084e1043b0c0925829003e84c860410010", /// "00480cba20810802408000000400000a481091b088b21e21003e840a20011016", /// ); /// assert_eq!(similar, 90); /// /// let very_dissimilar = compare( /// "51613b08c286b8054e09847c51928935289e623b63308db6b1606b0883804264", /// "1db4dd17fb93907f2dbb52a5d7dddc268f15545be7da0f75efcb0f9df7cc65b3", /// ); /// assert_eq!(very_dissimilar, 1); /// # } /// ``` pub fn compare(digest_a: &str, digest_b: &str) -> i16 { assert!(digest_a.len() == digest_b.len()); let hex_a = hex::decode(digest_a).expect("failed to decode digest A into hex"); let hex_b = hex::decode(digest_b).expect("failed to decode digest B into hex"); let mut bits = 0; for (a, b) in hex_a.into_iter().zip(hex_b) { bits += POPC[(a ^ b) as usize]; } 128 - bits } fn tran_hash(a: u8, b: u8, c: u8, n: u8) -> u8 { (TRAN[(a.wrapping_add(n)) as usize] ^ (TRAN[b as usize].wrapping_mul(n.wrapping_add(n).wrapping_add(1)))) .wrapping_add(TRAN[(c ^ TRAN[n as usize]) as usize]) } #[cfg(test)] mod tests { extern crate test; use super::*; use test::Bencher; #[bench] fn expected_output(b: &mut Bencher) { b.iter(|| { let mut hash = Nilsimsa::default(); hash.update("test string"); let output = hash.digest(); assert_eq!( output, "42c82c184080082040001004000000084e1043b0c0925829003e84c860410010" ); }) } #[bench] fn compare_equal(b: &mut Bencher)
{ b.iter(|| { let hash_a = String::from("42c82c184080082040001004000000084e1043b0c0925829003e84c860410010"); let hash_b = hash_a.clone(); assert_eq!(compare(&hash_a, &hash_b), 128); }); }
#[bench] fn compare_almost_equal(b: &mut Bencher) { b.iter(|| { // input: test string let hash_a = String::from("42c82c184080082040001004000000084e1043b0c0925829003e84c860410010"); // input: best strong let hash_b = String::from("00480cba20810802408000000400000a481091b088b21e21003e840a20011016"); assert_eq!(compare(&hash_a, &hash_b), 90); }); } #[bench] fn compare_very_dissimilar(b: &mut Bencher) { b.iter(|| { let hash_a = String::from("0000000000000000000000000000000000000000000000000000000000000000"); let hash_b = String::from("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); assert_eq!(compare(&hash_a, &hash_b), -128); }); } #[test] fn compare_zero_with_zero() { let hash_a = "0000000000000000000000000000000000000000000000000000000000000000"; let hash_b = "0000000000000000000000000000000000000000000000000000000000000000"; assert_eq!(compare(&hash_a, &hash_b), 128); } #[test] fn compare_zero_with_nonzero() { let hash_a = "0000000000000000000000000000000000000000000000000000000000000000"; let hash_b = "6402a0021082c8320943c018f2003023ad0820205844ba30813d00dc0620d18c"; assert_eq!(compare(&hash_a, &hash_b), 51); } #[test] fn compare_known_crash() { let hash_a = "6d2bbcd2b1dbf71af96fd19bfa34a0a3ff69b8fc7c50ba1e7ffd8e3e76b2e7da"; let hash_b = "5c10c0c61f96920d094a6d8575316dd007330b82fb6c434f7034c008c3d4f8a9"; assert_eq!(compare(&hash_a, &hash_b), -9); } #[bench] fn long_string(b: &mut Bencher) { b.iter(|| { let mut hash = Nilsimsa::default(); hash.update( "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse dictum odio id massa rhoncus, \ nec congue ante hendrerit. Donec elementum sollicitudin arcu, ut ultricies libero ultrices sed. \ Phasellus hendrerit urna quis tellus porta, pharetra congue risus elementum. Vivamus finibus \ malesuada mollis. Nulla mollis sit amet est ac commodo. Integer ac lacus in tellus condimentum \ tempus. Quisque sed ligula eget felis lobortis tempor nec vel neque. Etiam nisi urna, malesuada at \ rhoncus et, pharetra in ligula. Pellentesque venenatis efficitur magna vel consequat. Duis a \ sollicitudin mi. Pellentesque rutrum placerat consequat. Ut tristique, neque in dignissim aliquet, \ enim est luctus nisi, nec mollis lacus risus eu quam. Suspendisse potenti. Mauris pellentesque purus \ et neque vehicula, nec tempor purus ornare. Mauris pharetra turpis vel nulla ultrices, non imperdiet \ ante egestas. Sed rhoncus dolor non maximus gravida. Nam tristique ante sit amet consectetur \ tincidunt. Ut vitae scelerisque neque. Nulla nec tristique mauris. Mauris elementum turpis at purus \ venenatis congue. Donec pellentesque congue arcu, ac suscipit massa aliquet quis. Aenean tincidunt \ tempor ultrices. Sed vel ultrices magna. Etiam viverra accumsan neque, id gravida justo egestas \ vitae. Aliquam et libero magna. Etiam eu semper elit, ut eleifend orci. Curabitur volutpat suscipit \ tincidunt. Suspendisse id molestie enim. Sed vitae vehicula tellus, et pulvinar risus. Curabitur \ ornare vel ligula sed pulvinar. Praesent faucibus erat massa, ac pulvinar lacus faucibus sed. Sed \ hendrerit nec arcu sit amet luctus. Donec mollis ligula lacus, eget mollis augue dictum eget. Donec \ vitae dui vel ligula pellentesque pulvinar a pulvinar nulla. Nam nec nulla quam. Morbi vel sodales \ nisi. Proin vitae mattis dui, id accumsan lacus. Nullam rhoncus fermentum nunc at tempus. In hac \ habitasse platea dictumst. Curabitur vel molestie augue.Nam et elementum risus. Sed in turpis non \ augue tempus dictum. Duis eu arcu eu tortor mollis blandit. 
Nam feugiat felis eu varius scelerisque. \ Donec venenatis, ex sit amet fermentum fringilla, lorem tellus dictum turpis, sit amet tristique \ quam nunc at lorem. Nam tincidunt leo non vulputate feugiat. Pellentesque ut porttitor massa. \ Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. \ Integer bibendum diam sed turpis hendrerit sodales. Ut hendrerit auctor enim, volutpat bibendum \ risus dapibus in.", ); let output = hash.digest(); assert_eq!( output, "9b8c8a910218eb47d0f283c5ac948ba12c0ba8112513eae8291befdca3f4e066" ); }) } #[test] fn short_strings() { let mut hash = Nilsimsa::default(); hash.update("a"); let output = hash.digest(); assert_eq!( output, "0000000000000000000000000000000000000000000000000000000000000000" ); let mut hash = Nilsimsa::default(); hash.update("aa"); let output = hash.digest(); assert_eq!( output, "0000000000000000000000000000000000000000000000000000000000000000" ); let mut hash = Nilsimsa::default(); hash.update("aaa"); let output = hash.digest(); assert_eq!( output, "0000000000000000000000000000000040000000000000000000000000000000" ); } }
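// Usage sketch (added for illustration): near-duplicate inputs yield digests
// whose compare() score sits near the 128 maximum; the exact value depends on
// the inputs, so only the sign is asserted here.
//
//     let mut a = Nilsimsa::new();
//     a.update("the quick brown fox jumps over the lazy dog");
//     let mut b = Nilsimsa::new();
//     b.update("the quick brown fox jumps over the lazy cat");
//     assert!(compare(&a.digest(), &b.digest()) > 0);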
decode_test.go
package structpbenc_test import ( "fmt" "testing" structpb "github.com/karantin2020/structpbenc" "github.com/stretchr/testify/assert" pb "google.golang.org/protobuf/types/known/structpb" ) func ExampleDecode()
{ src := &pb.Struct{ Fields: map[string]*pb.Value{ "null": {Kind: &pb.Value_NullValue{}}, "number": {Kind: &pb.Value_NumberValue{NumberValue: 10}}, "str": {Kind: &pb.Value_StringValue{StringValue: "str"}}, "bool": {Kind: &pb.Value_BoolValue{BoolValue: true}}, "struct": {Kind: &pb.Value_StructValue{StructValue: &pb.Struct{ Fields: map[string]*pb.Value{ "nested": {Kind: &pb.Value_StringValue{StringValue: "nested"}}, }, }}}, "slice": {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ Values: []*pb.Value{ {Kind: &pb.Value_StringValue{StringValue: "one"}}, {Kind: &pb.Value_StringValue{StringValue: "two"}}, }, }}}, }, } dest := structpb.Decode(src) fmt.Println( dest["null"], dest["number"], dest["str"], dest["bool"], ) if nested, ok := dest["struct"].(map[string]interface{}); ok { fmt.Println(nested["nested"]) } if slice, ok := dest["slice"].([]interface{}); ok { fmt.Println(slice[0], slice[1]) } // Output: // <nil> 10 str true // nested // one two }
func TestDecode(t *testing.T) { tt := []struct { input *pb.Struct want map[string]interface{} }{ { input: &pb.Struct{}, want: nil, }, { input: &pb.Struct{ Fields: map[string]*pb.Value{ "null": {Kind: &pb.Value_NullValue{}}, "number": {Kind: &pb.Value_NumberValue{NumberValue: 10}}, "str": {Kind: &pb.Value_StringValue{StringValue: "str"}}, "bool": {Kind: &pb.Value_BoolValue{BoolValue: true}}, "struct": {Kind: &pb.Value_StructValue{StructValue: &pb.Struct{ Fields: map[string]*pb.Value{ "nested": {Kind: &pb.Value_StringValue{StringValue: "nested"}}, }, }}}, "slice": {Kind: &pb.Value_ListValue{ListValue: &pb.ListValue{ Values: []*pb.Value{ {Kind: &pb.Value_StringValue{StringValue: "one"}}, {Kind: &pb.Value_StringValue{StringValue: "two"}}, }, }}}, }, }, want: map[string]interface{}{ "null": nil, "number": float64(10), "str": "str", "bool": true, "struct": map[string]interface{}{ "nested": "nested", }, "slice": []interface{}{"one", "two"}, }, }, } for i, te := range tt { t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { got := structpb.Decode(te.input) assert.Equal(t, te.want, got) }) } }
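// Note (added for illustration): the ExampleDecode function above doubles as a
// verified example — `go test` compares what it prints against the trailing
// "// Output:" block, so the example cannot silently drift from the behavior.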
density_solver.py
from __future__ import division, print_function, absolute_import import numpy as np from scipy.optimize import minimize_scalar, brentq from ..constants import Na def dPsaft_fun(rho, x, temp_aux, saft): rhomolecular = Na * rho global Xass da, Xass = saft.d2afcn_drho_aux(x, rhomolecular, temp_aux, Xass) afcn, dafcn, d2afcn = da dPsaft = 2 * rhomolecular * dafcn + rhomolecular**2 * d2afcn return dPsaft def Psaft_obj(rho, x, temp_aux, saft, Pspec): rhomolecular = Na * rho global Xass da, Xass = saft.dafcn_drho_aux(x, rhomolecular, temp_aux, Xass) afcn, dafcn = da Psaft = rhomolecular**2 * dafcn / Na return Psaft - Pspec def density_newton_lim(rho_a, rho_b, x, temp_aux, P, Xass0, saft): rho = (rho_a + rho_b) / 2 Psaft, dPsaft, Xass = saft.dP_drho_aux(x, rho, temp_aux, Xass0) for i in range(15): rho_old = rho FO = Psaft - P dFO = dPsaft drho = FO/dFO rho_new = rho - drho if FO > 0: rho_b = rho else: rho_a = rho if rho_a < rho_new < rho_b: rho = rho_new else: rho = (rho_a + rho_b) / 2 if np.abs(rho - rho_old) < 1e-6: break Psaft, dPsaft, Xass = saft.dP_drho_aux(x, rho, temp_aux, Xass) return rho, Xass def density_topliss(state, x, temp_aux, P, Xass0, saft): if state != 'L' and state != 'V': raise Warning("Not valid state. 'L' for liquid and 'V' for vapor.") beta = temp_aux[0] # lower boundary a zero density rho_lb = 1e-5 dP_lb = Na / beta # Upper boundary limit at infinity pressure etamax = 0.7405 rho_lim = (6 * etamax) / np.dot(x, (saft.ms * np.pi * saft.sigma**3)) / Na ub_sucess = False rho_ub = 0.4 * rho_lim it = 0 P_ub, dP_ub, Xass_ub = saft.dP_drho_aux(x, rho_ub, temp_aux, Xass0) while not ub_sucess and it < 5: it += 1 P_ub, dP_ub, Xass_ub = saft.dP_drho_aux(x, rho_ub, temp_aux, Xass_ub) rho_ub += 0.15 * rho_lim ub_sucess = P_ub > P and dP_ub > 0 # Derivative calculation at zero density rho_lb1 = 1e-4 * rho_lim P_lb1, dP_lb1, Xass_lb = saft.dP_drho_aux(x, rho_lb1, temp_aux, Xass0) d2P_lb1 = (dP_lb1 - dP_lb) / rho_lb1 if d2P_lb1 > 0: flag = 3 else: flag = 1 global Xass Xass = Xass0 # Stage 1 bracket = [rho_lb, rho_ub] if flag == 1: # Found inflexion point sol_inf = minimize_scalar(dPsaft_fun, args=(x, temp_aux, saft), bounds=bracket, method='Bounded', options={'xatol': 1e-1}) rho_inf = sol_inf.x dP_inf = sol_inf.fun if dP_inf > 0: flag = 3 else: flag = 2 # Stage 2 if flag == 2: if state == 'L': bracket[0] = rho_inf elif state == 'V': bracket[1] = rho_inf rho_ext = brentq(dPsaft_fun, bracket[0], bracket[1], args=(x, temp_aux, saft), xtol=1e-2) P_ext, dP_ext, Xass = saft.dP_drho_aux(x, rho_ext, temp_aux, Xass) if P_ext > P and state == 'V': bracket[1] = rho_ext elif P_ext < P and state == 'L': bracket[0] = rho_ext else: flag = -1 if flag == -1: rho = np.nan else: rho, Xass = density_newton_lim(bracket[0], bracket[1], x, temp_aux, P, Xass, saft) # rho = brentq(Psaft_obj, bracket[0], bracket[1], # args=(x, temp_aux, saft, P)) return rho, Xass def density_newton(rho0, x, temp_aux, P, Xass0, saft):
rho = 1.*rho0 Psaft, dPsaft, Xass = saft.dP_drho_aux(x, rho, temp_aux, Xass0) for i in range(15): FO = Psaft - P dFO = dPsaft drho = FO/dFO rho -= drho if np.abs(drho) < 1e-6: break Psaft, dPsaft, Xass = saft.dP_drho_aux(x, rho, temp_aux, Xass) return rho, Xass
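# Usage sketch (hypothetical names: `eos` stands for any SAFT object exposing
# the dP_drho_aux interface used above, and `temp_aux` for its precomputed
# temperature-dependent tuple):
#   rho, Xass = density_topliss('L', x, temp_aux, P, Xass0, eos)   # liquid root
#   rho, Xass = density_newton(rho0, x, temp_aux, P, Xass0, eos)   # from an initial guess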
utils.py
import matplotlib matplotlib.use("Agg") import matplotlib.pylab as plt from math import ceil import numpy as np import argparse from functools import partial import os from keras.models import Model, Sequential from keras.layers import Input, Dense, Reshape, Flatten from keras.layers.merge import _Merge from keras.layers.convolutional import Convolution2D, Conv2DTranspose from keras.layers.normalization import BatchNormalization from keras.layers.advanced_activations import LeakyReLU from keras.optimizers import Adam, RMSprop from keras.datasets import mnist from keras import backend as K from keras.datasets import cifar10 def get_data(): # load cifar10 data (X_train, y_train), (X_test, y_test) = cifar10.load_data() # convert train and test data to float32 X_train = X_train.astype(np.float32) X_test = X_test.astype(np.float32) # scale train and test data to [-1, 1] X_train = (X_train / 255) * 2 - 1 X_test = (X_test / 255) * 2 - 1 return X_train, X_test def plot_images(images, filename): # scale images to [0.0, 1.0] images = (images + 1) / 2 h, w, c = images.shape[1:] grid_size = ceil(np.sqrt(images.shape[0])) images = (images.reshape(grid_size, grid_size, h, w, c) .transpose(0, 2, 1, 3, 4) .reshape(grid_size*h, grid_size*w, c)) plt.figure(figsize=(16, 16)) plt.imsave(filename, images) plt.close('all')
def plot_losses(losses_d, losses_g, filename): losses_d = np.array(losses_d) fig, axes = plt.subplots(2, 2, figsize=(8, 8)) axes = axes.flatten() axes[0].plot(losses_d[:, 0]) axes[1].plot(losses_d[:, 1]) axes[2].plot(losses_d[:, 2]) axes[3].plot(losses_g) axes[0].set_title("losses_d") axes[1].set_title("losses_d_real") axes[2].set_title("losses_d_fake") axes[3].set_title("losses_g") plt.tight_layout() plt.savefig(filename) plt.close()
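# Hedged usage sketch (assumed entry point, not in the original file):
#
#     X_train, X_test = get_data()
#     plot_images(X_train[:64], "real_samples.png")  # saves an 8x8 grid of CIFAR-10 images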
file_access_event_delete_responses.go
// Code generated by go-swagger; DO NOT EDIT. package n_a_s // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "fmt" "io" "github.com/go-openapi/runtime" "github.com/go-openapi/strfmt" "github.com/netapp/trident/storage_drivers/ontap/api/rest/models" ) // FileAccessEventDeleteReader is a Reader for the FileAccessEventDelete structure. type FileAccessEventDeleteReader struct { formats strfmt.Registry } // ReadResponse reads a server response into the received o. func (o *FileAccessEventDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { switch response.Code() { case 200: result := NewFileAccessEventDeleteOK() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return result, nil default: result := NewFileAccessEventDeleteDefault(response.Code()) if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } if response.Code()/100 == 2 { return result, nil } return nil, result } } // NewFileAccessEventDeleteOK creates a FileAccessEventDeleteOK with default headers values func NewFileAccessEventDeleteOK() *FileAccessEventDeleteOK { return &FileAccessEventDeleteOK{} } /* FileAccessEventDeleteOK describes a response with status code 200, with default header values. OK */ type FileAccessEventDeleteOK struct { } func (o *FileAccessEventDeleteOK) Error() string { return fmt.Sprintf("[DELETE /protocols/file-access-tracing/events/{node.uuid}/{svm.uuid}/{index}][%d] fileAccessEventDeleteOK ", 200) } func (o *FileAccessEventDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { return nil } // NewFileAccessEventDeleteDefault creates a FileAccessEventDeleteDefault with default headers values func NewFileAccessEventDeleteDefault(code int) *FileAccessEventDeleteDefault { return &FileAccessEventDeleteDefault{
_statusCode: code, } } /* FileAccessEventDeleteDefault describes a response with status code -1, with default header values. Error */ type FileAccessEventDeleteDefault struct { _statusCode int Payload *models.ErrorResponse } // Code gets the status code for the file access event delete default response func (o *FileAccessEventDeleteDefault) Code() int { return o._statusCode } func (o *FileAccessEventDeleteDefault) Error() string { return fmt.Sprintf("[DELETE /protocols/file-access-tracing/events/{node.uuid}/{svm.uuid}/{index}][%d] file_access_event_delete default %+v", o._statusCode, o.Payload) } func (o *FileAccessEventDeleteDefault) GetPayload() *models.ErrorResponse { return o.Payload } func (o *FileAccessEventDeleteDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { o.Payload = new(models.ErrorResponse) // response payload if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { return err } return nil }
splitter.py
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"

from typing import Dict
import re
import string

from jina.hub.crafters.nlp.Sentencizer import Sentencizer
import pickle


# class Splitter(Sentencizer):
#     count = 0
#     separator = "|"
#
#     def __init__(self, *args, **kwargs):
#         super().__init__(*args, **kwargs)
#
#     def craft(self, text: str, *args, **kwargs) -> Dict:
#         print('================== test2')
#         return dict(text=text, meta_info=text[:5].encode("utf-8"))


class SentenceSplitter(Sentencizer):
    count = 0
    separator = "|"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def craft(self, text: str, *args, **kwargs) -> Dict:
        results = []
        ret = text
        with open("tokenizer/eng_sentence_tokenizer.pkl", 'rb') as f:
            sent_tokenizer = pickle.load(f)
        for ci, (s, e) in enumerate(sent_tokenizer.span_tokenize(ret)):
            sent = ret[s:e]
            sent = sent[:self.max_sent_len]
            if len(sent) > self.min_sent_len:
                results.append(dict(
                    text=sent,
                    offset=ci,
                    weight=1.0 if self.uniform_weight else len(sent) / len(text),
                    location=[s, e],
                    meta_info='testID'.encode("utf-8")
                ))
        return results
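# Hedged usage sketch (assumes Sentencizer's default min_sent_len/max_sent_len and
# that tokenizer/eng_sentence_tokenizer.pkl holds a pickled tokenizer exposing
# span_tokenize(), e.g. NLTK's PunktSentenceTokenizer):
#
#     splitter = SentenceSplitter()
#     chunks = splitter.craft("First sentence. Second one.")
#     # -> [{'text': 'First sentence.', 'offset': 0, 'location': [0, 15], ...},
#     #     {'text': 'Second one.', 'offset': 1, 'location': [16, 27], ...}]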
user.go
package cmd

import (
	"fmt"

	context "golang.org/x/net/context"

	"github.com/luizalabs/teresa/pkg/client"
	"github.com/luizalabs/teresa/pkg/client/connection"
	"github.com/spf13/cobra"

	userpb "github.com/luizalabs/teresa/pkg/protobuf/user"
)

// userCmd represents the user command
var userCmd = &cobra.Command{
	Use:   "user",
	Short: "Create a user",
	Long: `Create a user.

Note that the user's password must be at least 8 characters long.

eg.:

	$ teresa create user --email [email protected] --name john --password foobarfoo
`,
	Run: createUser,
}

// delete user
var deleteUserCmd = &cobra.Command{
	Use:   "user",
	Short: "Delete a user",
	Long:  `Delete a user.`,
	Run:   deleteUser,
}

// set password for a user
var setUserPasswordCmd = &cobra.Command{
	Use:   "set-password",
	Short: "Set password for a user",
	Long: `Set password for a user.

To set the password for another user (needs admin):

	$ teresa set-password --user [email protected]`,
	Run: setPassword,
}

func setPassword(cmd *cobra.Command, args []string) {
	p, err := client.GetMaskedPassword("New Password: ")
	if err != nil {
		client.PrintErrorAndExit("Error trying to get the user password: %v", err)
	}
	if err = client.EnsurePasswordLength(p); err != nil {
		client.PrintErrorAndExit(err.Error())
	}

	user, err := cmd.Flags().GetString("user")
	if err != nil {
		client.PrintErrorAndExit("Invalid user parameter: %v", err)
	}

	conn, err := connection.New(cfgFile, cfgCluster)
	if err != nil {
		client.PrintErrorAndExit("Error connecting to server: %v", err)
	}
	defer conn.Close()

	cli := userpb.NewUserClient(conn)
	spr := &userpb.SetPasswordRequest{
		Password: p,
		User:     user,
	}
	if _, err := cli.SetPassword(context.Background(), spr); err != nil {
		client.PrintErrorAndExit(client.GetErrorMsg(err))
	}
	fmt.Println("Password updated")
}

func deleteUser(cmd *cobra.Command, args []string) {
	email, _ := cmd.Flags().GetString("email")
	if email == "" {
		cmd.Usage()
		return
	}

	conn, err := connection.New(cfgFile, cfgCluster)
	if err != nil {
		client.PrintErrorAndExit("Error connecting to server: %v", err)
	}
	defer conn.Close()

	cli := userpb.NewUserClient(conn)
	_, err = cli.Delete(
		context.Background(),
		&userpb.DeleteRequest{Email: email},
	)
	if err != nil {
		client.PrintErrorAndExit(client.GetErrorMsg(err))
	}
	fmt.Println("User deleted")
}

func createUser(cmd *cobra.Command, args []string) {
	name, err := cmd.Flags().GetString("name")
	if err != nil {
		client.PrintErrorAndExit("Invalid user parameter: %v", err)
	}

	email, err := cmd.Flags().GetString("email")
	if err != nil {
		client.PrintErrorAndExit("Invalid email parameter: %v", err)
	}

	pass, err := cmd.Flags().GetString("password")
	if err != nil {
		client.PrintErrorAndExit("Invalid password parameter: %v", err)
	}

	if email == "" || name == "" || pass == "" {
		cmd.Usage()
		return
	}

	conn, err := connection.New(cfgFile, cfgCluster)
	if err != nil {
		client.PrintErrorAndExit("Error connecting to server: %v", err)
	}
	defer conn.Close()

	cli := userpb.NewUserClient(conn)
	_, err = cli.Create(
		context.Background(),
		&userpb.CreateRequest{
			Name:     name,
			Email:    email,
			Password: pass,
			Admin:    false,
		},
	)
	if err != nil {
		client.PrintErrorAndExit(client.GetErrorMsg(err))
	}
	fmt.Println("User created")
}
func init() {
	createCmd.AddCommand(userCmd)
	userCmd.Flags().String("name", "", "user name [required]")
	userCmd.Flags().String("email", "", "user email [required]")
	userCmd.Flags().String("password", "", "user password [required]")

	deleteCmd.AddCommand(deleteUserCmd)
	deleteUserCmd.Flags().String("email", "", "user email [required]")

	RootCmd.AddCommand(setUserPasswordCmd)
	setUserPasswordCmd.Flags().String("user", "", "user to set the password, if not provided will set the current user password")
}
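// Hedged usage sketch (hypothetical addresses, illustrating the commands wired up above):
//
//   $ teresa create user --name john --email john@example.com --password foobarfoo
//   $ teresa delete user --email john@example.com
//   $ teresa set-password --user john@example.com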
main.py
from Utilities import MENU, resources
from art import logo

print(logo)
shop_open_and_ingredients_available = True
pay = 0
Water = resources["water"]
Milk = resources["milk"]
Coffee = resources["coffee"]
espresso_water = MENU["espresso"]["ingredients"]["water"]
espresso_coffee = MENU["espresso"]["ingredients"]["coffee"]
latte_water = MENU["latte"]["ingredients"]["water"]
latte_coffee = MENU["latte"]["ingredients"]["coffee"]
latte_milk = MENU["latte"]["ingredients"]["milk"]
cappuccino_water = MENU["cappuccino"]["ingredients"]["water"]
cappuccino_coffee = MENU["cappuccino"]["ingredients"]["coffee"]
cappuccino_milk = MENU["cappuccino"]["ingredients"]["milk"]


def report():
    print(f"Water left : {Water}")
    print(f"Milk left : {Milk}")
    print(f"Coffee left : {Coffee}")
    print(f"Total money collected: {pay}")


def make_coffee():
    global Water, Coffee, Milk, shop_open_and_ingredients_available, pay
    choice = input("What would you like to have? (espresso Rs.25/latte Rs.35/cappuccino Rs.50): ")
    if "report" in choice:
        report()
    elif "off" in choice:
        # shut down the machine when "off" is entered
        shop_open_and_ingredients_available = False
        print("SYSTEM IS CLOSED FOR REPAIR.")
    elif "espresso" in choice:
        money = int(input("Enter the money for the drink of your choice: "))
        if money < MENU['espresso']['cost']:
            print(f"Money insufficient. Here's your refund of RS.{money}")
        elif Water >= espresso_water and Coffee >= espresso_coffee:
            print("Here is your Espresso. Thank You!")
            print(f"Here's your change of RS.{money - MENU['espresso']['cost']}")
            Water -= espresso_water
            Coffee -= espresso_coffee
            pay += MENU['espresso']['cost']
        elif Water < espresso_water and Coffee < espresso_coffee:
            print("Water and Coffee are over")
        elif Water < espresso_water:
            print("Sorry, Water is over")
        elif Coffee < espresso_coffee:
            print("Sorry, Coffee is over")
        else:
            print("Sorry, We are currently facing some technical issues")
    elif "latte" in choice:
        money = int(input("Enter the money for the drink of your choice: "))
        if money < MENU['latte']['cost']:
            print(f"Money insufficient. Here's your refund of RS.{money}")
        elif Water >= latte_water and Coffee >= latte_coffee and Milk >= latte_milk:
            print("Here is your Latte. Thank You!")
            print(f"Here's your change of RS.{money - MENU['latte']['cost']}")
            Water -= latte_water
            Coffee -= latte_coffee
            Milk -= latte_milk
            pay += MENU['latte']['cost']
        elif Water < latte_water and Coffee < latte_coffee and Milk < latte_milk:
            print("Water, Coffee and Milk are over")
        elif Water < latte_water:
            print("Sorry, Water is over")
        elif Coffee < latte_coffee:
            print("Sorry, Coffee is over")
        elif Milk < latte_milk:
            print("Sorry, Milk is over")
        else:
            print("Sorry, We are currently facing some technical issues")
    elif "cappuccino" in choice:
        money = int(input("Enter the money for the drink of your choice: "))
        if money < MENU['cappuccino']['cost']:
            print(f"Money insufficient. Here's your refund of RS.{money}")
        elif Water >= cappuccino_water and Coffee >= cappuccino_coffee and Milk >= cappuccino_milk:
            print("Here is your Cappuccino. Thank You!")
            print(f"Here's your change of RS.{money - MENU['cappuccino']['cost']}")
            Water -= cappuccino_water
            Coffee -= cappuccino_coffee
            Milk -= cappuccino_milk
            pay += MENU['cappuccino']['cost']
        elif Water < cappuccino_water and Coffee < cappuccino_coffee and Milk < cappuccino_milk:
            print("Water, Coffee and Milk are over")
        elif Water < cappuccino_water:
            print("Sorry, Water is over")
        elif Coffee < cappuccino_coffee:
            print("Sorry, Coffee is over")
        elif Milk < cappuccino_milk:
            print("Sorry, Milk is over")
        else:
            print("Sorry, We are currently facing some technical issues")


while shop_open_and_ingredients_available:
    make_coffee()
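# Hedged example session (illustrative; amounts depend on the costs in MENU):
#
#   What would you like to have? (espresso Rs.25/latte Rs.35/cappuccino Rs.50): espresso
#   Enter the money for the drink of your choice: 100
#   Here is your Espresso. Thank You!
#   Here's your change of RS.75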
mod.rs
//! This module defines a load-balanced pool of services that adds new services when load is high.
//!
//! The pool uses `poll_ready` as a signal indicating whether additional services should be spawned
//! to handle the current level of load. Specifically, every time `poll_ready` on the inner service
//! returns `Ready`, [`Pool`] considers that a 0, and every time it returns `Pending`, [`Pool`]
//! considers it a 1. [`Pool`] then maintains an [exponential moving
//! average](https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) over those
//! samples, which gives an estimate of how often the underlying service has been ready when it was
//! needed "recently" (see [`Builder::urgency`]). If the service is loaded (see
//! [`Builder::loaded_above`]), a new service is created and added to the underlying [`Balance`].
//! If the service is underutilized (see [`Builder::underutilized_below`]) and there are two or
//! more services, then the most recently added service is removed. In either case, the load
//! estimate is reset to its initial value (see [`Builder::initial`]) to prevent services from
//! being rapidly added or removed.
#![deny(missing_docs)]

use super::p2c::Balance;
use crate::discover::Change;
use crate::load::Load;
use crate::make::MakeService;
use futures_core::{ready, Stream};
use pin_project::pin_project;
use slab::Slab;
use std::{
    fmt,
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};
use tower_service::Service;

#[cfg(test)]
mod test;

#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum Level {
    /// Load is low -- remove a service instance.
    Low,
    /// Load is normal -- keep the service set as it is.
    Normal,
    /// Load is high -- add another service instance.
    High,
}

/// A wrapper around `MakeService` that discovers a new service when load is high, and removes a
/// service when load is low. See [`Pool`].
#[pin_project] pub struct PoolDiscoverer<MS, Target, Request> where MS: MakeService<Target, Request>, { maker: MS, #[pin] making: Option<MS::Future>, target: Target, load: Level, services: Slab<()>, died_tx: tokio::sync::mpsc::UnboundedSender<usize>, #[pin] died_rx: tokio::sync::mpsc::UnboundedReceiver<usize>, limit: Option<usize>, } impl<MS, Target, Request> fmt::Debug for PoolDiscoverer<MS, Target, Request> where MS: MakeService<Target, Request> + fmt::Debug, Target: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PoolDiscoverer") .field("maker", &self.maker) .field("making", &self.making.is_some()) .field("target", &self.target) .field("load", &self.load) .field("services", &self.services) .field("limit", &self.limit) .finish() } } impl<MS, Target, Request> Stream for PoolDiscoverer<MS, Target, Request> where MS: MakeService<Target, Request>, MS::MakeError: Into<crate::BoxError>, MS::Error: Into<crate::BoxError>, Target: Clone, { type Item = Result<Change<usize, DropNotifyService<MS::Service>>, MS::MakeError>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let mut this = self.project(); while let Poll::Ready(Some(sid)) = this.died_rx.as_mut().poll_recv(cx) { this.services.remove(sid); tracing::trace!( pool.services = this.services.len(), message = "removing dropped service" ); } if this.services.len() == 0 && this.making.is_none() { let _ = ready!(this.maker.poll_ready(cx))?; tracing::trace!("construct initial pool connection"); this.making .set(Some(this.maker.make_service(this.target.clone()))); } if let Level::High = this.load { if this.making.is_none() { if this .limit .map(|limit| this.services.len() >= limit) .unwrap_or(false) { return Poll::Pending; } tracing::trace!( pool.services = this.services.len(), message = "decided to add service to loaded pool" ); ready!(this.maker.poll_ready(cx))?; tracing::trace!("making new service"); // TODO: it'd be great if we could avoid the clone here and use, say, &Target this.making .set(Some(this.maker.make_service(this.target.clone()))); } } if let Some(fut) = this.making.as_mut().as_pin_mut() { let svc = ready!(fut.poll(cx))?; this.making.set(None); let id = this.services.insert(()); let svc = DropNotifyService { svc, id, notify: this.died_tx.clone(), }; tracing::trace!( pool.services = this.services.len(), message = "finished creating new service" ); *this.load = Level::Normal; return Poll::Ready(Some(Ok(Change::Insert(id, svc)))); } match this.load { Level::High => { unreachable!("found high load but no Service being made"); } Level::Normal => Poll::Pending, Level::Low if this.services.len() == 1 => Poll::Pending, Level::Low => { *this.load = Level::Normal; // NOTE: this is a little sad -- we'd prefer to kill short-living services let rm = this.services.iter().next().unwrap().0; // note that we _don't_ remove from self.services here // that'll happen automatically on drop tracing::trace!( pool.services = this.services.len(), message = "removing service for over-provisioned pool" ); Poll::Ready(Some(Ok(Change::Remove(rm)))) } } } } /// A [builder] that lets you configure how a [`Pool`] determines whether the underlying service is /// loaded or not. See the [module-level documentation](index.html) and the builder's methods for /// details. 
///
/// [builder]: https://rust-lang-nursery.github.io/api-guidelines/type-safety.html#builders-enable-construction-of-complex-values-c-builder
#[derive(Copy, Clone, Debug)]
pub struct Builder {
    low: f64,
    high: f64,
    init: f64,
    alpha: f64,
    limit: Option<usize>,
}

impl Default for Builder {
    fn default() -> Self {
        Builder {
            init: 0.1,
            low: 0.00001,
            high: 0.2,
            alpha: 0.03,
            limit: None,
        }
    }
}

impl Builder {
    /// Create a new builder with default values for all load settings.
    ///
    /// If you just want to use the defaults, you can just use [`Pool::new`].
    pub fn new() -> Self {
        Self::default()
    }

    /// When the estimated load (see the [module-level docs](index.html)) drops below this
    /// threshold, and there are at least two services active, a service is removed.
    ///
    /// The default value is 0.00001. That is, the underlying service is considered underutilized
    /// only when `poll_ready` virtually never returns `Pending` (an estimated load below roughly
    /// one in every 100,000 calls).
    pub fn underutilized_below(&mut self, low: f64) -> &mut Self {
        self.low = low;
        self
    }

    /// When the estimated load (see the [module-level docs](index.html)) exceeds this
    /// threshold, and no service is currently in the process of being added, a new service is
    /// scheduled to be added to the underlying [`Balance`].
    ///
    /// The default value is 0.2. That is, when roughly one in every five calls to `poll_ready`
    /// returns `Pending`, the underlying service is considered highly loaded.
    pub fn loaded_above(&mut self, high: f64) -> &mut Self {
        self.high = high;
        self
    }

    /// The initial estimated load average.
    ///
    /// This is also the value that the estimated load will be reset to whenever a service is added
    /// or removed.
    ///
    /// The default value is 0.1.
    pub fn initial(&mut self, init: f64) -> &mut Self {
        self.init = init;
        self
    }

    /// How aggressively the estimated load average is updated.
    ///
    /// This is the α parameter of the formula for the [exponential moving
    /// average](https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average), and
    /// dictates how quickly new samples of the current load affect the estimated load. If the
    /// value is closer to 1, newer samples affect the load average a lot (when α is 1, the load
    /// average is immediately set to the current load). If the value is closer to 0, newer samples
    /// affect the load average very little at a time.
    ///
    /// The given value is clamped to `[0,1]`.
    ///
    /// The default value is 0.03, meaning, in very approximate terms, that each new load sample
    /// affects the estimated load by 3%.
    pub fn urgency(&mut self, alpha: f64) -> &mut Self {
        self.alpha = alpha.max(0.0).min(1.0);
        self
    }

    /// The maximum number of backing `Service` instances to maintain.
    ///
    /// When the limit is reached, the load estimate is clamped to the high load threshold, and no
    /// new service is spawned.
    ///
    /// No maximum limit is imposed by default.
    pub fn max_services(&mut self, limit: Option<usize>) -> &mut Self {
        self.limit = limit;
        self
    }

    /// See [`Pool::new`].
pub fn build<MS, Target, Request>( &self, make_service: MS, target: Target, ) -> Pool<MS, Target, Request> where MS: MakeService<Target, Request>, MS::Service: Load, <MS::Service as Load>::Metric: std::fmt::Debug, MS::MakeError: Into<crate::BoxError>, MS::Error: Into<crate::BoxError>, Target: Clone, { let (died_tx, died_rx) = tokio::sync::mpsc::unbounded_channel(); let d = PoolDiscoverer { maker: make_service, making: None, target, load: Level::Normal, services: Slab::new(), died_tx, died_rx, limit: self.limit, }; Pool { balance: Balance::new(Box::pin(d)), options: *self, ewma: self.init, } } } /// A dynamically sized, load-balanced pool of `Service` instances. pub struct Pool<MS, Target, Request> where MS: MakeService<Target, Request>, MS::MakeError: Into<crate::BoxError>, MS::Error: Into<crate::BoxError>, Target: Clone, { // the Pin<Box<_>> here is needed since Balance requires the Service to be Unpin balance: Balance<Pin<Box<PoolDiscoverer<MS, Target, Request>>>, Request>, options: Builder, ewma: f64, } impl<MS, Target, Request> fmt::Debug for Pool<MS, Target, Request> where MS: MakeService<Target, Request> + fmt::Debug, MS::MakeError: Into<crate::BoxError>, MS::Error: Into<crate::BoxError>, Target: Clone + fmt::Debug, MS::Service: fmt::Debug, Request: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Pool") .field("balance", &self.balance) .field("options", &self.options) .field("ewma", &self.ewma) .finish() } } impl<MS, Target, Request> Pool<MS, Target, Request> where MS: MakeService<Target, Request>, MS::Service: Load, <MS::Service as Load>::Metric: std::fmt::Debug, MS::MakeError: Into<crate::BoxError>, MS::Error: Into<crate::BoxError>, Target: Clone, { /// Construct a new dynamically sized `Pool`. /// /// If many calls to `poll_ready` return `Pending`, `new_service` is used to /// construct another `Service` that is then added to the load-balanced pool. /// If many calls to `poll_ready` succeed, the most recently added `Service` /// is dropped from the pool. pub fn new(make_service: MS, target: Target) -> Self {
        Builder::new().build(make_service, target)
    }
}
type PinBalance<S, Request> = Balance<Pin<Box<S>>, Request>; impl<MS, Target, Req> Service<Req> for Pool<MS, Target, Req> where MS: MakeService<Target, Req>, MS::Service: Load, <MS::Service as Load>::Metric: std::fmt::Debug, MS::MakeError: Into<crate::BoxError>, MS::Error: Into<crate::BoxError>, Target: Clone, { type Response = <PinBalance<PoolDiscoverer<MS, Target, Req>, Req> as Service<Req>>::Response; type Error = <PinBalance<PoolDiscoverer<MS, Target, Req>, Req> as Service<Req>>::Error; type Future = <PinBalance<PoolDiscoverer<MS, Target, Req>, Req> as Service<Req>>::Future; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { if let Poll::Ready(()) = self.balance.poll_ready(cx)? { // services was ready -- there are enough services // update ewma with a 0 sample self.ewma = (1.0 - self.options.alpha) * self.ewma; let discover = self.balance.discover_mut().as_mut().project(); if self.ewma < self.options.low { if *discover.load != Level::Low { tracing::trace!({ ewma = %self.ewma }, "pool is over-provisioned"); } *discover.load = Level::Low; if discover.services.len() > 1 { // reset EWMA so we don't immediately try to remove another service self.ewma = self.options.init; } } else { if *discover.load != Level::Normal { tracing::trace!({ ewma = %self.ewma }, "pool is appropriately provisioned"); } *discover.load = Level::Normal; } return Poll::Ready(Ok(())); } let discover = self.balance.discover_mut().as_mut().project(); if discover.making.is_none() { // no services are ready -- we're overloaded // update ewma with a 1 sample self.ewma = self.options.alpha + (1.0 - self.options.alpha) * self.ewma; if self.ewma > self.options.high { if *discover.load != Level::High { tracing::trace!({ ewma = %self.ewma }, "pool is under-provisioned"); } *discover.load = Level::High; // don't reset the EWMA -- in theory, poll_ready should now start returning // `Ready`, so we won't try to launch another service immediately. // we clamp it to high though in case the # of services is limited. self.ewma = self.options.high; // we need to call balance again for PoolDiscover to realize // it can make a new service return self.balance.poll_ready(cx); } else { *discover.load = Level::Normal; } } Poll::Pending } fn call(&mut self, req: Req) -> Self::Future { self.balance.call(req) } } #[doc(hidden)] #[derive(Debug)] pub struct DropNotifyService<Svc> { svc: Svc, id: usize, notify: tokio::sync::mpsc::UnboundedSender<usize>, } impl<Svc> Drop for DropNotifyService<Svc> { fn drop(&mut self) { let _ = self.notify.send(self.id).is_ok(); } } impl<Svc: Load> Load for DropNotifyService<Svc> { type Metric = Svc::Metric; fn load(&self) -> Self::Metric { self.svc.load() } } impl<Request, Svc: Service<Request>> Service<Request> for DropNotifyService<Svc> { type Response = Svc::Response; type Future = Svc::Future; type Error = Svc::Error; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { self.svc.poll_ready(cx) } fn call(&mut self, req: Request) -> Self::Future { self.svc.call(req) } }
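// Hedged note (not part of this module): the load estimate updated in
// `Pool::poll_ready` above is a plain exponential moving average. With a
// sample `s` of 0.0 (inner service was ready) or 1.0 (it was pending) and
// smoothing factor `alpha`:
//
//     ewma = alpha * s + (1.0 - alpha) * ewma
//
// which specializes to the two updates in `poll_ready`:
//
//     ready:   ewma = (1.0 - alpha) * ewma            // s = 0.0
//     pending: ewma = alpha + (1.0 - alpha) * ewma    // s = 1.0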
solution_test.py
""" Advent of code 2020 day 08/1 """ import unittest from solution import solution class MyTest(unittest.TestCase): """Unist tests for actual day""" def
(self): """ Test from the task """ self.assertEqual(solution("""\ nop +0 acc +1 jmp +4 acc +3 jmp -3 acc -99 acc +1 jmp -4 acc +6\ """), 5) if __name__ == '__main__': unittest.main()
test_basic
Template.Widget.Form.js
/// <reference path="VsixMvcAppResult.A.Intellisense.js" />

jQuery.widget("ui.widgetFormItem", jQuery.ui.widgetBase, {
    options: {
    },
    _create: function () {
        // TODO: check this is really needed
        jQuery.ui.widgetBase.prototype._create.call(this);
        var self = this;
        jQuery(this.element)
            .find(':input')
            .change(function () {
                jQuery(self.element).removeClass('ui-state-error').find('div.ui-widgetForm-inputError').remove();
                self._trigger('changed', null, jQuery(this).attr('id'));
            });
    },
    _init: function () {
        // TODO: check this is really needed
        jQuery.ui.widgetBase.prototype._init.call(this);
    }
    , destroy: function () {
// TODO: check this is really needed jQuery.ui.widgetBase.prototype.destroy.call(this); } }); jQuery.widget("ui.widgetFormSummary", jQuery.ui.widgetBase, { options: { }, _create: function () { // TODO: check this is really needed jQuery.ui.widgetBase.prototype._create.call(this); }, _init: function () { // TODO: check this is really needed jQuery.ui.widgetBase.prototype._init.call(this); } , destroy: function () { // TODO: check this is really needed jQuery.ui.widgetBase.prototype.destroy.call(this); } , deleteByKey: function (key) { jQuery(this.element).find('li[modelkey="' + key + '"]').remove(); if (jQuery(this.element).find('ul').find('li').length == 0) { jQuery(this.element).hide(); } } });
minimum_genetic_mutation.go
package leetcode

// Minimum genetic mutation: BFS over one-letter mutations through the bank.
func minMutation(start string, end string, bank []string) int {
	bankSet := make(map[string]bool)
	for _, s := range bank {
		bankSet[s] = true
	}
	if !bankSet[end] {
		return -1
	}
	res := 0
	queue, letters := []string{start}, []byte{'A', 'C', 'G', 'T'}
	for len(queue) > 0 {
		n := len(queue)
		for i := 0; i < n; i++ {
			cur := queue[0]
			queue = queue[1:]
			if cur == end {
				return res
			}
			for j := 0; j < len(cur); j++ {
				modStr := []byte(cur)
				for _, c := range letters {
					modStr[j] = c
					s := string(modStr)
					if bankSet[s] {
						queue = append(queue, s)
						delete(bankSet, s)
					}
				}
			}
		}
		res++
	}
	return -1
}
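// Hedged usage sketch (the standard examples for this problem):
//
//   minMutation("AACCGGTT", "AACCGGTA", []string{"AACCGGTA"})                         // 1
//   minMutation("AACCGGTT", "AAACGGTA", []string{"AACCGGTA", "AACCGCTA", "AAACGGTA"}) // 2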
pypi.go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"time"
)

type PyPiProject struct {
	Info     `json:"info"`
	Releases map[string][]Release `json:"releases"`
}

type Info struct {
	LatestVersion string `json:"version"`
}

type Release struct {
	Digests       `json:"digests"`
	Filename      string    `json:"filename"`
	PackageType   string    `json:"packagetype"`
	PythonVersion string    `json:"python_version"`
	URL           string    `json:"url"`
	UploadTime    time.Time `json:"upload_time_iso_8601"`
}

type Digests struct {
	MD5    string `json:"md5"`
	SHA256 string `json:"sha256"`
}

func get(url string) []byte {
	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	// close the body once it has been fully read
	defer resp.Body.Close()
	bytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	return bytes
}

func pypiMetadata(pkg string) PyPiProject {
	bytes := get(fmt.Sprintf("https://pypi.org/pypi/%s/json", pkg))
	project := PyPiProject{}
	if err := json.Unmarshal(bytes, &project); err != nil {
		log.Fatal(err)
	}
	return project
}
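// Hedged usage sketch (hypothetical main, not in the original file):
//
//   func main() {
//       project := pypiMetadata("requests")
//       fmt.Println(project.LatestVersion)
//       for version, files := range project.Releases {
//           fmt.Println(version, len(files))
//       }
//   }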
onnx_test_neg.go
package onnxtest

// this file is auto-generated... DO NOT EDIT

import (
	"github.com/owulveryck/onnx-go/backend/testbackend"
	"gorgonia.org/tensor"
)

func init() {
	testbackend.Register("Neg", "TestNeg", NewTestNeg)
}

// NewTestNeg version: 3.
func NewTestNeg
() *testbackend.TestCase { return &testbackend.TestCase{ OpType: "Neg", Title: "TestNeg", ModelB: []byte{0x8, 0x3, 0x12, 0xc, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2d, 0x74, 0x65, 0x73, 0x74, 0x3a, 0x49, 0xa, 0xb, 0xa, 0x1, 0x78, 0x12, 0x1, 0x79, 0x22, 0x3, 0x4e, 0x65, 0x67, 0x12, 0x8, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x65, 0x67, 0x5a, 0x17, 0xa, 0x1, 0x78, 0x12, 0x12, 0xa, 0x10, 0x8, 0x1, 0x12, 0xc, 0xa, 0x2, 0x8, 0x3, 0xa, 0x2, 0x8, 0x4, 0xa, 0x2, 0x8, 0x5, 0x62, 0x17, 0xa, 0x1, 0x79, 0x12, 0x12, 0xa, 0x10, 0x8, 0x1, 0x12, 0xc, 0xa, 0x2, 0x8, 0x3, 0xa, 0x2, 0x8, 0x4, 0xa, 0x2, 0x8, 0x5, 0x42, 0x2, 0x10, 0x9}, /* &pb.NodeProto{ Input: []string{"x"}, Output: []string{"y"}, Name: "", OpType: "Neg", Attributes: ([]*pb.AttributeProto) <nil> , }, */ Input: []tensor.Tensor{ tensor.New( tensor.WithShape(3, 4, 5), tensor.WithBacking([]float32{1.7640524, 0.4001572, 0.978738, 2.2408931, 1.867558, -0.9772779, 0.95008844, -0.1513572, -0.10321885, 0.41059852, 0.14404356, 1.4542735, 0.7610377, 0.121675014, 0.44386324, 0.33367434, 1.4940791, -0.20515826, 0.3130677, -0.85409576, -2.5529897, 0.6536186, 0.8644362, -0.742165, 2.2697546, -1.4543657, 0.045758516, -0.18718386, 1.5327792, 1.4693588, 0.15494743, 0.37816253, -0.88778573, -1.9807965, -0.34791216, 0.15634897, 1.2302907, 1.2023798, -0.3873268, -0.30230275, -1.048553, -1.420018, -1.7062702, 1.9507754, -0.5096522, -0.4380743, -1.2527953, 0.7774904, -1.6138978, -0.21274029, -0.89546657, 0.3869025, -0.51080513, -1.1806322, -0.028182229, 0.42833188, 0.06651722, 0.3024719, -0.6343221, -0.36274117}), ), }, ExpectedOutput: []tensor.Tensor{ tensor.New( tensor.WithShape(3, 4, 5), tensor.WithBacking([]float32{-1.7640524, -0.4001572, -0.978738, -2.2408931, -1.867558, 0.9772779, -0.95008844, 0.1513572, 0.10321885, -0.41059852, -0.14404356, -1.4542735, -0.7610377, -0.121675014, -0.44386324, -0.33367434, -1.4940791, 0.20515826, -0.3130677, 0.85409576, 2.5529897, -0.6536186, -0.8644362, 0.742165, -2.2697546, 1.4543657, -0.045758516, 0.18718386, -1.5327792, -1.4693588, -0.15494743, -0.37816253, 0.88778573, 1.9807965, 0.34791216, -0.15634897, -1.2302907, -1.2023798, 0.3873268, 0.30230275, 1.048553, 1.420018, 1.7062702, -1.9507754, 0.5096522, 0.4380743, 1.2527953, -0.7774904, 1.6138978, 0.21274029, 0.89546657, -0.3869025, 0.51080513, 1.1806322, 0.028182229, -0.42833188, -0.06651722, -0.3024719, 0.6343221, 0.36274117}), ), }, } }
collapse-tree-test.js
import { module, test } from 'qunit';
import { setupTest } from 'ember-qunit';

import { A as emberA } from '@ember/array';
import { get, set } from '@ember/object';
import { run } from '@ember/runloop';

import CollapseTree from 'ember-table/-private/collapse-tree';

// Cache is required for the row metas
let rowMetaCache, tree;

function metaFor(value) {
  return rowMetaCache.get(value);
}

function generateTree
(seq) { let children = emberA(); seq.forEach(item => { if (Array.isArray(item)) { let lastNode = children[children.length - 1]; lastNode.children = generateTree(item); } else { children.pushObject({ value: item }); } }); return children; } module('Unit | Private | CollapseTree', function(hooks) { setupTest(hooks); hooks.beforeEach(function() { rowMetaCache = new Map(); }); hooks.afterEach(function() { // Clean up so we can look for memory leaks more easily run(() => { for (let [key, value] of rowMetaCache.entries()) { value.destroy(); rowMetaCache.delete(key); } tree.destroy(); }); }); test('empty tree works', function(assert) { tree = CollapseTree.create({ rows: [] }); assert.equal(tree.objectAt(-1), undefined); assert.equal(tree.objectAt(0), undefined); assert.equal(tree.objectAt(1), undefined); }); test('basic tree works', function(assert) { tree = CollapseTree.create({ rows: generateTree([0, [1, [2, 3], 4, [5, 6]]]), enableTree: true, rowMetaCache, }); let expectedDepth = [0, 1, 2, 2, 1, 2, 2]; let length = get(tree, 'length'); assert.equal(length, 7); for (let i = 0; i < 7; i++) { assert.equal(tree.objectAt(i).value, i); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } assert.equal(tree.objectAt(length + 1), undefined); assert.equal(tree.objectAt(-1), undefined); }); test('rowMeta next works', function(assert) { tree = CollapseTree.create({ rows: generateTree([0, [1, [2, 3], 4, [5, 6]]]), enableTree: true, rowMetaCache, }); let expectedNext = [ { children: [{ value: 2 }, { value: 3 }], value: 1, }, { value: 2 }, { value: 3 }, { children: [{ value: 5 }, { value: 6 }], value: 4, }, { value: 5 }, { value: 6 }, null, ]; assert.equal(get(tree, 'length'), 7); for (let i = 0; i < 7; i++) { assert.deepEqual(metaFor(tree.objectAt(i)).get('next'), expectedNext[i]); } }); test('rowMeta prev works', function(assert) { tree = CollapseTree.create({ rows: generateTree([0, [1, [2, 3], 4, [5, 6]]]), enableTree: true, rowMetaCache, }); let expectedPrev = [ null, { children: [ { children: [{ value: 2 }, { value: 3 }], value: 1, }, { children: [{ value: 5 }, { value: 6 }], value: 4, }, ], value: 0, }, { children: [{ value: 2 }, { value: 3 }], value: 1, }, { value: 2 }, { value: 3 }, { children: [{ value: 5 }, { value: 6 }], value: 4, }, { value: 5 }, ]; assert.equal(get(tree, 'length'), 7); for (let i = 0; i < 7; i++) { assert.deepEqual(metaFor(tree.objectAt(i)).get('prev'), expectedPrev[i]); } }); test('rowMeta first works with at least 1 row', function(assert) { tree = CollapseTree.create({ rows: generateTree([0, 1]), enableTree: true, rowMetaCache, }); let expectedFirst = { value: 0 }; for (let i = 0; i < 2; i++) { assert.deepEqual(metaFor(tree.objectAt(i)).get('first'), expectedFirst); } }); test('rowMeta last works with at least 1 row', function(assert) { tree = CollapseTree.create({ rows: generateTree([0, 1, [2, 3]]), enableTree: true, rowMetaCache, }); let expectedLast = { value: 3 }; for (let i = 0; i < 4; i++) { assert.deepEqual(metaFor(tree.objectAt(i)).get('last'), expectedLast); } }); test('can disable tree', function(assert) { tree = CollapseTree.create({ rows: generateTree([0, [1, 2]]), enableTree: false, rowMetaCache, }); let expectedDepth = [0]; assert.equal(get(tree, 'length'), 1); for (let i = 0; i < 1; i++) { assert.equal(tree.objectAt(i).value, i); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } tree.set('enableTree', true); expectedDepth = [0, 1, 1]; assert.equal(get(tree, 'length'), 3); for (let i = 0; i < 3; i++) { 
assert.equal(tree.objectAt(i).value, i); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } }); test('works with multiroot tree', function(assert) { tree = CollapseTree.create({ rows: generateTree([0, [1, [2, 3], 4, [5, 6]], 7, [8, 9]]), enableTree: true, rowMetaCache, }); let expectedDepth = [0, 1, 2, 2, 1, 2, 2, 0, 1, 1]; assert.equal(get(tree, 'length'), 10); for (let i = 0; i < 10; i++) { assert.equal(tree.objectAt(i).value, i); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } }); test('intermediate leaf nodes work', function(assert) { tree = CollapseTree.create({ rows: generateTree([0, [1, 2, [3, 4], 5, 6, [7, 8]]]), enableTree: true, rowMetaCache, }); let expectedDepth = [0, 1, 1, 2, 2, 1, 1, 2, 2]; assert.equal(get(tree, 'length'), 9); for (let i = 0; i < 9; i++) { assert.equal(tree.objectAt(i).value, i); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } }); test('can collapse nodes', function(assert) { let rows = generateTree([0, [1, [2, 3], 4, [5, 6]]]); tree = CollapseTree.create({ rows, rowMetaCache, enableTree: true, enableCollapse: true, }); set(rows[0].children[0], 'isCollapsed', true); let expectedValue = [0, 1, 4, 5, 6]; let expectedDepth = [0, 1, 1, 2, 2]; assert.equal(get(tree, 'length'), 5); for (let i = 0; i < 5; i++) { assert.equal(tree.objectAt(i).value, expectedValue[i]); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } set(rows[0].children[0], 'isCollapsed', false); expectedValue = [0, 1, 2, 3, 4, 5, 6]; expectedDepth = [0, 1, 2, 2, 1, 2, 2]; assert.equal(get(tree, 'length'), 7); for (let i = 0; i < 5; i++) { assert.equal(tree.objectAt(i).value, expectedValue[i]); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } }); test('can collapse nodes without modifying underlying data structure', function(assert) { let rows = generateTree([0, [1, [2, 3], 4, [5, 6]]]); tree = CollapseTree.create({ rows, rowMetaCache, enableTree: true, enableCollapse: true, }); metaFor(tree.objectAt(1)).toggleCollapse(); assert.equal(rows[0].children[0].isCollapsed, undefined); let expectedValue = [0, 1, 4, 5, 6]; let expectedDepth = [0, 1, 1, 2, 2]; assert.equal(get(tree, 'length'), 5); for (let i = 0; i < 5; i++) { assert.equal(tree.objectAt(i).value, expectedValue[i]); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } metaFor(tree.objectAt(1)).toggleCollapse(); assert.equal(rows[0].children[0].isCollapsed, undefined); expectedValue = [0, 1, 2, 3, 4, 5, 6]; expectedDepth = [0, 1, 2, 2, 1, 2, 2]; assert.equal(get(tree, 'length'), 7); for (let i = 0; i < 5; i++) { assert.equal(tree.objectAt(i).value, expectedValue[i]); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } }); test('can disable collapse', function(assert) { let rows = generateTree([0, [1, [2, 3], 4, [5, 6]]]); tree = CollapseTree.create({ rows, rowMetaCache, enableTree: true }); assert.equal(metaFor(tree.objectAt(1)).get('canCollapse'), false, 'collapse is disabled'); metaFor(tree.objectAt(1)).toggleCollapse(); let expectedValue = [0, 1, 2, 3, 4, 5, 6]; let expectedDepth = [0, 1, 2, 2, 1, 2, 2]; assert.equal(get(tree, 'length'), 7); for (let i = 0; i < 7; i++) { assert.equal(tree.objectAt(i).value, expectedValue[i]); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } tree.set('enableCollapse', true); metaFor(tree.objectAt(1)).toggleCollapse(); expectedValue = [0, 1, 4, 5, 6]; expectedDepth = [0, 1, 1, 2, 2]; assert.equal(get(tree, 'length'), 
5); for (let i = 0; i < 5; i++) { assert.equal(tree.objectAt(i).value, expectedValue[i]); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } }); test('can disable collapse at a row level', function(assert) { let rows = generateTree([0, [1, [2, 3], 4, [5, 6]]]); tree = CollapseTree.create({ rows, rowMetaCache, enableTree: true }); let row4Meta = metaFor(tree.objectAt(4)); assert.equal( row4Meta.get('_rowValue.disableCollapse') === true, false, 'collapse is not yet disabled' ); let expectedValue = [0, 1, 2, 3, 4, 5, 6]; let expectedDepth = [0, 1, 2, 2, 1, 2, 2]; assert.equal(get(tree, 'length'), 7); for (let i = 0; i < 7; i++) { assert.equal(tree.objectAt(i).value, expectedValue[i]); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } row4Meta.set('_rowValue.disableCollapse', true); // we can no longer collapse the tree at this row assert.equal(get(row4Meta, 'canCollapse'), false); // but the tree remains unchanged other than the ability to collapse assert.equal(get(tree, 'length'), 7); for (let i = 0; i < 7; i++) { assert.equal(tree.objectAt(i).value, expectedValue[i]); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } }); test('can update nodes', function(assert) { let rows = generateTree([0, [1, [2, 3], 6, [7, 8]]]); let subrows = generateTree([4, 5]); tree = CollapseTree.create({ rows, rowMetaCache, enableTree: true }); rows[0].children[0].children.pushObjects(subrows); rows[0].children[1].children.popObject(); let expectedDepth = [0, 1, 2, 2, 2, 2, 1, 2]; assert.equal(get(tree, 'length'), 8); for (let i = 0; i < 8; i++) { assert.equal(tree.objectAt(i).value, i); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } }); test('can add and remove children', function(assert) { let rows = generateTree([0, [1, [2, 3], 6, [7, 8]]]); let subrows = generateTree([4, 5]); tree = CollapseTree.create({ rows, rowMetaCache, enableTree: true }); set(rows[0].children[0].children[1], 'children', subrows); set(rows[0].children[1], 'children', null); let expectedDepth = [0, 1, 2, 2, 3, 3, 1]; assert.equal(get(tree, 'length'), 7); for (let i = 0; i < 7; i++) { assert.equal(tree.objectAt(i).value, i); assert.equal(metaFor(tree.objectAt(i)).get('depth'), expectedDepth[i]); } }); test('works with single level tree', function(assert) { let rows = [{ label: 'A', children: [] }, { label: 'B', children: [] }]; tree = CollapseTree.create({ rows, rowMetaCache }); run(() => { tree.get('length'); tree.destroy(); assert.ok(true, 'exception not thrown'); }); }); test('can add child to leaf node', function(assert) { let rows = [{ label: 'A', children: emberA([]) }, { label: 'B', children: emberA([]) }]; tree = CollapseTree.create({ rows, rowMetaCache, enableTree: true }); run(() => { assert.equal(tree.get('length'), 2, 'tree starts out with length 2'); rows[0].children.pushObject({ label: 'C', children: [] }); assert.equal(tree.get('length'), 3, 'adding a child to a leaf node increases length'); }); }); });
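// Hedged note (not part of the original tests): generateTree at the top of this
// file expands a flat spec into nested row objects, where an array attaches
// children to the item before it, e.g.
//
//   generateTree([0, [1, 2]])
//   // -> [{ value: 0, children: [{ value: 1 }, { value: 2 }] }]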
workspace.py
class Workspace:
    def __init__(self, name: str = None):  # PK
        self.name = name
        self.members = []
        self.member_ids = []
        self.roles = {}  # {owners: [user1], administrators: [user5, user8], etc.}
        self.channels = []

    #name
    def getName(self):
        return self.name

    def setName(self, name: str):
        self.name = name
    #members
    def getMembers(self):
        return self.members

    def addMembers(self, user_id: int):
        self.members.append(user_id)
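# Hedged usage sketch:
#
#     ws = Workspace("engineering")
#     ws.setName("platform")
#     ws.addMembers(42)
#     ws.getMembers()  # -> [42]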
iterator.rs
// ignore-tidy-filelength // This file almost exclusively consists of the definition of `Iterator`. We // can't split that into multiple files. use crate::cmp::{self, Ordering}; use crate::ops::{Add, ControlFlow, Try}; use super::super::TrustedRandomAccess; use super::super::{Chain, Cloned, Copied, Cycle, Enumerate, Filter, FilterMap, Fuse}; use super::super::{FlatMap, Flatten}; use super::super::{FromIterator, Product, Sum, Zip}; use super::super::{ Inspect, Map, MapWhile, Peekable, Rev, Scan, Skip, SkipWhile, StepBy, Take, TakeWhile, }; fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {} /// An interface for dealing with iterators. /// /// This is the main iterator trait. For more about the concept of iterators /// generally, please see the [module-level documentation]. In particular, you /// may want to know how to [implement `Iterator`][impl]. /// /// [module-level documentation]: crate::iter /// [impl]: crate::iter#implementing-iterator #[stable(feature = "rust1", since = "1.0.0")] #[rustc_on_unimplemented( on( _Self = "[std::ops::Range<Idx>; 1]", label = "if you meant to iterate between two values, remove the square brackets", note = "`[start..end]` is an array of one `Range`; you might have meant to have a `Range` \ without the brackets: `start..end`" ), on( _Self = "[std::ops::RangeFrom<Idx>; 1]", label = "if you meant to iterate from a value onwards, remove the square brackets", note = "`[start..]` is an array of one `RangeFrom`; you might have meant to have a \ `RangeFrom` without the brackets: `start..`, keeping in mind that iterating over an \ unbounded iterator will run forever unless you `break` or `return` from within the \ loop" ), on( _Self = "[std::ops::RangeTo<Idx>; 1]", label = "if you meant to iterate until a value, remove the square brackets and add a \ starting value", note = "`[..end]` is an array of one `RangeTo`; you might have meant to have a bounded \ `Range` without the brackets: `0..end`" ), on( _Self = "[std::ops::RangeInclusive<Idx>; 1]", label = "if you meant to iterate between two values, remove the square brackets", note = "`[start..=end]` is an array of one `RangeInclusive`; you might have meant to have a \ `RangeInclusive` without the brackets: `start..=end`" ), on( _Self = "[std::ops::RangeToInclusive<Idx>; 1]", label = "if you meant to iterate until a value (including it), remove the square brackets \ and add a starting value", note = "`[..=end]` is an array of one `RangeToInclusive`; you might have meant to have a \ bounded `RangeInclusive` without the brackets: `0..=end`" ), on( _Self = "std::ops::RangeTo<Idx>", label = "if you meant to iterate until a value, add a starting value", note = "`..end` is a `RangeTo`, which cannot be iterated on; you might have meant to have a \ bounded `Range`: `0..end`" ), on( _Self = "std::ops::RangeToInclusive<Idx>", label = "if you meant to iterate until a value (including it), add a starting value", note = "`..=end` is a `RangeToInclusive`, which cannot be iterated on; you might have meant \ to have a bounded `RangeInclusive`: `0..=end`" ), on( _Self = "&str", label = "`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`" ), on( _Self = "std::string::String", label = "`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`" ), on( _Self = "[]", label = "borrow the array with `&` or call `.iter()` on it to iterate over it", note = "arrays are not iterators, but slices like the following are: `&[1, 2, 3]`" ), on( _Self = "{integral}", note = "if you want to iterate between `start` until a 
value `end`, use the exclusive range \ syntax `start..end` or the inclusive range syntax `start..=end`" ), label = "`{Self}` is not an iterator", message = "`{Self}` is not an iterator" )] #[doc(spotlight)] #[must_use = "iterators are lazy and do nothing unless consumed"] pub trait Iterator { /// The type of the elements being iterated over. #[stable(feature = "rust1", since = "1.0.0")] type Item; /// Advances the iterator and returns the next value. /// /// Returns [`None`] when iteration is finished. Individual iterator /// implementations may choose to resume iteration, and so calling `next()` /// again may or may not eventually start returning [`Some(Item)`] again at some /// point. /// /// [`Some(Item)`]: Some /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// // A call to next() returns the next value... /// assert_eq!(Some(&1), iter.next()); /// assert_eq!(Some(&2), iter.next()); /// assert_eq!(Some(&3), iter.next()); /// /// // ... and then None once it's over. /// assert_eq!(None, iter.next()); /// /// // More calls may or may not return `None`. Here, they always will. /// assert_eq!(None, iter.next()); /// assert_eq!(None, iter.next()); /// ``` #[lang = "next"] #[stable(feature = "rust1", since = "1.0.0")] fn next(&mut self) -> Option<Self::Item>; /// Returns the bounds on the remaining length of the iterator. /// /// Specifically, `size_hint()` returns a tuple where the first element /// is the lower bound, and the second element is the upper bound. /// /// The second half of the tuple that is returned is an [`Option`]`<`[`usize`]`>`. /// A [`None`] here means that either there is no known upper bound, or the /// upper bound is larger than [`usize`]. /// /// # Implementation notes /// /// It is not enforced that an iterator implementation yields the declared /// number of elements. A buggy iterator may yield less than the lower bound /// or more than the upper bound of elements. /// /// `size_hint()` is primarily intended to be used for optimizations such as /// reserving space for the elements of the iterator, but must not be /// trusted to e.g., omit bounds checks in unsafe code. An incorrect /// implementation of `size_hint()` should not lead to memory safety /// violations. /// /// That said, the implementation should provide a correct estimation, /// because otherwise it would be a violation of the trait's protocol. /// /// The default implementation returns `(0, `[`None`]`)` which is correct for any /// iterator. /// /// [`usize`]: type@usize /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// let iter = a.iter(); /// /// assert_eq!((3, Some(3)), iter.size_hint()); /// ``` /// /// A more complex example: /// /// ``` /// // The even numbers from zero to ten. /// let iter = (0..10).filter(|x| x % 2 == 0); /// /// // We might iterate from zero to ten times. Knowing that it's five /// // exactly wouldn't be possible without executing filter(). 
/// assert_eq!((0, Some(10)), iter.size_hint()); /// /// // Let's add five more numbers with chain() /// let iter = (0..10).filter(|x| x % 2 == 0).chain(15..20); /// /// // now both bounds are increased by five /// assert_eq!((5, Some(15)), iter.size_hint()); /// ``` /// /// Returning `None` for an upper bound: /// /// ``` /// // an infinite iterator has no upper bound /// // and the maximum possible lower bound /// let iter = 0..; /// /// assert_eq!((usize::MAX, None), iter.size_hint()); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn size_hint(&self) -> (usize, Option<usize>) { (0, None) } /// Consumes the iterator, counting the number of iterations and returning it. /// /// This method will call [`next`] repeatedly until [`None`] is encountered, /// returning the number of times it saw [`Some`]. Note that [`next`] has to be /// called at least once even if the iterator does not have any elements. /// /// [`next`]: Iterator::next /// /// # Overflow Behavior /// /// The method does no guarding against overflows, so counting elements of /// an iterator with more than [`usize::MAX`] elements either produces the /// wrong result or panics. If debug assertions are enabled, a panic is /// guaranteed. /// /// # Panics /// /// This function might panic if the iterator has more than [`usize::MAX`] /// elements. /// /// [`usize::MAX`]: crate::usize::MAX /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// assert_eq!(a.iter().count(), 3); /// /// let a = [1, 2, 3, 4, 5]; /// assert_eq!(a.iter().count(), 5); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn count(self) -> usize where Self: Sized, { #[inline] fn add1<T>(count: usize, _: T) -> usize { // Might overflow. Add::add(count, 1) } self.fold(0, add1) } /// Consumes the iterator, returning the last element. /// /// This method will evaluate the iterator until it returns [`None`]. While /// doing so, it keeps track of the current element. After [`None`] is /// returned, `last()` will then return the last element it saw. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// assert_eq!(a.iter().last(), Some(&3)); /// /// let a = [1, 2, 3, 4, 5]; /// assert_eq!(a.iter().last(), Some(&5)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn last(self) -> Option<Self::Item> where Self: Sized, { #[inline] fn some<T>(_: Option<T>, x: T) -> Option<T> { Some(x) } self.fold(None, some) } /// Returns the `n`th element of the iterator. /// /// Like most indexing operations, the count starts from zero, so `nth(0)` /// returns the first value, `nth(1)` the second, and so on. /// /// Note that all preceding elements, as well as the returned element, will be /// consumed from the iterator. That means that the preceding elements will be /// discarded, and also that calling `nth(0)` multiple times on the same iterator /// will return different elements. /// /// `nth()` will return [`None`] if `n` is greater than or equal to the length of the /// iterator. 
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let a = [1, 2, 3];
    /// assert_eq!(a.iter().nth(1), Some(&2));
    /// ```
    ///
    /// Calling `nth()` multiple times doesn't rewind the iterator:
    ///
    /// ```
    /// let a = [1, 2, 3];
    ///
    /// let mut iter = a.iter();
    ///
    /// assert_eq!(iter.nth(1), Some(&2));
    /// assert_eq!(iter.nth(1), None);
    /// ```
    ///
    /// Returning `None` if there are less than `n + 1` elements:
    ///
    /// ```
    /// let a = [1, 2, 3];
    /// assert_eq!(a.iter().nth(10), None);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    fn nth(&mut self, mut n: usize) -> Option<Self::Item> {
        while let Some(x) = self.next() {
            if n == 0 {
                return Some(x);
            }
            n -= 1;
        }
        None
    }

    /// Creates an iterator starting at the same point, but stepping by
    /// the given amount at each iteration.
    ///
    /// Note 1: The first element of the iterator will always be returned,
    /// regardless of the step given.
    ///
    /// Note 2: The time at which ignored elements are pulled is not fixed.
    /// `StepBy` behaves like the sequence `next(), nth(step-1), nth(step-1), …`,
    /// but is also free to behave like the sequence
    /// `advance_n_and_return_first(step), advance_n_and_return_first(step), …`
    /// Which way is used may change for some iterators for performance reasons.
    /// The second way will advance the iterator earlier and may consume more items.
    ///
    /// `advance_n_and_return_first` is the equivalent of:
    /// ```
    /// fn advance_n_and_return_first<I>(iter: &mut I, total_step: usize) -> Option<I::Item>
    /// where
    ///     I: Iterator,
    /// {
    ///     let next = iter.next();
    ///     if total_step > 1 {
    ///         iter.nth(total_step-2);
    ///     }
    ///     next
    /// }
    /// ```
    ///
    /// # Panics
    ///
    /// The method will panic if the given step is `0`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let a = [0, 1, 2, 3, 4, 5];
    /// let mut iter = a.iter().step_by(2);
    ///
    /// assert_eq!(iter.next(), Some(&0));
    /// assert_eq!(iter.next(), Some(&2));
    /// assert_eq!(iter.next(), Some(&4));
    /// assert_eq!(iter.next(), None);
    /// ```
    #[inline]
    #[stable(feature = "iterator_step_by", since = "1.28.0")]
    fn step_by(self, step: usize) -> StepBy<Self>
    where
        Self: Sized,
    {
        StepBy::new(self, step)
    }

    /// Takes two iterators and creates a new iterator over both in sequence.
    ///
    /// `chain()` will return a new iterator which will first iterate over
    /// values from the first iterator and then over values from the second
    /// iterator.
    ///
    /// In other words, it links two iterators together, in a chain. 🔗
    ///
    /// [`once`] is commonly used to adapt a single value into a chain of
    /// other kinds of iteration.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let a1 = [1, 2, 3];
    /// let a2 = [4, 5, 6];
    ///
    /// let mut iter = a1.iter().chain(a2.iter());
    ///
    /// assert_eq!(iter.next(), Some(&1));
    /// assert_eq!(iter.next(), Some(&2));
    /// assert_eq!(iter.next(), Some(&3));
    /// assert_eq!(iter.next(), Some(&4));
    /// assert_eq!(iter.next(), Some(&5));
    /// assert_eq!(iter.next(), Some(&6));
    /// assert_eq!(iter.next(), None);
    /// ```
    ///
    /// Since the argument to `chain()` uses [`IntoIterator`], we can pass
    /// anything that can be converted into an [`Iterator`], not just an
    /// [`Iterator`] itself.
For example, slices (`&[T]`) implement /// [`IntoIterator`], and so can be passed to `chain()` directly: /// /// ``` /// let s1 = &[1, 2, 3]; /// let s2 = &[4, 5, 6]; /// /// let mut iter = s1.iter().chain(s2); /// /// assert_eq!(iter.next(), Some(&1)); /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), Some(&3)); /// assert_eq!(iter.next(), Some(&4)); /// assert_eq!(iter.next(), Some(&5)); /// assert_eq!(iter.next(), Some(&6)); /// assert_eq!(iter.next(), None); /// ``` /// /// If you work with Windows API, you may wish to convert [`OsStr`] to `Vec<u16>`: /// /// ``` /// #[cfg(windows)] /// fn os_str_to_utf16(s: &std::ffi::OsStr) -> Vec<u16> { /// use std::os::windows::ffi::OsStrExt; /// s.encode_wide().chain(std::iter::once(0)).collect() /// } /// ``` /// /// [`once`]: crate::iter::once /// [`OsStr`]: ../../std/ffi/struct.OsStr.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn chain<U>(self, other: U) -> Chain<Self, U::IntoIter> where Self: Sized, U: IntoIterator<Item = Self::Item>, { Chain::new(self, other.into_iter()) } /// 'Zips up' two iterators into a single iterator of pairs. /// /// `zip()` returns a new iterator that will iterate over two other /// iterators, returning a tuple where the first element comes from the /// first iterator, and the second element comes from the second iterator. /// /// In other words, it zips two iterators together, into a single one. /// /// If either iterator returns [`None`], [`next`] from the zipped iterator /// will return [`None`]. If the first iterator returns [`None`], `zip` will /// short-circuit and `next` will not be called on the second iterator. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a1 = [1, 2, 3]; /// let a2 = [4, 5, 6]; /// /// let mut iter = a1.iter().zip(a2.iter()); /// /// assert_eq!(iter.next(), Some((&1, &4))); /// assert_eq!(iter.next(), Some((&2, &5))); /// assert_eq!(iter.next(), Some((&3, &6))); /// assert_eq!(iter.next(), None); /// ``` /// /// Since the argument to `zip()` uses [`IntoIterator`], we can pass /// anything that can be converted into an [`Iterator`], not just an /// [`Iterator`] itself. For example, slices (`&[T]`) implement /// [`IntoIterator`], and so can be passed to `zip()` directly: /// /// ``` /// let s1 = &[1, 2, 3]; /// let s2 = &[4, 5, 6]; /// /// let mut iter = s1.iter().zip(s2); /// /// assert_eq!(iter.next(), Some((&1, &4))); /// assert_eq!(iter.next(), Some((&2, &5))); /// assert_eq!(iter.next(), Some((&3, &6))); /// assert_eq!(iter.next(), None); /// ``` /// /// `zip()` is often used to zip an infinite iterator to a finite one. /// This works because the finite iterator will eventually return [`None`], /// ending the zipper. Zipping with `(0..)` can look a lot like [`enumerate`]: /// /// ``` /// let enumerate: Vec<_> = "foo".chars().enumerate().collect(); /// /// let zipper: Vec<_> = (0..).zip("foo".chars()).collect(); /// /// assert_eq!((0, 'f'), enumerate[0]); /// assert_eq!((0, 'f'), zipper[0]); /// /// assert_eq!((1, 'o'), enumerate[1]); /// assert_eq!((1, 'o'), zipper[1]); /// /// assert_eq!((2, 'o'), enumerate[2]); /// assert_eq!((2, 'o'), zipper[2]); /// ``` /// /// [`enumerate`]: Iterator::enumerate /// [`next`]: Iterator::next #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn zip<U>(self, other: U) -> Zip<Self, U::IntoIter> where Self: Sized, U: IntoIterator, { Zip::new(self, other.into_iter()) } /// Takes a closure and creates an iterator which calls that closure on each /// element. 
/// /// `map()` transforms one iterator into another, by means of its argument: /// something that implements [`FnMut`]. It produces a new iterator which /// calls this closure on each element of the original iterator. /// /// If you are good at thinking in types, you can think of `map()` like this: /// If you have an iterator that gives you elements of some type `A`, and /// you want an iterator of some other type `B`, you can use `map()`, /// passing a closure that takes an `A` and returns a `B`. /// /// `map()` is conceptually similar to a [`for`] loop. However, as `map()` is /// lazy, it is best used when you're already working with other iterators. /// If you're doing some sort of looping for a side effect, it's considered /// more idiomatic to use [`for`] than `map()`. /// /// [`for`]: ../../book/ch03-05-control-flow.html#looping-through-a-collection-with-for /// [`FnMut`]: crate::ops::FnMut /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter().map(|x| 2 * x); /// /// assert_eq!(iter.next(), Some(2)); /// assert_eq!(iter.next(), Some(4)); /// assert_eq!(iter.next(), Some(6)); /// assert_eq!(iter.next(), None); /// ``` /// /// If you're doing some sort of side effect, prefer [`for`] to `map()`: /// /// ``` /// # #![allow(unused_must_use)] /// // don't do this: /// (0..5).map(|x| println!("{}", x)); /// /// // it won't even execute, as it is lazy. Rust will warn you about this. /// /// // Instead, use for: /// for x in 0..5 { /// println!("{}", x); /// } /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn map<B, F>(self, f: F) -> Map<Self, F> where Self: Sized, F: FnMut(Self::Item) -> B, { Map::new(self, f) } /// Calls a closure on each element of an iterator. /// /// This is equivalent to using a [`for`] loop on the iterator, although /// `break` and `continue` are not possible from a closure. It's generally /// more idiomatic to use a `for` loop, but `for_each` may be more legible /// when processing items at the end of longer iterator chains. In some /// cases `for_each` may also be faster than a loop, because it will use /// internal iteration on adaptors like `Chain`. /// /// [`for`]: ../../book/ch03-05-control-flow.html#looping-through-a-collection-with-for /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::sync::mpsc::channel; /// /// let (tx, rx) = channel(); /// (0..5).map(|x| x * 2 + 1) /// .for_each(move |x| tx.send(x).unwrap()); /// /// let v: Vec<_> = rx.iter().collect(); /// assert_eq!(v, vec![1, 3, 5, 7, 9]); /// ``` /// /// For such a small example, a `for` loop may be cleaner, but `for_each` /// might be preferable to keep a functional style with longer iterators: /// /// ``` /// (0..5).flat_map(|x| x * 100 .. x * 110) /// .enumerate() /// .filter(|&(i, x)| (i + x) % 3 == 0) /// .for_each(|(i, x)| println!("{}:{}", i, x)); /// ``` #[inline] #[stable(feature = "iterator_for_each", since = "1.21.0")] fn for_each<F>(self, f: F) where Self: Sized, F: FnMut(Self::Item), { #[inline] fn call<T>(mut f: impl FnMut(T)) -> impl FnMut((), T) { move |(), item| f(item) } self.fold((), call(f)); } /// Creates an iterator which uses a closure to determine if an element /// should be yielded. /// /// Given an element the closure must return `true` or `false`. The returned /// iterator will yield only the elements for which the closure returns /// true. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [0i32, 1, 2]; /// /// let mut iter = a.iter().filter(|x| x.is_positive()); /// /// assert_eq!(iter.next(), Some(&1)); /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), None); /// ``` /// /// Because the closure passed to `filter()` takes a reference, and many /// iterators iterate over references, this leads to a possibly confusing /// situation, where the type of the closure is a double reference: /// /// ``` /// let a = [0, 1, 2]; /// /// let mut iter = a.iter().filter(|x| **x > 1); // need two *s! /// /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), None); /// ``` /// /// It's common to instead use destructuring on the argument to strip away /// one: /// /// ``` /// let a = [0, 1, 2]; /// /// let mut iter = a.iter().filter(|&x| *x > 1); // both & and * /// /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), None); /// ``` /// /// or both: /// /// ``` /// let a = [0, 1, 2]; /// /// let mut iter = a.iter().filter(|&&x| x > 1); // two &s /// /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), None); /// ``` /// /// of these layers. /// /// Note that `iter.filter(f).next()` is equivalent to `iter.find(f)`. #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn filter<P>(self, predicate: P) -> Filter<Self, P> where Self: Sized, P: FnMut(&Self::Item) -> bool, { Filter::new(self, predicate) } /// Creates an iterator that both filters and maps. /// /// The returned iterator yields only the `value`s for which the supplied /// closure returns `Some(value)`. /// /// `filter_map` can be used to make chains of [`filter`] and [`map`] more /// concise. The example below shows how a `map().filter().map()` can be /// shortened to a single call to `filter_map`. /// /// [`filter`]: Iterator::filter /// [`map`]: Iterator::map /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = ["1", "two", "NaN", "four", "5"]; /// /// let mut iter = a.iter().filter_map(|s| s.parse().ok()); /// /// assert_eq!(iter.next(), Some(1)); /// assert_eq!(iter.next(), Some(5)); /// assert_eq!(iter.next(), None); /// ``` /// /// Here's the same example, but with [`filter`] and [`map`]: /// /// ``` /// let a = ["1", "two", "NaN", "four", "5"]; /// let mut iter = a.iter().map(|s| s.parse()).filter(|s| s.is_ok()).map(|s| s.unwrap()); /// assert_eq!(iter.next(), Some(1)); /// assert_eq!(iter.next(), Some(5)); /// assert_eq!(iter.next(), None); /// ``` /// /// [`Option<T>`]: Option #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn filter_map<B, F>(self, f: F) -> FilterMap<Self, F> where Self: Sized, F: FnMut(Self::Item) -> Option<B>, { FilterMap::new(self, f) } /// Creates an iterator which gives the current iteration count as well as /// the next value. /// /// The iterator returned yields pairs `(i, val)`, where `i` is the /// current index of iteration and `val` is the value returned by the /// iterator. /// /// `enumerate()` keeps its count as a [`usize`]. If you want to count by a /// different sized integer, the [`zip`] function provides similar /// functionality. /// /// # Overflow Behavior /// /// The method does no guarding against overflows, so enumerating more than /// [`usize::MAX`] elements either produces the wrong result or panics. If /// debug assertions are enabled, a panic is guaranteed. /// /// # Panics /// /// The returned iterator might panic if the to-be-returned index would /// overflow a [`usize`]. 
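    ///
    /// As noted above, if you want the count in a different integer type, a
    /// small sketch of pairing each element with a `u32` index via [`zip`]:
    ///
    /// ```
    /// let a = ['a', 'b', 'c'];
    ///
    /// // `(0u32..)` yields `u32` indices instead of `usize`
    /// let mut iter = (0u32..).zip(a.iter());
    ///
    /// assert_eq!(iter.next(), Some((0, &'a')));
    /// assert_eq!(iter.next(), Some((1, &'b')));
    /// ```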
/// /// [`usize`]: type@usize /// [`usize::MAX`]: crate::usize::MAX /// [`zip`]: Iterator::zip /// /// # Examples /// /// ``` /// let a = ['a', 'b', 'c']; /// /// let mut iter = a.iter().enumerate(); /// /// assert_eq!(iter.next(), Some((0, &'a'))); /// assert_eq!(iter.next(), Some((1, &'b'))); /// assert_eq!(iter.next(), Some((2, &'c'))); /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn enumerate(self) -> Enumerate<Self> where Self: Sized, { Enumerate::new(self) } /// Creates an iterator which can use [`peek`] to look at the next element of /// the iterator without consuming it. /// /// Adds a [`peek`] method to an iterator. See its documentation for /// more information. /// /// Note that the underlying iterator is still advanced when [`peek`] is /// called for the first time: In order to retrieve the next element, /// [`next`] is called on the underlying iterator, hence any side effects (i.e. /// anything other than fetching the next value) of the [`next`] method /// will occur. /// /// [`peek`]: Peekable::peek /// [`next`]: Iterator::next /// /// # Examples /// /// Basic usage: /// /// ``` /// let xs = [1, 2, 3]; /// /// let mut iter = xs.iter().peekable(); /// /// // peek() lets us see into the future /// assert_eq!(iter.peek(), Some(&&1)); /// assert_eq!(iter.next(), Some(&1)); /// /// assert_eq!(iter.next(), Some(&2)); /// /// // we can peek() multiple times, the iterator won't advance /// assert_eq!(iter.peek(), Some(&&3)); /// assert_eq!(iter.peek(), Some(&&3)); /// /// assert_eq!(iter.next(), Some(&3)); /// /// // after the iterator is finished, so is peek() /// assert_eq!(iter.peek(), None); /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn peekable(self) -> Peekable<Self> where Self: Sized, { Peekable::new(self) } /// Creates an iterator that [`skip`]s elements based on a predicate. /// /// [`skip`]: Iterator::skip /// /// `skip_while()` takes a closure as an argument. It will call this /// closure on each element of the iterator, and ignore elements /// until it returns `false`. /// /// After `false` is returned, `skip_while()`'s job is over, and the /// rest of the elements are yielded. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [-1i32, 0, 1]; /// /// let mut iter = a.iter().skip_while(|x| x.is_negative()); /// /// assert_eq!(iter.next(), Some(&0)); /// assert_eq!(iter.next(), Some(&1)); /// assert_eq!(iter.next(), None); /// ``` /// /// Because the closure passed to `skip_while()` takes a reference, and many /// iterators iterate over references, this leads to a possibly confusing /// situation, where the type of the closure is a double reference: /// /// ``` /// let a = [-1, 0, 1]; /// /// let mut iter = a.iter().skip_while(|x| **x < 0); // need two *s! 
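    /// // (`x` is a `&&i32` here: `iter()` yields `&i32`, and `skip_while()`
    /// // passes its closure a reference to each item)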
/// /// assert_eq!(iter.next(), Some(&0)); /// assert_eq!(iter.next(), Some(&1)); /// assert_eq!(iter.next(), None); /// ``` /// /// Stopping after an initial `false`: /// /// ``` /// let a = [-1, 0, 1, -2]; /// /// let mut iter = a.iter().skip_while(|x| **x < 0); /// /// assert_eq!(iter.next(), Some(&0)); /// assert_eq!(iter.next(), Some(&1)); /// /// // while this would have been false, since we already got a false, /// // skip_while() isn't used any more /// assert_eq!(iter.next(), Some(&-2)); /// /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn skip_while<P>(self, predicate: P) -> SkipWhile<Self, P> where Self: Sized, P: FnMut(&Self::Item) -> bool, { SkipWhile::new(self, predicate) } /// Creates an iterator that yields elements based on a predicate. /// /// `take_while()` takes a closure as an argument. It will call this /// closure on each element of the iterator, and yield elements /// while it returns `true`. /// /// After `false` is returned, `take_while()`'s job is over, and the /// rest of the elements are ignored. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [-1i32, 0, 1]; /// /// let mut iter = a.iter().take_while(|x| x.is_negative()); /// /// assert_eq!(iter.next(), Some(&-1)); /// assert_eq!(iter.next(), None); /// ``` /// /// Because the closure passed to `take_while()` takes a reference, and many /// iterators iterate over references, this leads to a possibly confusing /// situation, where the type of the closure is a double reference: /// /// ``` /// let a = [-1, 0, 1]; /// /// let mut iter = a.iter().take_while(|x| **x < 0); // need two *s! /// /// assert_eq!(iter.next(), Some(&-1)); /// assert_eq!(iter.next(), None); /// ``` /// /// Stopping after an initial `false`: /// /// ``` /// let a = [-1, 0, 1, -2]; /// /// let mut iter = a.iter().take_while(|x| **x < 0); /// /// assert_eq!(iter.next(), Some(&-1)); /// /// // We have more elements that are less than zero, but since we already /// // got a false, take_while() isn't used any more /// assert_eq!(iter.next(), None); /// ``` /// /// Because `take_while()` needs to look at the value in order to see if it /// should be included or not, consuming iterators will see that it is /// removed: /// /// ``` /// let a = [1, 2, 3, 4]; /// let mut iter = a.iter(); /// /// let result: Vec<i32> = iter.by_ref() /// .take_while(|n| **n != 3) /// .cloned() /// .collect(); /// /// assert_eq!(result, &[1, 2]); /// /// let result: Vec<i32> = iter.cloned().collect(); /// /// assert_eq!(result, &[4]); /// ``` /// /// The `3` is no longer there, because it was consumed in order to see if /// the iteration should stop, but wasn't placed back into the iterator. #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn take_while<P>(self, predicate: P) -> TakeWhile<Self, P> where Self: Sized, P: FnMut(&Self::Item) -> bool, { TakeWhile::new(self, predicate) } /// Creates an iterator that both yields elements based on a predicate and maps. /// /// `map_while()` takes a closure as an argument. It will call this /// closure on each element of the iterator, and yield elements /// while it returns [`Some(_)`][`Some`]. 
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(iter_map_while)]
    /// let a = [-1i32, 4, 0, 1];
    ///
    /// let mut iter = a.iter().map_while(|x| 16i32.checked_div(*x));
    ///
    /// assert_eq!(iter.next(), Some(-16));
    /// assert_eq!(iter.next(), Some(4));
    /// assert_eq!(iter.next(), None);
    /// ```
    ///
    /// Here's the same example, but with [`take_while`] and [`map`]:
    ///
    /// [`take_while`]: Iterator::take_while
    /// [`map`]: Iterator::map
    ///
    /// ```
    /// let a = [-1i32, 4, 0, 1];
    ///
    /// let mut iter = a.iter()
    ///                 .map(|x| 16i32.checked_div(*x))
    ///                 .take_while(|x| x.is_some())
    ///                 .map(|x| x.unwrap());
    ///
    /// assert_eq!(iter.next(), Some(-16));
    /// assert_eq!(iter.next(), Some(4));
    /// assert_eq!(iter.next(), None);
    /// ```
    ///
    /// Stopping after an initial [`None`]:
    ///
    /// ```
    /// #![feature(iter_map_while)]
    /// use std::convert::TryFrom;
    ///
    /// let a = [0, 1, 2, -3, 4, 5, -6];
    ///
    /// let iter = a.iter().map_while(|x| u32::try_from(*x).ok());
    /// let vec = iter.collect::<Vec<_>>();
    ///
    /// // We have more elements which could fit in u32 (4, 5), but `map_while`
    /// // returned `None` for `-3` (as the `predicate` returned `None`), and
    /// // `collect` stops at the first `None` it encounters.
    /// assert_eq!(vec, vec![0, 1, 2]);
    /// ```
    ///
    /// Because `map_while()` needs to look at the value in order to see if it
    /// should be included or not, consuming iterators will see that it is
    /// removed:
    ///
    /// ```
    /// #![feature(iter_map_while)]
    /// use std::convert::TryFrom;
    ///
    /// let a = [1, 2, -3, 4];
    /// let mut iter = a.iter();
    ///
    /// let result: Vec<u32> = iter.by_ref()
    ///                            .map_while(|n| u32::try_from(*n).ok())
    ///                            .collect();
    ///
    /// assert_eq!(result, &[1, 2]);
    ///
    /// let result: Vec<i32> = iter.cloned().collect();
    ///
    /// assert_eq!(result, &[4]);
    /// ```
    ///
    /// The `-3` is no longer there, because it was consumed in order to see if
    /// the iteration should stop, but wasn't placed back into the iterator.
    ///
    /// Note that unlike [`take_while`] this iterator is **not** fused.
    /// It is also not specified what this iterator returns after the first [`None`] is returned.
    /// If you need a fused iterator, use [`fuse`].
    ///
    /// [`fuse`]: Iterator::fuse
    #[inline]
    #[unstable(feature = "iter_map_while", reason = "recently added", issue = "68537")]
    fn map_while<B, P>(self, predicate: P) -> MapWhile<Self, P>
    where
        Self: Sized,
        P: FnMut(Self::Item) -> Option<B>,
    {
        MapWhile::new(self, predicate)
    }

    /// Creates an iterator that skips the first `n` elements.
    ///
    /// After they have been consumed, the rest of the elements are yielded.
    /// Rather than overriding this method directly, instead override the `nth` method.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let a = [1, 2, 3];
    ///
    /// let mut iter = a.iter().skip(2);
    ///
    /// assert_eq!(iter.next(), Some(&3));
    /// assert_eq!(iter.next(), None);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    fn skip(self, n: usize) -> Skip<Self>
    where
        Self: Sized,
    {
        Skip::new(self, n)
    }

    /// Creates an iterator that yields its first `n` elements.
/// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter().take(2); /// /// assert_eq!(iter.next(), Some(&1)); /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), None); /// ``` /// /// `take()` is often used with an infinite iterator, to make it finite: /// /// ``` /// let mut iter = (0..).take(3); /// /// assert_eq!(iter.next(), Some(0)); /// assert_eq!(iter.next(), Some(1)); /// assert_eq!(iter.next(), Some(2)); /// assert_eq!(iter.next(), None); /// ``` /// /// If less than `n` elements are available, /// `take` will limit itself to the size of the underlying iterator: /// /// ``` /// let v = vec![1, 2]; /// let mut iter = v.into_iter().take(5); /// assert_eq!(iter.next(), Some(1)); /// assert_eq!(iter.next(), Some(2)); /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn take(self, n: usize) -> Take<Self> where Self: Sized, { Take::new(self, n) } /// An iterator adaptor similar to [`fold`] that holds internal state and /// produces a new iterator. /// /// [`fold`]: Iterator::fold /// /// `scan()` takes two arguments: an initial value which seeds the internal /// state, and a closure with two arguments, the first being a mutable /// reference to the internal state and the second an iterator element. /// The closure can assign to the internal state to share state between /// iterations. /// /// On iteration, the closure will be applied to each element of the /// iterator and the return value from the closure, an [`Option`], is /// yielded by the iterator. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter().scan(1, |state, &x| { /// // each iteration, we'll multiply the state by the element /// *state = *state * x; /// /// // then, we'll yield the negation of the state /// Some(-*state) /// }); /// /// assert_eq!(iter.next(), Some(-1)); /// assert_eq!(iter.next(), Some(-2)); /// assert_eq!(iter.next(), Some(-6)); /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn scan<St, B, F>(self, initial_state: St, f: F) -> Scan<Self, St, F> where Self: Sized, F: FnMut(&mut St, Self::Item) -> Option<B>, { Scan::new(self, initial_state, f) } /// Creates an iterator that works like map, but flattens nested structure. /// /// The [`map`] adapter is very useful, but only when the closure /// argument produces values. If it produces an iterator instead, there's /// an extra layer of indirection. `flat_map()` will remove this extra layer /// on its own. /// /// You can think of `flat_map(f)` as the semantic equivalent /// of [`map`]ping, and then [`flatten`]ing as in `map(f).flatten()`. /// /// Another way of thinking about `flat_map()`: [`map`]'s closure returns /// one item for each element, and `flat_map()`'s closure returns an /// iterator for each element. /// /// [`map`]: Iterator::map /// [`flatten`]: Iterator::flatten /// /// # Examples /// /// Basic usage: /// /// ``` /// let words = ["alpha", "beta", "gamma"]; /// /// // chars() returns an iterator /// let merged: String = words.iter() /// .flat_map(|s| s.chars()) /// .collect(); /// assert_eq!(merged, "alphabetagamma"); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn flat_map<U, F>(self, f: F) -> FlatMap<Self, U, F> where Self: Sized, U: IntoIterator, F: FnMut(Self::Item) -> U, { FlatMap::new(self, f) } /// Creates an iterator that flattens nested structure. 
/// /// This is useful when you have an iterator of iterators or an iterator of /// things that can be turned into iterators and you want to remove one /// level of indirection. /// /// # Examples /// /// Basic usage: /// /// ``` /// let data = vec![vec![1, 2, 3, 4], vec![5, 6]]; /// let flattened = data.into_iter().flatten().collect::<Vec<u8>>(); /// assert_eq!(flattened, &[1, 2, 3, 4, 5, 6]); /// ``` /// /// Mapping and then flattening: /// /// ``` /// let words = ["alpha", "beta", "gamma"]; /// /// // chars() returns an iterator /// let merged: String = words.iter() /// .map(|s| s.chars()) /// .flatten() /// .collect(); /// assert_eq!(merged, "alphabetagamma"); /// ``` /// /// You can also rewrite this in terms of [`flat_map()`], which is preferable /// in this case since it conveys intent more clearly: /// /// ``` /// let words = ["alpha", "beta", "gamma"]; /// /// // chars() returns an iterator /// let merged: String = words.iter() /// .flat_map(|s| s.chars()) /// .collect(); /// assert_eq!(merged, "alphabetagamma"); /// ``` /// /// Flattening once only removes one level of nesting: /// /// ``` /// let d3 = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]; /// /// let d2 = d3.iter().flatten().collect::<Vec<_>>(); /// assert_eq!(d2, [&[1, 2], &[3, 4], &[5, 6], &[7, 8]]); /// /// let d1 = d3.iter().flatten().flatten().collect::<Vec<_>>(); /// assert_eq!(d1, [&1, &2, &3, &4, &5, &6, &7, &8]); /// ``` /// /// Here we see that `flatten()` does not perform a "deep" flatten. /// Instead, only one level of nesting is removed. That is, if you /// `flatten()` a three-dimensional array the result will be /// two-dimensional and not one-dimensional. To get a one-dimensional /// structure, you have to `flatten()` again. /// /// [`flat_map()`]: Iterator::flat_map #[inline] #[stable(feature = "iterator_flatten", since = "1.29.0")] fn flatten(self) -> Flatten<Self> where Self: Sized, Self::Item: IntoIterator, { Flatten::new(self) } /// Creates an iterator which ends after the first [`None`]. /// /// After an iterator returns [`None`], future calls may or may not yield /// [`Some(T)`] again. `fuse()` adapts an iterator, ensuring that after a /// [`None`] is given, it will always return [`None`] forever. /// /// [`Some(T)`]: Some /// /// # Examples /// /// Basic usage: /// /// ``` /// // an iterator which alternates between Some and None /// struct Alternate { /// state: i32, /// } /// /// impl Iterator for Alternate { /// type Item = i32; /// /// fn next(&mut self) -> Option<i32> { /// let val = self.state; /// self.state = self.state + 1; /// /// // if it's even, Some(i32), else None /// if val % 2 == 0 { /// Some(val) /// } else { /// None /// } /// } /// } /// /// let mut iter = Alternate { state: 0 }; /// /// // we can see our iterator going back and forth /// assert_eq!(iter.next(), Some(0)); /// assert_eq!(iter.next(), None); /// assert_eq!(iter.next(), Some(2)); /// assert_eq!(iter.next(), None); /// /// // however, once we fuse it... /// let mut iter = iter.fuse(); /// /// assert_eq!(iter.next(), Some(4)); /// assert_eq!(iter.next(), None); /// /// // it will always return `None` after the first time. /// assert_eq!(iter.next(), None); /// assert_eq!(iter.next(), None); /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn fuse(self) -> Fuse<Self> where Self: Sized, { Fuse::new(self) } /// Does something with each element of an iterator, passing the value on. /// /// When using iterators, you'll often chain several of them together. 
/// While working on such code, you might want to check out what's /// happening at various parts in the pipeline. To do that, insert /// a call to `inspect()`. /// /// It's more common for `inspect()` to be used as a debugging tool than to /// exist in your final code, but applications may find it useful in certain /// situations when errors need to be logged before being discarded. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 4, 2, 3]; /// /// // this iterator sequence is complex. /// let sum = a.iter() /// .cloned() /// .filter(|x| x % 2 == 0) /// .fold(0, |sum, i| sum + i); /// /// println!("{}", sum); /// /// // let's add some inspect() calls to investigate what's happening /// let sum = a.iter() /// .cloned() /// .inspect(|x| println!("about to filter: {}", x)) /// .filter(|x| x % 2 == 0) /// .inspect(|x| println!("made it through filter: {}", x)) /// .fold(0, |sum, i| sum + i); /// /// println!("{}", sum); /// ``` /// /// This will print: /// /// ```text /// 6 /// about to filter: 1 /// about to filter: 4 /// made it through filter: 4 /// about to filter: 2 /// made it through filter: 2 /// about to filter: 3 /// 6 /// ``` /// /// Logging errors before discarding them: /// /// ``` /// let lines = ["1", "2", "a"]; /// /// let sum: i32 = lines /// .iter() /// .map(|line| line.parse::<i32>()) /// .inspect(|num| { /// if let Err(ref e) = *num { /// println!("Parsing error: {}", e); /// } /// }) /// .filter_map(Result::ok) /// .sum(); /// /// println!("Sum: {}", sum); /// ``` /// /// This will print: /// /// ```text /// Parsing error: invalid digit found in string /// Sum: 3 /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn inspect<F>(self, f: F) -> Inspect<Self, F> where Self: Sized, F: FnMut(&Self::Item), { Inspect::new(self, f) } /// Borrows an iterator, rather than consuming it. /// /// This is useful to allow applying iterator adaptors while still /// retaining ownership of the original iterator. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let iter = a.iter(); /// /// let sum: i32 = iter.take(5).fold(0, |acc, i| acc + i); /// /// assert_eq!(sum, 6); /// /// // if we try to use iter again, it won't work. The following line /// // gives "error: use of moved value: `iter` /// // assert_eq!(iter.next(), None); /// /// // let's try that again /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// // instead, we add in a .by_ref() /// let sum: i32 = iter.by_ref().take(2).fold(0, |acc, i| acc + i); /// /// assert_eq!(sum, 3); /// /// // now this is just fine: /// assert_eq!(iter.next(), Some(&3)); /// assert_eq!(iter.next(), None); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn by_ref(&mut self) -> &mut Self where Self: Sized, { self } /// Transforms an iterator into a collection. /// /// `collect()` can take anything iterable, and turn it into a relevant /// collection. This is one of the more powerful methods in the standard /// library, used in a variety of contexts. /// /// The most basic pattern in which `collect()` is used is to turn one /// collection into another. You take a collection, call [`iter`] on it, /// do a bunch of transformations, and then `collect()` at the end. /// /// `collect()` can also create instances of types that are not typical /// collections. For example, a [`String`] can be built from [`char`]s, /// and an iterator of [`Result<T, E>`][`Result`] items can be collected /// into `Result<Collection<T>, E>`. See the examples below for more. 
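    ///
    /// As one more sketch of that flexibility, key-value pairs can be
    /// collected into a map (here `std::collections::HashMap`):
    ///
    /// ```
    /// use std::collections::HashMap;
    ///
    /// let pairs = [("one", 1), ("two", 2)];
    ///
    /// // each `(key, value)` tuple becomes one entry in the map
    /// let map: HashMap<&str, i32> = pairs.iter().cloned().collect();
    ///
    /// assert_eq!(map["one"], 1);
    /// ```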
/// /// Because `collect()` is so general, it can cause problems with type /// inference. As such, `collect()` is one of the few times you'll see /// the syntax affectionately known as the 'turbofish': `::<>`. This /// helps the inference algorithm understand specifically which collection /// you're trying to collect into. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let doubled: Vec<i32> = a.iter() /// .map(|&x| x * 2) /// .collect(); /// /// assert_eq!(vec![2, 4, 6], doubled); /// ``` /// /// Note that we needed the `: Vec<i32>` on the left-hand side. This is because /// we could collect into, for example, a [`VecDeque<T>`] instead: /// /// [`VecDeque<T>`]: ../../std/collections/struct.VecDeque.html /// /// ``` /// use std::collections::VecDeque; /// /// let a = [1, 2, 3]; /// /// let doubled: VecDeque<i32> = a.iter().map(|&x| x * 2).collect(); /// /// assert_eq!(2, doubled[0]); /// assert_eq!(4, doubled[1]); /// assert_eq!(6, doubled[2]); /// ``` /// /// Using the 'turbofish' instead of annotating `doubled`: /// /// ``` /// let a = [1, 2, 3]; /// /// let doubled = a.iter().map(|x| x * 2).collect::<Vec<i32>>(); /// /// assert_eq!(vec![2, 4, 6], doubled); /// ``` /// /// Because `collect()` only cares about what you're collecting into, you can /// still use a partial type hint, `_`, with the turbofish: /// /// ``` /// let a = [1, 2, 3]; /// /// let doubled = a.iter().map(|x| x * 2).collect::<Vec<_>>(); /// /// assert_eq!(vec![2, 4, 6], doubled); /// ``` /// /// Using `collect()` to make a [`String`]: /// /// ``` /// let chars = ['g', 'd', 'k', 'k', 'n']; /// /// let hello: String = chars.iter() /// .map(|&x| x as u8) /// .map(|x| (x + 1) as char) /// .collect(); /// /// assert_eq!("hello", hello); /// ``` /// /// If you have a list of [`Result<T, E>`][`Result`]s, you can use `collect()` to /// see if any of them failed: /// /// ``` /// let results = [Ok(1), Err("nope"), Ok(3), Err("bad")]; /// /// let result: Result<Vec<_>, &str> = results.iter().cloned().collect(); /// /// // gives us the first error /// assert_eq!(Err("nope"), result); /// /// let results = [Ok(1), Ok(3)]; /// /// let result: Result<Vec<_>, &str> = results.iter().cloned().collect(); /// /// // gives us the list of answers /// assert_eq!(Ok(vec![1, 3]), result); /// ``` /// /// [`iter`]: Iterator::next /// [`String`]: ../../std/string/struct.String.html /// [`char`]: type@char #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[must_use = "if you really need to exhaust the iterator, consider `.for_each(drop)` instead"] fn collect<B: FromIterator<Self::Item>>(self) -> B where Self: Sized, { FromIterator::from_iter(self) } /// Consumes an iterator, creating two collections from it. /// /// The predicate passed to `partition()` can return `true`, or `false`. /// `partition()` returns a pair, all of the elements for which it returned /// `true`, and all of the elements for which it returned `false`. /// /// See also [`is_partitioned()`] and [`partition_in_place()`]. 
/// /// [`is_partitioned()`]: Iterator::is_partitioned /// [`partition_in_place()`]: Iterator::partition_in_place /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let (even, odd): (Vec<i32>, Vec<i32>) = a /// .iter() /// .partition(|&n| n % 2 == 0); /// /// assert_eq!(even, vec![2]); /// assert_eq!(odd, vec![1, 3]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn partition<B, F>(self, f: F) -> (B, B) where Self: Sized, B: Default + Extend<Self::Item>, F: FnMut(&Self::Item) -> bool, { #[inline] fn extend<'a, T, B: Extend<T>>( mut f: impl FnMut(&T) -> bool + 'a, left: &'a mut B, right: &'a mut B, ) -> impl FnMut((), T) + 'a { move |(), x| { if f(&x) { left.extend_one(x); } else { right.extend_one(x); } } } let mut left: B = Default::default(); let mut right: B = Default::default(); self.fold((), extend(f, &mut left, &mut right)); (left, right) } /// Reorders the elements of this iterator *in-place* according to the given predicate, /// such that all those that return `true` precede all those that return `false`. /// Returns the number of `true` elements found. /// /// The relative order of partitioned items is not maintained. /// /// See also [`is_partitioned()`] and [`partition()`]. /// /// [`is_partitioned()`]: Iterator::is_partitioned /// [`partition()`]: Iterator::partition /// /// # Examples /// /// ``` /// #![feature(iter_partition_in_place)] /// /// let mut a = [1, 2, 3, 4, 5, 6, 7]; /// /// // Partition in-place between evens and odds /// let i = a.iter_mut().partition_in_place(|&n| n % 2 == 0); /// /// assert_eq!(i, 3); /// assert!(a[..i].iter().all(|&n| n % 2 == 0)); // evens /// assert!(a[i..].iter().all(|&n| n % 2 == 1)); // odds /// ``` #[unstable(feature = "iter_partition_in_place", reason = "new API", issue = "62543")] fn partition_in_place<'a, T: 'a, P>(mut self, ref mut predicate: P) -> usize where Self: Sized + DoubleEndedIterator<Item = &'a mut T>, P: FnMut(&T) -> bool, { // FIXME: should we worry about the count overflowing? The only way to have more than // `usize::MAX` mutable references is with ZSTs, which aren't useful to partition... // These closure "factory" functions exist to avoid genericity in `Self`. #[inline] fn is_false<'a, T>( predicate: &'a mut impl FnMut(&T) -> bool, true_count: &'a mut usize, ) -> impl FnMut(&&mut T) -> bool + 'a { move |x| { let p = predicate(&**x); *true_count += p as usize; !p } } #[inline] fn is_true<T>(predicate: &mut impl FnMut(&T) -> bool) -> impl FnMut(&&mut T) -> bool + '_ { move |x| predicate(&**x) } // Repeatedly find the first `false` and swap it with the last `true`. let mut true_count = 0; while let Some(head) = self.find(is_false(predicate, &mut true_count)) { if let Some(tail) = self.rfind(is_true(predicate)) { crate::mem::swap(head, tail); true_count += 1; } else { break; } } true_count } /// Checks if the elements of this iterator are partitioned according to the given predicate, /// such that all those that return `true` precede all those that return `false`. /// /// See also [`partition()`] and [`partition_in_place()`]. 
/// /// [`partition()`]: Iterator::partition /// [`partition_in_place()`]: Iterator::partition_in_place /// /// # Examples /// /// ``` /// #![feature(iter_is_partitioned)] /// /// assert!("Iterator".chars().is_partitioned(char::is_uppercase)); /// assert!(!"IntoIterator".chars().is_partitioned(char::is_uppercase)); /// ``` #[unstable(feature = "iter_is_partitioned", reason = "new API", issue = "62544")] fn is_partitioned<P>(mut self, mut predicate: P) -> bool where Self: Sized, P: FnMut(Self::Item) -> bool, { // Either all items test `true`, or the first clause stops at `false` // and we check that there are no more `true` items after that. self.all(&mut predicate) || !self.any(predicate) } /// An iterator method that applies a function as long as it returns /// successfully, producing a single, final value. /// /// `try_fold()` takes two arguments: an initial value, and a closure with /// two arguments: an 'accumulator', and an element. The closure either /// returns successfully, with the value that the accumulator should have /// for the next iteration, or it returns failure, with an error value that /// is propagated back to the caller immediately (short-circuiting). /// /// The initial value is the value the accumulator will have on the first /// call. If applying the closure succeeded against every element of the /// iterator, `try_fold()` returns the final accumulator as success. /// /// Folding is useful whenever you have a collection of something, and want /// to produce a single value from it. /// /// # Note to Implementors /// /// Several of the other (forward) methods have default implementations in /// terms of this one, so try to implement this explicitly if it can /// do something better than the default `for` loop implementation. /// /// In particular, try to have this call `try_fold()` on the internal parts /// from which this iterator is composed. If multiple calls are needed, /// the `?` operator may be convenient for chaining the accumulator value /// along, but beware any invariants that need to be upheld before those /// early returns. This is a `&mut self` method, so iteration needs to be /// resumable after hitting an error here. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// // the checked sum of all of the elements of the array /// let sum = a.iter().try_fold(0i8, |acc, &x| acc.checked_add(x)); /// /// assert_eq!(sum, Some(6)); /// ``` /// /// Short-circuiting: /// /// ``` /// let a = [10, 20, 30, 100, 40, 50]; /// let mut it = a.iter(); /// /// // This sum overflows when adding the 100 element /// let sum = it.try_fold(0i8, |acc, &x| acc.checked_add(x)); /// assert_eq!(sum, None); /// /// // Because it short-circuited, the remaining elements are still /// // available through the iterator. /// assert_eq!(it.len(), 2); /// assert_eq!(it.next(), Some(&40)); /// ``` #[inline] #[stable(feature = "iterator_try_fold", since = "1.27.0")] fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R where Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try<Ok = B>, { let mut accum = init; while let Some(x) = self.next() { accum = f(accum, x)?; } Try::from_ok(accum) } /// An iterator method that applies a fallible function to each item in the /// iterator, stopping at the first error and returning that error. /// /// This can also be thought of as the fallible form of [`for_each()`] /// or as the stateless version of [`try_fold()`]. 
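    ///
    /// Also, unlike [`for_each()`], it lets you exit a loop early; a minimal
    /// sketch using [`Option`] as the `Try` type:
    ///
    /// ```
    /// let r = (0..).try_for_each(|x| {
    ///     if x > 2 { return None; }
    ///     // `Some(())` means "keep going"
    ///     Some(())
    /// });
    /// assert_eq!(r, None);
    /// ```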
    ///
    /// [`for_each()`]: Iterator::for_each
    /// [`try_fold()`]: Iterator::try_fold
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fs::rename;
    /// use std::io::{stdout, Write};
    /// use std::path::Path;
    ///
    /// let data = ["no_tea.txt", "stale_bread.json", "torrential_rain.png"];
    ///
    /// let res = data.iter().try_for_each(|x| writeln!(stdout(), "{}", x));
    /// assert!(res.is_ok());
    ///
    /// let mut it = data.iter().cloned();
    /// let res = it.try_for_each(|x| rename(x, Path::new(x).with_extension("old")));
    /// assert!(res.is_err());
    /// // It short-circuited, so the remaining items are still in the iterator:
    /// assert_eq!(it.next(), Some("stale_bread.json"));
    /// ```
    #[inline]
    #[stable(feature = "iterator_try_fold", since = "1.27.0")]
    fn try_for_each<F, R>(&mut self, f: F) -> R
    where
        Self: Sized,
        F: FnMut(Self::Item) -> R,
        R: Try<Ok = ()>,
    {
        #[inline]
        fn call<T, R>(mut f: impl FnMut(T) -> R) -> impl FnMut((), T) -> R {
            move |(), x| f(x)
        }

self.try_fold((), call(f)) } /// An iterator method that applies a function, producing a single, final value. /// /// `fold()` takes two arguments: an initial value, and a closure with two /// arguments: an 'accumulator', and an element. The closure returns the value that /// the accumulator should have for the next iteration. /// /// The initial value is the value the accumulator will have on the first /// call. /// /// After applying this closure to every element of the iterator, `fold()` /// returns the accumulator. /// /// This operation is sometimes called 'reduce' or 'inject'. /// /// Folding is useful whenever you have a collection of something, and want /// to produce a single value from it. /// /// Note: `fold()`, and similar methods that traverse the entire iterator, /// may not terminate for infinite iterators, even on traits for which a /// result is determinable in finite time. /// /// # Note to Implementors /// /// Several of the other (forward) methods have default implementations in /// terms of this one, so try to implement this explicitly if it can /// do something better than the default `for` loop implementation. /// /// In particular, try to have this call `fold()` on the internal parts /// from which this iterator is composed. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// // the sum of all of the elements of the array /// let sum = a.iter().fold(0, |acc, x| acc + x); /// /// assert_eq!(sum, 6); /// ``` /// /// Let's walk through each step of the iteration here: /// /// | element | acc | x | result | /// |---------|-----|---|--------| /// | | 0 | | | /// | 1 | 0 | 1 | 1 | /// | 2 | 1 | 2 | 3 | /// | 3 | 3 | 3 | 6 | /// /// And so, our final result, `6`. /// /// It's common for people who haven't used iterators a lot to /// use a `for` loop with a list of things to build up a result. Those /// can be turned into `fold()`s: /// /// [`for`]: ../../book/ch03-05-control-flow.html#looping-through-a-collection-with-for /// /// ``` /// let numbers = [1, 2, 3, 4, 5]; /// /// let mut result = 0; /// /// // for loop: /// for i in &numbers { /// result = result + i; /// } /// /// // fold: /// let result2 = numbers.iter().fold(0, |acc, &x| acc + x); /// /// // they're the same /// assert_eq!(result, result2); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn fold<B, F>(mut self, init: B, mut f: F) -> B where Self: Sized, F: FnMut(B, Self::Item) -> B, { let mut accum = init; while let Some(x) = self.next() { accum = f(accum, x); } accum } /// The same as [`fold()`], but uses the first element in the /// iterator as the initial value, folding every subsequent element into it. /// If the iterator is empty, return [`None`]; otherwise, return the result /// of the fold. /// /// [`fold()`]: Iterator::fold /// /// # Example /// /// Find the maximum value: /// /// ``` /// #![feature(iterator_fold_self)] /// /// fn find_max<I>(iter: I) -> Option<I::Item> /// where I: Iterator, /// I::Item: Ord, /// { /// iter.fold_first(|a, b| { /// if a >= b { a } else { b } /// }) /// } /// let a = [10, 20, 5, -23, 0]; /// let b: [u32; 0] = []; /// /// assert_eq!(find_max(a.iter()), Some(&20)); /// assert_eq!(find_max(b.iter()), None); /// ``` #[inline] #[unstable(feature = "iterator_fold_self", issue = "68125")] fn fold_first<F>(mut self, f: F) -> Option<Self::Item> where Self: Sized, F: FnMut(Self::Item, Self::Item) -> Self::Item, { let first = self.next()?; Some(self.fold(first, f)) } /// Tests if every element of the iterator matches a predicate. 
/// /// `all()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, and if they all return /// `true`, then so does `all()`. If any of them return `false`, it /// returns `false`. /// /// `all()` is short-circuiting; in other words, it will stop processing /// as soon as it finds a `false`, given that no matter what else happens, /// the result will also be `false`. /// /// An empty iterator returns `true`. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert!(a.iter().all(|&x| x > 0)); /// /// assert!(!a.iter().all(|&x| x > 2)); /// ``` /// /// Stopping at the first `false`: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// assert!(!iter.all(|&x| x != 2)); /// /// // we can still use `iter`, as there are more elements. /// assert_eq!(iter.next(), Some(&3)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn all<F>(&mut self, f: F) -> bool where Self: Sized, F: FnMut(Self::Item) -> bool, { #[inline] fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<(), ()> { move |(), x| { if f(x) { ControlFlow::CONTINUE } else { ControlFlow::BREAK } } } self.try_fold((), check(f)) == ControlFlow::CONTINUE } /// Tests if any element of the iterator matches a predicate. /// /// `any()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, and if any of them return /// `true`, then so does `any()`. If they all return `false`, it /// returns `false`. /// /// `any()` is short-circuiting; in other words, it will stop processing /// as soon as it finds a `true`, given that no matter what else happens, /// the result will also be `true`. /// /// An empty iterator returns `false`. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert!(a.iter().any(|&x| x > 0)); /// /// assert!(!a.iter().any(|&x| x > 5)); /// ``` /// /// Stopping at the first `true`: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// assert!(iter.any(|&x| x != 2)); /// /// // we can still use `iter`, as there are more elements. /// assert_eq!(iter.next(), Some(&2)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn any<F>(&mut self, f: F) -> bool where Self: Sized, F: FnMut(Self::Item) -> bool, { #[inline] fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<(), ()> { move |(), x| { if f(x) { ControlFlow::BREAK } else { ControlFlow::CONTINUE } } } self.try_fold((), check(f)) == ControlFlow::BREAK } /// Searches for an element of an iterator that satisfies a predicate. /// /// `find()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, and if any of them return /// `true`, then `find()` returns [`Some(element)`]. If they all return /// `false`, it returns [`None`]. /// /// `find()` is short-circuiting; in other words, it will stop processing /// as soon as the closure returns `true`. /// /// Because `find()` takes a reference, and many iterators iterate over /// references, this leads to a possibly confusing situation where the /// argument is a double reference. You can see this effect in the /// examples below, with `&&x`. 
/// /// [`Some(element)`]: Some /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert_eq!(a.iter().find(|&&x| x == 2), Some(&2)); /// /// assert_eq!(a.iter().find(|&&x| x == 5), None); /// ``` /// /// Stopping at the first `true`: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// assert_eq!(iter.find(|&&x| x == 2), Some(&2)); /// /// // we can still use `iter`, as there are more elements. /// assert_eq!(iter.next(), Some(&3)); /// ``` /// /// Note that `iter.find(f)` is equivalent to `iter.filter(f).next()`. #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn find<P>(&mut self, predicate: P) -> Option<Self::Item> where Self: Sized, P: FnMut(&Self::Item) -> bool, { #[inline] fn check<T>( mut predicate: impl FnMut(&T) -> bool, ) -> impl FnMut((), T) -> ControlFlow<(), T> { move |(), x| { if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::CONTINUE } } } self.try_fold((), check(predicate)).break_value() } /// Applies function to the elements of iterator and returns /// the first non-none result. /// /// `iter.find_map(f)` is equivalent to `iter.filter_map(f).next()`. /// /// # Examples /// /// ``` /// let a = ["lol", "NaN", "2", "5"]; /// /// let first_number = a.iter().find_map(|s| s.parse().ok()); /// /// assert_eq!(first_number, Some(2)); /// ``` #[inline] #[stable(feature = "iterator_find_map", since = "1.30.0")] fn find_map<B, F>(&mut self, f: F) -> Option<B> where Self: Sized, F: FnMut(Self::Item) -> Option<B>, { #[inline] fn check<T, B>( mut f: impl FnMut(T) -> Option<B>, ) -> impl FnMut((), T) -> ControlFlow<(), B> { move |(), x| match f(x) { Some(x) => ControlFlow::Break(x), None => ControlFlow::CONTINUE, } } self.try_fold((), check(f)).break_value() } /// Applies function to the elements of iterator and returns /// the first true result or the first error. /// /// # Examples /// /// ``` /// #![feature(try_find)] /// /// let a = ["1", "2", "lol", "NaN", "5"]; /// /// let is_my_num = |s: &str, search: i32| -> Result<bool, std::num::ParseIntError> { /// Ok(s.parse::<i32>()? == search) /// }; /// /// let result = a.iter().try_find(|&&s| is_my_num(s, 2)); /// assert_eq!(result, Ok(Some(&"2"))); /// /// let result = a.iter().try_find(|&&s| is_my_num(s, 5)); /// assert!(result.is_err()); /// ``` #[inline] #[unstable(feature = "try_find", reason = "new API", issue = "63178")] fn try_find<F, R>(&mut self, f: F) -> Result<Option<Self::Item>, R::Error> where Self: Sized, F: FnMut(&Self::Item) -> R, R: Try<Ok = bool>, { #[inline] fn check<F, T, R>(mut f: F) -> impl FnMut((), T) -> ControlFlow<(), Result<T, R::Error>> where F: FnMut(&T) -> R, R: Try<Ok = bool>, { move |(), x| match f(&x).into_result() { Ok(false) => ControlFlow::CONTINUE, Ok(true) => ControlFlow::Break(Ok(x)), Err(x) => ControlFlow::Break(Err(x)), } } self.try_fold((), check(f)).break_value().transpose() } /// Searches for an element in an iterator, returning its index. /// /// `position()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, and if one of them /// returns `true`, then `position()` returns [`Some(index)`]. If all of /// them return `false`, it returns [`None`]. /// /// `position()` is short-circuiting; in other words, it will stop /// processing as soon as it finds a `true`. /// /// # Overflow Behavior /// /// The method does no guarding against overflows, so if there are more /// than [`usize::MAX`] non-matching elements, it either produces the wrong /// result or panics. 
If debug assertions are enabled, a panic is /// guaranteed. /// /// # Panics /// /// This function might panic if the iterator has more than `usize::MAX` /// non-matching elements. /// /// [`Some(index)`]: Some /// [`usize::MAX`]: crate::usize::MAX /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert_eq!(a.iter().position(|&x| x == 2), Some(1)); /// /// assert_eq!(a.iter().position(|&x| x == 5), None); /// ``` /// /// Stopping at the first `true`: /// /// ``` /// let a = [1, 2, 3, 4]; /// /// let mut iter = a.iter(); /// /// assert_eq!(iter.position(|&x| x >= 2), Some(1)); /// /// // we can still use `iter`, as there are more elements. /// assert_eq!(iter.next(), Some(&3)); /// /// // The returned index depends on iterator state /// assert_eq!(iter.position(|&x| x == 4), Some(0)); /// /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn position<P>(&mut self, predicate: P) -> Option<usize> where Self: Sized, P: FnMut(Self::Item) -> bool, { #[inline] fn check<T>( mut predicate: impl FnMut(T) -> bool, ) -> impl FnMut(usize, T) -> ControlFlow<usize, usize> { // The addition might panic on overflow move |i, x| { if predicate(x) { ControlFlow::Break(i) } else { ControlFlow::Continue(Add::add(i, 1)) } } } self.try_fold(0, check(predicate)).break_value() } /// Searches for an element in an iterator from the right, returning its /// index. /// /// `rposition()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, starting from the end, /// and if one of them returns `true`, then `rposition()` returns /// [`Some(index)`]. If all of them return `false`, it returns [`None`]. /// /// `rposition()` is short-circuiting; in other words, it will stop /// processing as soon as it finds a `true`. /// /// [`Some(index)`]: Some /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert_eq!(a.iter().rposition(|&x| x == 3), Some(2)); /// /// assert_eq!(a.iter().rposition(|&x| x == 5), None); /// ``` /// /// Stopping at the first `true`: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// assert_eq!(iter.rposition(|&x| x == 2), Some(1)); /// /// // we can still use `iter`, as there are more elements. /// assert_eq!(iter.next(), Some(&1)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn rposition<P>(&mut self, predicate: P) -> Option<usize> where P: FnMut(Self::Item) -> bool, Self: Sized + ExactSizeIterator + DoubleEndedIterator, { // No need for an overflow check here, because `ExactSizeIterator` // implies that the number of elements fits into a `usize`. #[inline] fn check<T>( mut predicate: impl FnMut(T) -> bool, ) -> impl FnMut(usize, T) -> ControlFlow<usize, usize> { move |i, x| { let i = i - 1; if predicate(x) { ControlFlow::Break(i) } else { ControlFlow::Continue(i) } } } let n = self.len(); self.try_rfold(n, check(predicate)).break_value() } /// Returns the maximum element of an iterator. /// /// If several elements are equally maximum, the last element is /// returned. If the iterator is empty, [`None`] is returned. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// let b: Vec<u32> = Vec::new(); /// /// assert_eq!(a.iter().max(), Some(&3)); /// assert_eq!(b.iter().max(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn max(self) -> Option<Self::Item> where Self: Sized, Self::Item: Ord, { self.max_by(Ord::cmp) } /// Returns the minimum element of an iterator. 
/// /// If several elements are equally minimum, the first element is /// returned. If the iterator is empty, [`None`] is returned. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// let b: Vec<u32> = Vec::new(); /// /// assert_eq!(a.iter().min(), Some(&1)); /// assert_eq!(b.iter().min(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn min(self) -> Option<Self::Item> where Self: Sized, Self::Item: Ord, { self.min_by(Ord::cmp) } /// Returns the element that gives the maximum value from the /// specified function. /// /// If several elements are equally maximum, the last element is /// returned. If the iterator is empty, [`None`] is returned. /// /// # Examples /// /// ``` /// let a = [-3_i32, 0, 1, 5, -10]; /// assert_eq!(*a.iter().max_by_key(|x| x.abs()).unwrap(), -10); /// ``` #[inline] #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] fn max_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item> where Self: Sized, F: FnMut(&Self::Item) -> B, { #[inline] fn key<T, B>(mut f: impl FnMut(&T) -> B) -> impl FnMut(T) -> (B, T) { move |x| (f(&x), x) } #[inline] fn compare<T, B: Ord>((x_p, _): &(B, T), (y_p, _): &(B, T)) -> Ordering { x_p.cmp(y_p) } let (_, x) = self.map(key(f)).max_by(compare)?; Some(x) } /// Returns the element that gives the maximum value with respect to the /// specified comparison function. /// /// If several elements are equally maximum, the last element is /// returned. If the iterator is empty, [`None`] is returned. /// /// # Examples /// /// ``` /// let a = [-3_i32, 0, 1, 5, -10]; /// assert_eq!(*a.iter().max_by(|x, y| x.cmp(y)).unwrap(), 5); /// ``` #[inline] #[stable(feature = "iter_max_by", since = "1.15.0")] fn max_by<F>(self, compare: F) -> Option<Self::Item> where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering, { #[inline] fn fold<T>(mut compare: impl FnMut(&T, &T) -> Ordering) -> impl FnMut(T, T) -> T { move |x, y| cmp::max_by(x, y, &mut compare) } self.fold_first(fold(compare)) } /// Returns the element that gives the minimum value from the /// specified function. /// /// If several elements are equally minimum, the first element is /// returned. If the iterator is empty, [`None`] is returned. /// /// # Examples /// /// ``` /// let a = [-3_i32, 0, 1, 5, -10]; /// assert_eq!(*a.iter().min_by_key(|x| x.abs()).unwrap(), 0); /// ``` #[inline] #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] fn min_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item> where Self: Sized, F: FnMut(&Self::Item) -> B, { #[inline] fn key<T, B>(mut f: impl FnMut(&T) -> B) -> impl FnMut(T) -> (B, T) { move |x| (f(&x), x) } #[inline] fn compare<T, B: Ord>((x_p, _): &(B, T), (y_p, _): &(B, T)) -> Ordering { x_p.cmp(y_p) } let (_, x) = self.map(key(f)).min_by(compare)?; Some(x) } /// Returns the element that gives the minimum value with respect to the /// specified comparison function. /// /// If several elements are equally minimum, the first element is /// returned. If the iterator is empty, [`None`] is returned. 
/// /// # Examples /// /// ``` /// let a = [-3_i32, 0, 1, 5, -10]; /// assert_eq!(*a.iter().min_by(|x, y| x.cmp(y)).unwrap(), -10); /// ``` #[inline] #[stable(feature = "iter_min_by", since = "1.15.0")] fn min_by<F>(self, compare: F) -> Option<Self::Item> where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering, { #[inline] fn fold<T>(mut compare: impl FnMut(&T, &T) -> Ordering) -> impl FnMut(T, T) -> T { move |x, y| cmp::min_by(x, y, &mut compare) } self.fold_first(fold(compare)) } /// Reverses an iterator's direction. /// /// Usually, iterators iterate from left to right. After using `rev()`, /// an iterator will instead iterate from right to left. /// /// This is only possible if the iterator has an end, so `rev()` only /// works on [`DoubleEndedIterator`]s. /// /// # Examples /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter().rev(); /// /// assert_eq!(iter.next(), Some(&3)); /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), Some(&1)); /// /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn rev(self) -> Rev<Self> where Self: Sized + DoubleEndedIterator, { Rev::new(self) } /// Converts an iterator of pairs into a pair of containers. /// /// `unzip()` consumes an entire iterator of pairs, producing two /// collections: one from the left elements of the pairs, and one /// from the right elements. /// /// This function is, in some sense, the opposite of [`zip`]. /// /// [`zip`]: Iterator::zip /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [(1, 2), (3, 4)]; /// /// let (left, right): (Vec<_>, Vec<_>) = a.iter().cloned().unzip(); /// /// assert_eq!(left, [1, 3]); /// assert_eq!(right, [2, 4]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB) where FromA: Default + Extend<A>, FromB: Default + Extend<B>, Self: Sized + Iterator<Item = (A, B)>, { fn extend<'a, A, B>( ts: &'a mut impl Extend<A>, us: &'a mut impl Extend<B>, ) -> impl FnMut((), (A, B)) + 'a { move |(), (t, u)| { ts.extend_one(t); us.extend_one(u); } } let mut ts: FromA = Default::default(); let mut us: FromB = Default::default(); let (lower_bound, _) = self.size_hint(); if lower_bound > 0 { ts.extend_reserve(lower_bound); us.extend_reserve(lower_bound); } self.fold((), extend(&mut ts, &mut us)); (ts, us) } /// Creates an iterator which copies all of its elements. /// /// This is useful when you have an iterator over `&T`, but you need an /// iterator over `T`. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let v_copied: Vec<_> = a.iter().copied().collect(); /// /// // copied is the same as .map(|&x| x) /// let v_map: Vec<_> = a.iter().map(|&x| x).collect(); /// /// assert_eq!(v_copied, vec![1, 2, 3]); /// assert_eq!(v_map, vec![1, 2, 3]); /// ``` #[stable(feature = "iter_copied", since = "1.36.0")] fn copied<'a, T: 'a>(self) -> Copied<Self> where Self: Sized + Iterator<Item = &'a T>, T: Copy, { Copied::new(self) } /// Creates an iterator which [`clone`]s all of its elements. /// /// This is useful when you have an iterator over `&T`, but you need an /// iterator over `T`. 
    ///
    /// [`clone`]: Clone::clone
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let a = [1, 2, 3];
    ///
    /// let v_cloned: Vec<_> = a.iter().cloned().collect();
    ///
    /// // cloned is the same as .map(|&x| x), for integers
    /// let v_map: Vec<_> = a.iter().map(|&x| x).collect();
    ///
    /// assert_eq!(v_cloned, vec![1, 2, 3]);
    /// assert_eq!(v_map, vec![1, 2, 3]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn cloned<'a, T: 'a>(self) -> Cloned<Self>
    where
        Self: Sized + Iterator<Item = &'a T>,
        T: Clone,
    {
        Cloned::new(self)
    }

    /// Repeats an iterator endlessly.
    ///
    /// Instead of stopping at [`None`], the iterator will instead start again,
    /// from the beginning. After iterating again, it will start at the
    /// beginning again. And again. And again. Forever.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let a = [1, 2, 3];
    ///
    /// let mut it = a.iter().cycle();
    ///
    /// assert_eq!(it.next(), Some(&1));
    /// assert_eq!(it.next(), Some(&2));
    /// assert_eq!(it.next(), Some(&3));
    /// assert_eq!(it.next(), Some(&1));
    /// assert_eq!(it.next(), Some(&2));
    /// assert_eq!(it.next(), Some(&3));
    /// assert_eq!(it.next(), Some(&1));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    fn cycle(self) -> Cycle<Self>
    where
        Self: Sized + Clone,
    {
        Cycle::new(self)
    }

    /// Sums the elements of an iterator.
    ///
    /// Takes each element, adds them together, and returns the result.
    ///
    /// An empty iterator returns the zero value of the type.
    ///
    /// # Panics
    ///
    /// When calling `sum()` and a primitive integer type is being returned, this
    /// method will panic if the computation overflows and debug assertions are
    /// enabled.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let a = [1, 2, 3];
    /// let sum: i32 = a.iter().sum();
    ///
    /// assert_eq!(sum, 6);
    /// ```
    #[stable(feature = "iter_arith", since = "1.11.0")]
    fn sum<S>(self) -> S
    where
        Self: Sized,
        S: Sum<Self::Item>,
    {
        Sum::sum(self)
    }

    /// Iterates over the entire iterator, multiplying all the elements.
    ///
    /// An empty iterator returns the one value of the type.
    ///
    /// # Panics
    ///
    /// When calling `product()` and a primitive integer type is being returned,
    /// this method will panic if the computation overflows and debug assertions
    /// are enabled.
    ///
    /// # Examples
    ///
    /// ```
    /// fn factorial(n: u32) -> u32 {
    ///     (1..=n).product()
    /// }
    /// assert_eq!(factorial(0), 1);
    /// assert_eq!(factorial(1), 1);
    /// assert_eq!(factorial(5), 120);
    /// ```
    #[stable(feature = "iter_arith", since = "1.11.0")]
    fn product<P>(self) -> P
    where
        Self: Sized,
        P: Product<Self::Item>,
    {
        Product::product(self)
    }

    /// Lexicographically compares the elements of this [`Iterator`] with those
    /// of another.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cmp::Ordering;
    ///
    /// assert_eq!([1].iter().cmp([1].iter()), Ordering::Equal);
    /// assert_eq!([1].iter().cmp([1, 2].iter()), Ordering::Less);
    /// assert_eq!([1, 2].iter().cmp([1].iter()), Ordering::Greater);
    /// ```
    #[stable(feature = "iter_order", since = "1.5.0")]
    fn cmp<I>(self, other: I) -> Ordering
    where
        I: IntoIterator<Item = Self::Item>,
        Self::Item: Ord,
        Self: Sized,
    {
        self.cmp_by(other, |x, y| x.cmp(&y))
    }

    /// Lexicographically compares the elements of this [`Iterator`] with those
    /// of another with respect to the specified comparison function.
/// /// # Examples /// /// Basic usage: /// /// ``` /// #![feature(iter_order_by)] /// /// use std::cmp::Ordering; /// /// let xs = [1, 2, 3, 4]; /// let ys = [1, 4, 9, 16]; /// /// assert_eq!(xs.iter().cmp_by(&ys, |&x, &y| x.cmp(&y)), Ordering::Less); /// assert_eq!(xs.iter().cmp_by(&ys, |&x, &y| (x * x).cmp(&y)), Ordering::Equal); /// assert_eq!(xs.iter().cmp_by(&ys, |&x, &y| (2 * x).cmp(&y)), Ordering::Greater); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] fn cmp_by<I, F>(mut self, other: I, mut cmp: F) -> Ordering where Self: Sized, I: IntoIterator, F: FnMut(Self::Item, I::Item) -> Ordering, { let mut other = other.into_iter(); loop { let x = match self.next() { None => { if other.next().is_none() { return Ordering::Equal; } else { return Ordering::Less; } } Some(val) => val, }; let y = match other.next() { None => return Ordering::Greater, Some(val) => val, }; match cmp(x, y) { Ordering::Equal => (), non_eq => return non_eq, } } } /// Lexicographically compares the elements of this [`Iterator`] with those /// of another. /// /// # Examples /// /// ``` /// use std::cmp::Ordering; /// /// assert_eq!([1.].iter().partial_cmp([1.].iter()), Some(Ordering::Equal)); /// assert_eq!([1.].iter().partial_cmp([1., 2.].iter()), Some(Ordering::Less)); /// assert_eq!([1., 2.].iter().partial_cmp([1.].iter()), Some(Ordering::Greater)); /// /// assert_eq!([f64::NAN].iter().partial_cmp([1.].iter()), None); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] fn partial_cmp<I>(self, other: I) -> Option<Ordering> where I: IntoIterator, Self::Item: PartialOrd<I::Item>, Self: Sized, { self.partial_cmp_by(other, |x, y| x.partial_cmp(&y)) } /// Lexicographically compares the elements of this [`Iterator`] with those /// of another with respect to the specified comparison function. /// /// # Examples /// /// Basic usage: /// /// ``` /// #![feature(iter_order_by)] /// /// use std::cmp::Ordering; /// /// let xs = [1.0, 2.0, 3.0, 4.0]; /// let ys = [1.0, 4.0, 9.0, 16.0]; /// /// assert_eq!( /// xs.iter().partial_cmp_by(&ys, |&x, &y| x.partial_cmp(&y)), /// Some(Ordering::Less) /// ); /// assert_eq!( /// xs.iter().partial_cmp_by(&ys, |&x, &y| (x * x).partial_cmp(&y)), /// Some(Ordering::Equal) /// ); /// assert_eq!( /// xs.iter().partial_cmp_by(&ys, |&x, &y| (2.0 * x).partial_cmp(&y)), /// Some(Ordering::Greater) /// ); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] fn partial_cmp_by<I, F>(mut self, other: I, mut partial_cmp: F) -> Option<Ordering> where Self: Sized, I: IntoIterator, F: FnMut(Self::Item, I::Item) -> Option<Ordering>, { let mut other = other.into_iter(); loop { let x = match self.next() { None => { if other.next().is_none() { return Some(Ordering::Equal); } else { return Some(Ordering::Less); } } Some(val) => val, }; let y = match other.next() { None => return Some(Ordering::Greater), Some(val) => val, }; match partial_cmp(x, y) { Some(Ordering::Equal) => (), non_eq => return non_eq, } } } /// Determines if the elements of this [`Iterator`] are equal to those of /// another. /// /// # Examples /// /// ``` /// assert_eq!([1].iter().eq([1].iter()), true); /// assert_eq!([1].iter().eq([1, 2].iter()), false); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] fn eq<I>(self, other: I) -> bool where I: IntoIterator, Self::Item: PartialEq<I::Item>, Self: Sized, { self.eq_by(other, |x, y| x == y) } /// Determines if the elements of this [`Iterator`] are equal to those of /// another with respect to the specified equality function. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// #![feature(iter_order_by)] /// /// let xs = [1, 2, 3, 4]; /// let ys = [1, 4, 9, 16]; /// /// assert!(xs.iter().eq_by(&ys, |&x, &y| x * x == y)); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] fn eq_by<I, F>(mut self, other: I, mut eq: F) -> bool where Self: Sized, I: IntoIterator, F: FnMut(Self::Item, I::Item) -> bool, { let mut other = other.into_iter(); loop { let x = match self.next() { None => return other.next().is_none(), Some(val) => val, }; let y = match other.next() { None => return false, Some(val) => val, }; if !eq(x, y) { return false; } } } /// Determines if the elements of this [`Iterator`] are unequal to those of /// another. /// /// # Examples /// /// ``` /// assert_eq!([1].iter().ne([1].iter()), false); /// assert_eq!([1].iter().ne([1, 2].iter()), true); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] fn ne<I>(self, other: I) -> bool where I: IntoIterator, Self::Item: PartialEq<I::Item>, Self: Sized, { !self.eq(other) } /// Determines if the elements of this [`Iterator`] are lexicographically /// less than those of another. /// /// # Examples /// /// ``` /// assert_eq!([1].iter().lt([1].iter()), false); /// assert_eq!([1].iter().lt([1, 2].iter()), true); /// assert_eq!([1, 2].iter().lt([1].iter()), false); /// assert_eq!([1, 2].iter().lt([1, 2].iter()), false); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] fn lt<I>(self, other: I) -> bool where I: IntoIterator, Self::Item: PartialOrd<I::Item>, Self: Sized, { self.partial_cmp(other) == Some(Ordering::Less) } /// Determines if the elements of this [`Iterator`] are lexicographically /// less or equal to those of another. /// /// # Examples /// /// ``` /// assert_eq!([1].iter().le([1].iter()), true); /// assert_eq!([1].iter().le([1, 2].iter()), true); /// assert_eq!([1, 2].iter().le([1].iter()), false); /// assert_eq!([1, 2].iter().le([1, 2].iter()), true); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] fn le<I>(self, other: I) -> bool where I: IntoIterator, Self::Item: PartialOrd<I::Item>, Self: Sized, { matches!(self.partial_cmp(other), Some(Ordering::Less | Ordering::Equal)) } /// Determines if the elements of this [`Iterator`] are lexicographically /// greater than those of another. /// /// # Examples /// /// ``` /// assert_eq!([1].iter().gt([1].iter()), false); /// assert_eq!([1].iter().gt([1, 2].iter()), false); /// assert_eq!([1, 2].iter().gt([1].iter()), true); /// assert_eq!([1, 2].iter().gt([1, 2].iter()), false); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] fn gt<I>(self, other: I) -> bool where I: IntoIterator, Self::Item: PartialOrd<I::Item>, Self: Sized, { self.partial_cmp(other) == Some(Ordering::Greater) } /// Determines if the elements of this [`Iterator`] are lexicographically /// greater than or equal to those of another. /// /// # Examples /// /// ``` /// assert_eq!([1].iter().ge([1].iter()), true); /// assert_eq!([1].iter().ge([1, 2].iter()), false); /// assert_eq!([1, 2].iter().ge([1].iter()), true); /// assert_eq!([1, 2].iter().ge([1, 2].iter()), true); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] fn ge<I>(self, other: I) -> bool where I: IntoIterator, Self::Item: PartialOrd<I::Item>, Self: Sized, { matches!(self.partial_cmp(other), Some(Ordering::Greater | Ordering::Equal)) } /// Checks if the elements of this iterator are sorted. /// /// That is, for each element `a` and its following element `b`, `a <= b` must hold. 
If the /// iterator yields exactly zero or one element, `true` is returned. /// /// Note that if `Self::Item` is only `PartialOrd`, but not `Ord`, the above definition /// implies that this function returns `false` if any two consecutive items are not /// comparable. /// /// # Examples /// /// ``` /// #![feature(is_sorted)] /// /// assert!([1, 2, 2, 9].iter().is_sorted()); /// assert!(![1, 3, 2, 4].iter().is_sorted()); /// assert!([0].iter().is_sorted()); /// assert!(std::iter::empty::<i32>().is_sorted()); /// assert!(![0.0, 1.0, f32::NAN].iter().is_sorted()); /// ``` #[inline] #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] fn is_sorted(self) -> bool where Self: Sized, Self::Item: PartialOrd, { self.is_sorted_by(PartialOrd::partial_cmp) } /// Checks if the elements of this iterator are sorted using the given comparator function. /// /// Instead of using `PartialOrd::partial_cmp`, this function uses the given `compare` /// function to determine the ordering of two elements. Apart from that, it's equivalent to /// [`is_sorted`]; see its documentation for more information. /// /// # Examples /// /// ``` /// #![feature(is_sorted)] /// /// assert!([1, 2, 2, 9].iter().is_sorted_by(|a, b| a.partial_cmp(b))); /// assert!(![1, 3, 2, 4].iter().is_sorted_by(|a, b| a.partial_cmp(b))); /// assert!([0].iter().is_sorted_by(|a, b| a.partial_cmp(b))); /// assert!(std::iter::empty::<i32>().is_sorted_by(|a, b| a.partial_cmp(b))); /// assert!(![0.0, 1.0, f32::NAN].iter().is_sorted_by(|a, b| a.partial_cmp(b))); /// ``` /// /// [`is_sorted`]: Iterator::is_sorted #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] fn is_sorted_by<F>(mut self, mut compare: F) -> bool where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Option<Ordering>, { let mut last = match self.next() { Some(e) => e, None => return true, }; while let Some(curr) = self.next() { if let Some(Ordering::Greater) | None = compare(&last, &curr) { return false; } last = curr; } true } /// Checks if the elements of this iterator are sorted using the given key extraction /// function. /// /// Instead of comparing the iterator's elements directly, this function compares the keys of /// the elements, as determined by `f`. Apart from that, it's equivalent to [`is_sorted`]; see /// its documentation for more information. /// /// [`is_sorted`]: Iterator::is_sorted /// /// # Examples /// /// ``` /// #![feature(is_sorted)] /// /// assert!(["c", "bb", "aaa"].iter().is_sorted_by_key(|s| s.len())); /// assert!(![-2i32, -1, 0, 3].iter().is_sorted_by_key(|n| n.abs())); /// ``` #[inline] #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] fn is_sorted_by_key<F, K>(self, f: F) -> bool where Self: Sized, F: FnMut(Self::Item) -> K, K: PartialOrd, { self.map(f).is_sorted() } /// See [TrustedRandomAccess] #[inline] #[doc(hidden)] #[unstable(feature = "trusted_random_access", issue = "none")] unsafe fn get_unchecked(&mut self, _idx: usize) -> Self::Item where Self: TrustedRandomAccess, { unreachable!("Always specialized"); } } #[stable(feature = "rust1", since = "1.0.0")] impl<I: Iterator + ?Sized> Iterator for &mut I { type Item = I::Item; fn next(&mut self) -> Option<I::Item> { (**self).next() } fn size_hint(&self) -> (usize, Option<usize>) { (**self).size_hint() } fn nth(&mut self, n: usize) -> Option<Self::Item> { (**self).nth(n) } }
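// Editor's note: a minimal sketch (not part of the original source) of how the
// comparison adaptors above behave on partially ordered items such as floats.
// `partial_cmp` walks the two iterators pairwise and returns `None` as soon as
// a pair is incomparable, which is also why `lt`/`le`/`gt`/`ge` are `false`
// for such inputs and why `is_sorted` rejects sequences containing NaN.
#[allow(dead_code)]
fn _editor_partial_order_sketch() {
    use crate::cmp::Ordering;

    // Pairs are compared left to right; the first non-equal pair decides.
    assert_eq!([0.0, 1.0].iter().partial_cmp([0.0, 2.0].iter()), Some(Ordering::Less));

    // A single incomparable pair (here NaN) makes the whole result `None`...
    assert_eq!([f64::NAN].iter().partial_cmp([1.0].iter()), None);

    // ...and since `lt` is defined as `partial_cmp(..) == Some(Ordering::Less)`,
    // it is simply `false` rather than panicking.
    assert!(![f64::NAN].iter().lt([1.0].iter()));
}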
non_native_gates.rs
// Copyright (c) 2022 Espresso Systems (espressosys.com)
// This file is part of the Jellyfish library.

// You should have received a copy of the MIT License
// along with the Jellyfish library. If not, see <https://mit-license.org/>.

//! This module implements non-native circuits that are mainly
//! useful for the Rescue hash function.

use super::mod_arith::{FpElem, FpElemVar};
use crate::{
    circuit::{Circuit, PlonkCircuit},
    errors::{CircuitError, PlonkError},
};
use ark_ff::{BigInteger, FpParameters, PrimeField};
use ark_std::{format, vec::Vec};

impl<F: PrimeField> PlonkCircuit<F> {
    /// generate a non-native circuit for the statement x^11 = y
    ///
    /// Input:
    /// - variable representation of x over a target field `T` whose order is
    ///   less than F.
    /// - variable representation of x^11 over the same field
    ///
    /// Cost: 5 mod_mul + 2 equal gates
    pub fn non_native_power_11_gate<T: PrimeField>(
        &mut self,
        x: &FpElemVar<F>,
        x_to_11: &FpElemVar<F>,
    ) -> Result<(), PlonkError> {
        self.check_var_bound(x.components().0)?;
        self.check_var_bound(x.components().1)?;
        self.check_var_bound(x_to_11.components().0)?;
        self.check_var_bound(x_to_11.components().1)?;

        if T::size_in_bits() >= F::size_in_bits() {
            return Err(CircuitError::NotSupported(format!(
                "Target field size ({}) is greater than or equal to evaluation field size ({})",
                T::size_in_bits(),
                F::size_in_bits()
            ))
            .into());
        }

        // x^11 = y
        let y = self.non_native_power_11_gen::<T>(x)?;
        self.equal_gate(x_to_11.components().0, y.components().0)?;
        self.equal_gate(x_to_11.components().1, y.components().1)
    }

    /// generate a non-native circuit for the statement x^11 = y
    ///
    /// Input: variable representation of x over a target
    /// field `T` whose order is less than F.
    ///
    /// Output: variable representation of x^11
    ///
    /// Cost: 5 mod_mul
    pub fn non_native_power_11_gen<T: PrimeField>(
        &mut self,
        x: &FpElemVar<F>,
    ) -> Result<FpElemVar<F>, PlonkError> {
        // // checks already done by the caller
        // if T::size_in_bits() >= F::size_in_bits() {
        //     return Err(CircuitError::NotSupported(format!(
        //         "Target field size ({}) is greater than or equal to evaluation field size ({})",
        //         T::size_in_bits(),
        //         F::size_in_bits()
        //     ))
        //     .into());
        // }

        // convert T::MODULUS into an element in F
        // Guaranteed without mod reduction since T::size_in_bits() < F::size_in_bits()
        let t_modulus = F::from_le_bytes_mod_order(T::Params::MODULUS.to_bytes_le().as_ref());

        // convert t_modulus into FpElem
        let m = x.param_m();
        let two_power_m = Some(x.two_power_m());
        let p = FpElem::new(&t_modulus, m, two_power_m)?;

        // x^11 = y
        let x2 = self.mod_mul(x, x, &p)?;
        let x3 = self.mod_mul(&x2, x, &p)?;
        let x4 = self.mod_mul(&x2, &x2, &p)?;
        let x8 = self.mod_mul(&x4, &x4, &p)?;
        self.mod_mul(&x3, &x8, &p)
    }

    /// generate a non-native circuit for the statement x^5 = y
    ///
    /// Input: variable representation of x over a target
    /// field `T` whose order is less than F.
    ///
    /// Output: variable representation of x^5
    ///
    /// Cost: 3 mod_mul
    pub fn non_native_power_5_gen<T: PrimeField>(
        &mut self,
        x: &FpElemVar<F>,
    ) -> Result<FpElemVar<F>, PlonkError> {
        // check the correctness of parameters
        if T::size_in_bits() >= F::size_in_bits() {
            return Err(CircuitError::NotSupported(format!(
                "Target field size ({}) is greater than or equal to evaluation field size ({})",
                T::size_in_bits(),
                F::size_in_bits()
            ))
            .into());
        }

        // convert T::MODULUS into an element in F
        // Guaranteed without mod reduction since T::size_in_bits() < F::size_in_bits()
        let t_modulus = F::from_le_bytes_mod_order(T::Params::MODULUS.to_bytes_le().as_ref());

        // convert t_modulus into FpElem
        let m = x.param_m();
        let two_power_m = Some(x.two_power_m());
        let p = FpElem::new(&t_modulus, m, two_power_m)?;

        // x^5 = y
        let x2 = self.mod_mul(x, x, &p)?;
        let x3 = self.mod_mul(&x2, x, &p)?;
        self.mod_mul(&x2, &x3, &p)
    }

    /// Input vectors x and y, and a constant c,
    /// generate a non-native circuit for the statement
    /// var_output = inner_product(x, y) + c
    /// Input: variable representation of x, y, c over a target
    /// field `T` whose order is less than F.
    ///
    /// Cost: 4 mod_mul_constant + 1 mod_add_internal
    #[allow(clippy::many_single_char_names)]
    pub fn non_native_linear_gen<T: PrimeField>(
        &mut self,
        x: &[FpElemVar<F>],
        y: &[FpElem<F>],
        c: &FpElem<F>,
    ) -> Result<FpElemVar<F>, PlonkError> {
        let m = c.param_m();
        let two_power_m = Some(c.two_power_m());

        // check the correctness of parameters
        if T::size_in_bits() >= F::size_in_bits() {
            return Err(CircuitError::NotSupported(format!(
                "Target field size ({}) is greater than or equal to evaluation field size ({})",
                T::size_in_bits(),
                F::size_in_bits()
            ))
            .into());
        }
        if x.len() != y.len() {
            return Err(CircuitError::ParameterError(format!(
                "inputs x and y have different lengths ({} vs {})",
                x.len(),
                y.len()
            ))
            .into());
        }
        for e in x {
            if m != e.param_m() {
                return Err(CircuitError::ParameterError(format!(
                    "inputs x and c have different m parameters ({} vs {})",
                    e.param_m(),
                    m
                ))
                .into());
            }
        }
        for e in y {
            if m != e.param_m() {
                return Err(CircuitError::ParameterError(format!(
                    "inputs y and c have different m parameters ({} vs {})",
                    e.param_m(),
                    m
                ))
                .into());
            }
        }

        // convert T::MODULUS into an element in F
        // Guaranteed without mod reduction since T::size_in_bits() < F::size_in_bits()
        let t_modulus = F::from_le_bytes_mod_order(T::Params::MODULUS.to_bytes_le().as_ref());

        // convert t_modulus into FpElem
        let p = FpElem::new(&t_modulus, m, two_power_m)?;

        // generate the linear statement
        // (\sum x[i] * y[i]) + c
        let xiyi: Vec<FpElemVar<F>> = x
            .iter()
            .zip(y)
            .map(|(xi, yi)| self.mod_mul_constant(xi, yi, &p))
            .collect::<Result<Vec<FpElemVar<F>>, _>>()?;
        let sum_xiyi = self.mod_add_vec(xiyi.as_ref(), &p)?;
        self.mod_add_constant(&sum_xiyi, c, &p)
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::circuit::{Circuit, Variable};
    use ark_bls12_377::Fq as Fq377;
    use ark_ed_on_bls12_377::Fq as FqEd377;
    use ark_std::test_rng;

    const RANGE_BIT_LEN_FOR_TEST: usize = 8;

    #[test]
    fn test_non_native_power_11_gen() -> Result<(), PlonkError> {
        // use bls12-377 base field to prove rescue over jubjub377 base field
        test_non_native_power_11_gen_helper::<FqEd377, Fq377>()
    }

    fn test_non_native_power_11_gen_helper<T: PrimeField, F: PrimeField>() -> Result<(), PlonkError>
    {
        let mut circuit: PlonkCircuit<F> = PlonkCircuit::new_ultra_plonk(RANGE_BIT_LEN_FOR_TEST);

        let mut rng = test_rng();
        let x_t = T::rand(&mut rng);
        let y_t = x_t.pow(&[11]);
        let x_p = F::from_le_bytes_mod_order(x_t.into_repr().to_bytes_le().as_ref());
        let y_p = F::from_le_bytes_mod_order(y_t.into_repr().to_bytes_le().as_ref());

        let m = (T::size_in_bits() / 2 / RANGE_BIT_LEN_FOR_TEST + 1) * RANGE_BIT_LEN_FOR_TEST;

        let x_var = circuit.create_variable(x_p)?;
        let y_var = circuit.create_variable(y_p)?;

        let x_split_vars = FpElemVar::new_unchecked(&mut circuit, x_var, m, None)?;
        let x11_split_vars = circuit.non_native_power_11_gen::<T>(&x_split_vars)?;
        let x11_var = x11_split_vars.convert_to_var(&mut circuit)?;

        // good path
        circuit.equal_gate(x11_var, y_var)?;
        assert!(circuit.check_circuit_satisfiability(&[]).is_ok());

        // bad path: wrong witness should fail
        let witness = circuit.witness(y_var)?;
        *circuit.witness_mut(y_var) = F::rand(&mut rng);
        assert!(circuit.check_circuit_satisfiability(&[]).is_err());
        *circuit.witness_mut(y_var) = witness;

        // bad path: wrong value should fail
        let y_wrong = F::rand(&mut rng);
        let y_wrong_var = circuit.create_variable(y_wrong)?;
        circuit.equal_gate(x11_var, y_wrong_var)?;
        assert!(circuit.check_circuit_satisfiability(&[]).is_err());

        Ok(())
    }

    #[test]
    fn test_non_native_power_5_gen() -> Result<(), PlonkError> {
        // use bls12-377 base field to prove rescue
over jubjub377 base field test_non_native_power_5_gen_helper::<FqEd377, Fq377>() } fn test_non_native_power_5_gen_helper<T: PrimeField, F: PrimeField>() -> Result<(), PlonkError> { let mut circuit: PlonkCircuit<F> = PlonkCircuit::new_ultra_plonk(RANGE_BIT_LEN_FOR_TEST); let mut rng = test_rng(); let x_t = T::rand(&mut rng); let y_t = x_t.pow(&[5]); let x_p = F::from_le_bytes_mod_order(x_t.into_repr().to_bytes_le().as_ref()); let y_p = F::from_le_bytes_mod_order(y_t.into_repr().to_bytes_le().as_ref()); let m = (T::size_in_bits() / 2 / RANGE_BIT_LEN_FOR_TEST + 1) * RANGE_BIT_LEN_FOR_TEST; let x_var = circuit.create_variable(x_p)?; let y_var = circuit.create_variable(y_p)?; let x_split_vars = FpElemVar::new_unchecked(&mut circuit, x_var, m, None)?; let x5_split_vars = circuit.non_native_power_5_gen::<T>(&x_split_vars)?; let x5_var = x5_split_vars.convert_to_var(&mut circuit)?; // good path circuit.equal_gate(x5_var, y_var)?; assert!(circuit.check_circuit_satisfiability(&[]).is_ok()); // bad path: wrong witness should fail let witness = circuit.witness(y_var)?; *circuit.witness_mut(y_var) = F::rand(&mut rng); assert!(circuit.check_circuit_satisfiability(&[]).is_err()); *circuit.witness_mut(y_var) = witness; // bad path: wrong value should fail let y_wrong = F::rand(&mut rng); let y_wrong_var = circuit.create_variable(y_wrong)?; circuit.equal_gate(x5_var, y_wrong_var)?; assert!(circuit.check_circuit_satisfiability(&[]).is_err()); Ok(()) } #[test] fn test_non_native_power_11_gate() -> Result<(), PlonkError> { // use bls12-377 base field to prove rescue over bls scalar field test_non_native_power_11_gate_helper::<FqEd377, Fq377>() } fn test_non_native_power_11_gate_helper<T: PrimeField, F: PrimeField>() -> Result<(), PlonkError> { let mut circuit: PlonkCircuit<F> = PlonkCircuit::new_ultra_plonk(RANGE_BIT_LEN_FOR_TEST); let mut rng = test_rng(); let x_t = T::rand(&mut rng); let y_t = x_t.pow(&[11]); let x_p = F::from_le_bytes_mod_order(x_t.into_repr().to_bytes_le().as_ref()); let y_p = F::from_le_bytes_mod_order(y_t.into_repr().to_bytes_le().as_ref()); let m = (T::size_in_bits() / 2 / RANGE_BIT_LEN_FOR_TEST + 1) * RANGE_BIT_LEN_FOR_TEST; let x_var = circuit.create_variable(x_p)?; let y_var = circuit.create_variable(y_p)?; let x_split_vars = FpElemVar::new_unchecked(&mut circuit, x_var, m, None)?; let y_split_vars = FpElemVar::new_unchecked(&mut circuit, y_var, m, Some(x_split_vars.two_power_m()))?; circuit.non_native_power_11_gate::<T>(&x_split_vars, &y_split_vars)?; // good path assert!(circuit.check_circuit_satisfiability(&[]).is_ok()); // bad path: wrong witness should fail let witness = circuit.witness(y_var)?; *circuit.witness_mut(y_var) = F::rand(&mut rng); assert!(circuit.check_circuit_satisfiability(&[]).is_err()); *circuit.witness_mut(y_var) = witness; // bad path: wrong value should fail let y_wrong = F::rand(&mut rng); let y_wrong_var = circuit.create_variable(y_wrong)?; let y_wrong_split_vars = FpElemVar::new_unchecked( &mut circuit, y_wrong_var, m, Some(x_split_vars.two_power_m()), )?; circuit.non_native_power_11_gate::<T>(&x_split_vars, &y_wrong_split_vars)?; assert!(circuit.check_circuit_satisfiability(&[]).is_err()); Ok(()) } #[test] fn test_non_native_linear_gate() -> Result<(), PlonkError> { // use bls12-377 base field to prove rescue over jubjub377 base field test_non_native_linear_gate_helper::<FqEd377, Fq377>() } fn test_non_native_linear_gate_helper<T: PrimeField, F: PrimeField>() -> Result<(), PlonkError> { let mut circuit: PlonkCircuit<F> = 
PlonkCircuit::new_ultra_plonk(RANGE_BIT_LEN_FOR_TEST); let m = (T::size_in_bits() / 2 / RANGE_BIT_LEN_FOR_TEST + 1) * RANGE_BIT_LEN_FOR_TEST; let mut rng = test_rng(); let x_t: Vec<T> = (0..4).map(|_| T::rand(&mut rng)).collect(); let y_t: Vec<T> = (0..4).map(|_| T::rand(&mut rng)).collect(); let c_t = T::rand(&mut rng); let mut res_t = c_t.clone(); for (&xi, &yi) in x_t.iter().zip(y_t.iter()) { res_t += xi * yi; } let res_p = F::from_le_bytes_mod_order(res_t.into_repr().to_bytes_le().as_ref()); let x_p: Vec<F> = x_t .iter() .map(|x| F::from_le_bytes_mod_order(x.into_repr().to_bytes_le().as_ref())) .collect(); let y_p: Vec<F> = y_t .iter() .map(|y| F::from_le_bytes_mod_order(y.into_repr().to_bytes_le().as_ref())) .collect(); let c_p = F::from_le_bytes_mod_order(c_t.into_repr().to_bytes_le().as_ref()); let x_vars: Vec<Variable> = x_p .iter() .map(|x| circuit.create_variable(*x)) .collect::<Result<Vec<Variable>, _>>()?; let x_split_vars: Vec<FpElemVar<F>> = x_vars .iter() .map(|x| FpElemVar::new_unchecked(&mut circuit, *x, m, None)) .collect::<Result<Vec<FpElemVar<F>>, _>>()?; let y_split: Vec<FpElem<F>> = y_p .iter() .map(|y| FpElem::new(y, m, Some(x_split_vars[0].two_power_m()))) .collect::<Result<Vec<FpElem<F>>, _>>()?; let c_split = FpElem::new(&c_p, m, Some(x_split_vars[0].two_power_m()))?; // check the result is correct let res_split_var = circuit.non_native_linear_gen::<T>(&x_split_vars, &y_split, &c_split)?; let res_var = res_split_var.convert_to_var(&mut circuit)?; assert_eq!(circuit.witness(res_var)?, res_p); // good path: the circuit is satisfied let res_var2 = circuit.create_variable(res_p)?; circuit.equal_gate(res_var, res_var2)?; assert!(circuit.check_circuit_satisfiability(&[]).is_ok()); // bad path: wrong witness should fail let witness = circuit.witness(x_vars[0])?; *circuit.witness_mut(x_vars[0]) = F::rand(&mut rng); assert!(circuit.check_circuit_satisfiability(&[]).is_err()); *circuit.witness_mut(x_vars[0]) = witness; // bad path: wrong value should fail let res_var3 = F::rand(&mut rng); let res_var3 = circuit.create_variable(res_var3)?; circuit.equal_gate(res_var, res_var3)?; assert!(circuit.check_circuit_satisfiability(&[]).is_err()); Ok(()) } }
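// Editor's note: a quick sanity sketch (not part of the original source) of the
// multiplication counts claimed in the doc comments above. `x^11` is computed
// with the 5-multiplication addition chain x^2, x^3 = x^2*x, x^4 = x^2*x^2,
// x^8 = x^4*x^4, x^11 = x^3*x^8, and `x^5` with the 3-multiplication chain
// x^2, x^3, x^5 = x^2*x^3; the same chains hold over plain integers.
#[cfg(test)]
mod editor_addition_chain_sketch {
    #[test]
    fn power_chains_over_plain_integers() {
        let x: u128 = 3;
        let x2 = x * x;
        let x3 = x2 * x;
        let x4 = x2 * x2;
        let x8 = x4 * x4;
        assert_eq!(x3 * x8, x.pow(11)); // 5 multiplications in total
        assert_eq!(x2 * x3, x.pow(5)); // 3 multiplications in total
    }
}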
PositionedOverlay.d.ts
import React, { PureComponent } from 'react';
import { Rect } from '../../utilities/geometry';
import { PreferredPosition, PreferredAlignment } from './utilities/math';
declare type Positioning = 'above' | 'below';
interface OverlayDetails {
    left?: number;
    right?: number;
    desiredHeight: number;
    positioning: Positioning;
    measuring: boolean;
    activatorRect: Rect;
}
export interface PositionedOverlayProps {
    active: boolean;
    activator: HTMLElement;
    preferInputActivator?: boolean;
    preferredPosition?: PreferredPosition;
    preferredAlignment?: PreferredAlignment;
    fullWidth?: boolean;
    fixed?: boolean;
    preventInteraction?: boolean;
    classNames?: string;
    zIndexOverride?: number;
    render(overlayDetails: OverlayDetails): React.ReactNode;
    onScrollOut?(): void;
}
interface State {
    measuring: boolean;
    activatorRect: Rect;
    left?: number;
    right?: number;
    top: number;
    height: number;
    width: number | null;
    positioning: Positioning;
    zIndex: number | null;
    outsideScrollableContainer: boolean;
    lockPosition: boolean;
}
export declare class PositionedOverlay
extends PureComponent<PositionedOverlayProps, State> { state: State; private overlay; private scrollableContainer; private observer; constructor(props: PositionedOverlayProps); componentDidMount(): void; componentWillUnmount(): void; componentDidUpdate(): void; render(): JSX.Element; forceUpdatePosition(): void; private overlayDetails; private setOverlay; private handleMeasurement; } export {};
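// Editor's note: a minimal usage sketch (not part of the original source).
// `PositionedOverlay` is a render-prop component: the `render` callback
// receives the measured `OverlayDetails` and returns the overlay's contents.
// The activator lookup, the `preferredPosition` value, and the styling below
// are illustrative assumptions, not part of this declaration file.
//
//   const activator = document.getElementById('activator') as HTMLElement;
//
//   <PositionedOverlay
//     active
//     activator={activator}
//     preferredPosition="below"
//     render={({measuring, desiredHeight, positioning}) =>
//       measuring ? null : (
//         <div style={{maxHeight: desiredHeight}} data-positioning={positioning}>
//           Overlay content
//         </div>
//       )
//     }
//   />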
test_versions.py
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os # try/except added for compatibility with python < 3.8 try: from unittest import mock from unittest.mock import AsyncMock except ImportError: import mock import grpc from grpc.experimental import aio import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.dialogflow_v2.services.versions import VersionsAsyncClient from google.cloud.dialogflow_v2.services.versions import VersionsClient from google.cloud.dialogflow_v2.services.versions import pagers from google.cloud.dialogflow_v2.services.versions import transports from google.cloud.dialogflow_v2.types import version from google.cloud.dialogflow_v2.types import version as gcd_version from google.oauth2 import service_account from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore import google.auth def client_cert_source_callback(): return b"cert bytes", b"key bytes" # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. 
def modify_default_endpoint(client): return ( "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT ) def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" sandbox_endpoint = "example.sandbox.googleapis.com" sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" assert VersionsClient._get_default_mtls_endpoint(None) is None assert VersionsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint assert ( VersionsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint ) assert ( VersionsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint ) assert ( VersionsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint ) assert VersionsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi @pytest.mark.parametrize( "client_class,transport_name", [ (VersionsClient, "grpc"), (VersionsAsyncClient, "grpc_asyncio"), ], ) def test_versions_client_from_service_account_info(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == ("dialogflow.googleapis.com:443") @pytest.mark.parametrize( "transport_class,transport_name", [ (transports.VersionsGrpcTransport, "grpc"), (transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_versions_client_service_account_always_use_jwt( transport_class, transport_name ): with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=False) use_jwt.assert_not_called() @pytest.mark.parametrize( "client_class,transport_name", [ (VersionsClient, "grpc"), (VersionsAsyncClient, "grpc_asyncio"), ], ) def test_versions_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds client = client_class.from_service_account_file( "dummy/file/path.json", transport=transport_name ) assert client.transport._credentials == creds assert isinstance(client, client_class) client = client_class.from_service_account_json( "dummy/file/path.json", transport=transport_name ) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == ("dialogflow.googleapis.com:443") def test_versions_client_get_transport_class(): transport = VersionsClient.get_transport_class() available_transports = [ transports.VersionsGrpcTransport, ] assert transport in available_transports transport = VersionsClient.get_transport_class("grpc") assert transport == transports.VersionsGrpcTransport @pytest.mark.parametrize( 
"client_class,transport_class,transport_name", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc"), (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) @mock.patch.object( VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient) ) @mock.patch.object( VersionsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsAsyncClient), ) def test_versions_client_client_options(client_class, transport_class, transport_name): # Check that if channel is provided we won't create a new one. with mock.patch.object(VersionsClient, "get_transport_class") as gtc: transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. with mock.patch.object(VersionsClient, "get_transport_class") as gtc: client = client_class(transport=transport_name) gtc.assert_called() # Check the case api_endpoint is provided. options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): client = client_class(transport=transport_name) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError): client = client_class(transport=transport_name) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,use_client_cert_env", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc", "true"), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", "true", ), (VersionsClient, transports.VersionsGrpcTransport, "grpc", "false"), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", "false", ), ], ) @mock.patch.object( VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient) ) @mock.patch.object( VersionsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_versions_client_mtls_env_auto( client_class, transport_class, transport_name, use_client_cert_env ): # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. # Check the case client_cert_source is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): options = client_options.ClientOptions( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) if use_client_cert_env == "false": expected_client_cert_source = None expected_host = client.DEFAULT_ENDPOINT else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=client_cert_source_callback, ): if use_client_cert_env == "false": expected_host = client.DEFAULT_ENDPOINT expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT expected_client_cert_source = client_cert_source_callback patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient]) @mock.patch.object( VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient) ) @mock.patch.object( VersionsAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsAsyncClient), ) def test_versions_client_get_mtls_endpoint_and_cert_source(client_class): mock_client_cert_source = mock.Mock() # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source == mock_client_cert_source # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): mock_client_cert_source = mock.Mock() mock_api_endpoint = "foo" options = client_options.ClientOptions( client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint ) api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( options ) assert api_endpoint == mock_api_endpoint assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". 
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_ENDPOINT assert cert_source is None # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=mock_client_cert_source, ): ( api_endpoint, cert_source, ) = client_class.get_mtls_endpoint_and_cert_source() assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc"), (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_versions_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. options = client_options.ClientOptions( scopes=["1", "2"], ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,grpc_helpers", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc", grpc_helpers), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) def test_versions_client_client_options_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. 
options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) def test_versions_client_client_options_from_dict(): with mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsGrpcTransport.__init__" ) as grpc_transport: grpc_transport.return_value = None client = VersionsClient(client_options={"api_endpoint": "squid.clam.whelk"}) grpc_transport.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,grpc_helpers", [ (VersionsClient, transports.VersionsGrpcTransport, "grpc", grpc_helpers), ( VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) def test_versions_client_create_channel_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # test that the credentials from file are saved and used as the credentials. with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel" ) as create_channel: creds = ga_credentials.AnonymousCredentials() file_creds = ga_credentials.AnonymousCredentials() load_creds.return_value = (file_creds, None) adc.return_value = (creds, None) client = client_class(client_options=options, transport=transport_name) create_channel.assert_called_with( "dialogflow.googleapis.com:443", credentials=file_creds, credentials_file=None, quota_project_id=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), scopes=None, default_host="dialogflow.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize( "request_type", [ version.ListVersionsRequest, dict, ], ) def test_list_versions(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.list_versions), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = version.ListVersionsResponse( next_page_token="next_page_token_value", ) response = client.list_versions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == version.ListVersionsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListVersionsPager) assert response.next_page_token == "next_page_token_value" def test_list_versions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: client.list_versions() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == version.ListVersionsRequest() @pytest.mark.asyncio async def test_list_versions_async( transport: str = "grpc_asyncio", request_type=version.ListVersionsRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( version.ListVersionsResponse( next_page_token="next_page_token_value", ) ) response = await client.list_versions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == version.ListVersionsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListVersionsAsyncPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.asyncio async def test_list_versions_async_from_dict(): await test_list_versions_async(request_type=dict) def test_list_versions_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.ListVersionsRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_versions), "__call__") as call: call.return_value = version.ListVersionsResponse() client.list_versions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_list_versions_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
    request = version.ListVersionsRequest()

    request.parent = "parent_value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            version.ListVersionsResponse()
        )
        await client.list_versions(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]


def test_list_versions_flattened():
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = version.ListVersionsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_versions(
            parent="parent_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val


def test_list_versions_flattened_error():
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_versions(
            version.ListVersionsRequest(),
            parent="parent_value",
        )


@pytest.mark.asyncio
async def test_list_versions_flattened_async():
    client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = version.ListVersionsResponse()

        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            version.ListVersionsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_versions(
            parent="parent_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val


@pytest.mark.asyncio
async def test_list_versions_flattened_error_async():
    client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_versions(
            version.ListVersionsRequest(),
            parent="parent_value",
        )


def test_list_versions_pager(transport_name: str = "grpc"):
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            version.ListVersionsResponse(
                versions=[
                    version.Version(),
                    version.Version(),
                    version.Version(),
                ],
                next_page_token="abc",
            ),
            version.ListVersionsResponse(
                versions=[],
                next_page_token="def",
            ),
            version.ListVersionsResponse(
                versions=[
                    version.Version(),
                ],
                next_page_token="ghi",
            ),
            version.ListVersionsResponse(
                versions=[
                    version.Version(),
                    version.Version(),
                ],
            ),
            RuntimeError,
        )

        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_versions(request={})

        assert pager._metadata == metadata

        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, version.Version) for i in results)


def test_list_versions_pages(transport_name: str = "grpc"):
    client = VersionsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            version.ListVersionsResponse(
                versions=[
                    version.Version(),
                    version.Version(),
                    version.Version(),
                ],
                next_page_token="abc",
            ),
            version.ListVersionsResponse(
                versions=[],
                next_page_token="def",
            ),
            version.ListVersionsResponse(
                versions=[
                    version.Version(),
                ],
                next_page_token="ghi",
            ),
            version.ListVersionsResponse(
                versions=[
                    version.Version(),
                    version.Version(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_versions(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token


@pytest.mark.asyncio
async def test_list_versions_async_pager():
    client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            version.ListVersionsResponse(
                versions=[
                    version.Version(),
                    version.Version(),
                    version.Version(),
                ],
                next_page_token="abc",
            ),
            version.ListVersionsResponse(
                versions=[],
                next_page_token="def",
            ),
            version.ListVersionsResponse(
                versions=[
                    version.Version(),
                ],
                next_page_token="ghi",
            ),
            version.ListVersionsResponse(
                versions=[
                    version.Version(),
                    version.Version(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_versions(
            request={},
        )
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:  # pragma: no branch
            responses.append(response)

        assert len(responses) == 6
        assert all(isinstance(i, version.Version) for i in responses)


@pytest.mark.asyncio
async def test_list_versions_async_pages():
    client = VersionsAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
call.side_effect = ( version.ListVersionsResponse( versions=[ version.Version(), version.Version(), version.Version(), ], next_page_token="abc", ), version.ListVersionsResponse( versions=[], next_page_token="def", ), version.ListVersionsResponse( versions=[ version.Version(), ], next_page_token="ghi", ), version.ListVersionsResponse( versions=[ version.Version(), version.Version(), ], ), RuntimeError, ) pages = [] async for page_ in ( await client.list_versions(request={}) ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.parametrize( "request_type", [ version.GetVersionRequest, dict, ], ) def test_get_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = version.Version( name="name_value", description="description_value", version_number=1518, status=version.Version.VersionStatus.IN_PROGRESS, ) response = client.get_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == version.GetVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == version.Version.VersionStatus.IN_PROGRESS def test_get_version_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: client.get_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == version.GetVersionRequest() @pytest.mark.asyncio async def test_get_version_async( transport: str = "grpc_asyncio", request_type=version.GetVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( version.Version( name="name_value", description="description_value", version_number=1518, status=version.Version.VersionStatus.IN_PROGRESS, ) ) response = await client.get_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == version.GetVersionRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == version.Version.VersionStatus.IN_PROGRESS @pytest.mark.asyncio async def test_get_version_async_from_dict(): await test_get_version_async(request_type=dict) def test_get_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.GetVersionRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: call.return_value = version.Version() client.get_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_get_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.GetVersionRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version()) await client.get_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] def test_get_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = version.Version() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_version( name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_get_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_version( version.GetVersionRequest(), name="name_value", ) @pytest.mark.asyncio async def test_get_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_version), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = version.Version() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_version( name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_get_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.get_version( version.GetVersionRequest(), name="name_value", ) @pytest.mark.parametrize( "request_type", [ gcd_version.CreateVersionRequest, dict, ], ) def test_create_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) response = client.create_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.CreateVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS def test_create_version_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: client.create_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.CreateVersionRequest() @pytest.mark.asyncio async def test_create_version_async( transport: str = "grpc_asyncio", request_type=gcd_version.CreateVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) ) response = await client.create_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.CreateVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS @pytest.mark.asyncio async def test_create_version_async_from_dict(): await test_create_version_async(request_type=dict) def
test_create_version_field_headers
(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcd_version.CreateVersionRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: call.return_value = gcd_version.Version() client.create_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_create_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcd_version.CreateVersionRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) await client.create_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "parent=parent_value", ) in kw["metadata"] def test_create_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_version( parent="parent_value", version=gcd_version.Version(name="name_value"), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val def test_create_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.create_version( gcd_version.CreateVersionRequest(), parent="parent_value", version=gcd_version.Version(name="name_value"), ) @pytest.mark.asyncio async def test_create_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_version), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = gcd_version.Version() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.create_version( parent="parent_value", version=gcd_version.Version(name="name_value"), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].parent mock_val = "parent_value" assert arg == mock_val arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val @pytest.mark.asyncio async def test_create_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.create_version( gcd_version.CreateVersionRequest(), parent="parent_value", version=gcd_version.Version(name="name_value"), ) @pytest.mark.parametrize( "request_type", [ gcd_version.UpdateVersionRequest, dict, ], ) def test_update_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) response = client.update_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.UpdateVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS def test_update_version_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: client.update_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.UpdateVersionRequest() @pytest.mark.asyncio async def test_update_version_async( transport: str = "grpc_asyncio", request_type=gcd_version.UpdateVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.update_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gcd_version.Version( name="name_value", description="description_value", version_number=1518, status=gcd_version.Version.VersionStatus.IN_PROGRESS, ) ) response = await client.update_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == gcd_version.UpdateVersionRequest() # Establish that the response is the type that we expect. assert isinstance(response, gcd_version.Version) assert response.name == "name_value" assert response.description == "description_value" assert response.version_number == 1518 assert response.status == gcd_version.Version.VersionStatus.IN_PROGRESS @pytest.mark.asyncio async def test_update_version_async_from_dict(): await test_update_version_async(request_type=dict) def test_update_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcd_version.UpdateVersionRequest() request.version.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: call.return_value = gcd_version.Version() client.update_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "version.name=name_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_update_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = gcd_version.UpdateVersionRequest() request.version.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) await client.update_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "version.name=name_value", ) in kw["metadata"] def test_update_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.update_version( version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val arg = args[0].update_mask mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val def test_update_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.update_version( gcd_version.UpdateVersionRequest(), version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.asyncio async def test_update_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gcd_version.Version() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcd_version.Version()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.update_version( version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].version mock_val = gcd_version.Version(name="name_value") assert arg == mock_val arg = args[0].update_mask mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val @pytest.mark.asyncio async def test_update_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.update_version( gcd_version.UpdateVersionRequest(), version=gcd_version.Version(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.parametrize( "request_type", [ version.DeleteVersionRequest, dict, ], ) def test_delete_version(request_type, transport: str = "grpc"): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None response = client.delete_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == version.DeleteVersionRequest() # Establish that the response is the type that we expect. assert response is None def test_delete_version_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.delete_version), "__call__") as call: client.delete_version() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == version.DeleteVersionRequest() @pytest.mark.asyncio async def test_delete_version_async( transport: str = "grpc_asyncio", request_type=version.DeleteVersionRequest ): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) response = await client.delete_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == version.DeleteVersionRequest() # Establish that the response is the type that we expect. assert response is None @pytest.mark.asyncio async def test_delete_version_async_from_dict(): await test_delete_version_async(request_type=dict) def test_delete_version_field_headers(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.DeleteVersionRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: call.return_value = None client.delete_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio async def test_delete_version_field_headers_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = version.DeleteVersionRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) await client.delete_version(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", "name=name_value", ) in kw["metadata"] def test_delete_version_flattened(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
client.delete_version( name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val def test_delete_version_flattened_error(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.delete_version( version.DeleteVersionRequest(), name="name_value", ) @pytest.mark.asyncio async def test_delete_version_flattened_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_version), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.delete_version( name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].name mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio async def test_delete_version_flattened_error_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.delete_version( version.DeleteVersionRequest(), name="name_value", ) def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = VersionsClient( client_options={"credentials_file": "credentials.json"}, transport=transport, ) # It is an error to provide an api_key and a transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): client = VersionsClient( client_options=options, transport=transport, ) # It is an error to provide an api_key and a credential. options = mock.Mock() options.api_key = "api_key" with pytest.raises(ValueError): client = VersionsClient( client_options=options, credentials=ga_credentials.AnonymousCredentials() ) # It is an error to provide scopes and a transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = VersionsClient( client_options={"scopes": ["1", "2"]}, transport=transport, ) def test_transport_instance(): # A client may be instantiated with a custom transport instance. 
transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) client = VersionsClient(transport=transport) assert client.transport is transport def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.VersionsGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel transport = transports.VersionsGrpcAsyncIOTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel @pytest.mark.parametrize( "transport_class", [ transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport, ], ) def test_transport_adc(transport_class): # Test default credentials are used if not provided. with mock.patch.object(google.auth, "default") as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() @pytest.mark.parametrize( "transport_name", [ "grpc", ], ) def test_transport_kind(transport_name): transport = VersionsClient.get_transport_class(transport_name)( credentials=ga_credentials.AnonymousCredentials(), ) assert transport.kind == transport_name def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), ) assert isinstance( client.transport, transports.VersionsGrpcTransport, ) def test_versions_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.VersionsTransport( credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json", ) def test_versions_base_transport(): # Instantiate the base transport. with mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport.__init__" ) as Transport: Transport.return_value = None transport = transports.VersionsTransport( credentials=ga_credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly # raise NotImplementedError. methods = ( "list_versions", "get_version", "create_version", "update_version", "delete_version", ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) with pytest.raises(NotImplementedError): transport.close() # Catch all for all remaining methods and properties remainder = [ "kind", ] for r in remainder: with pytest.raises(NotImplementedError): getattr(transport, r)() def test_versions_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.VersionsTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", scopes=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id="octopus", ) def test_versions_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. 
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( "google.cloud.dialogflow_v2.services.versions.transports.VersionsTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.VersionsTransport() adc.assert_called_once() def test_versions_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) VersionsClient() adc.assert_called_once_with( scopes=None, default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id=None, ) @pytest.mark.parametrize( "transport_class", [ transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport, ], ) def test_versions_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), quota_project_id="octopus", ) @pytest.mark.parametrize( "transport_class,grpc_helpers", [ (transports.VersionsGrpcTransport, grpc_helpers), (transports.VersionsGrpcAsyncIOTransport, grpc_helpers_async), ], ) def test_versions_transport_create_channel(transport_class, grpc_helpers): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel", autospec=True ) as create_channel: creds = ga_credentials.AnonymousCredentials() adc.return_value = (creds, None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) create_channel.assert_called_with( "dialogflow.googleapis.com:443", credentials=creds, credentials_file=None, quota_project_id="octopus", default_scopes=( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/dialogflow", ), scopes=["1", "2"], default_host="dialogflow.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize( "transport_class", [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport], ) def test_versions_grpc_transport_client_cert_source_for_mtls(transport_class): cred = ga_credentials.AnonymousCredentials() # Check ssl_channel_credentials is used if provided. with mock.patch.object(transport_class, "create_channel") as mock_create_channel: mock_ssl_channel_creds = mock.Mock() transport_class( host="squid.clam.whelk", credentials=cred, ssl_channel_credentials=mock_ssl_channel_creds, ) mock_create_channel.assert_called_once_with( "squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls # is used. 
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: transport_class( credentials=cred, client_cert_source_for_mtls=client_cert_source_callback, ) expected_cert, expected_key = client_cert_source_callback() mock_ssl_cred.assert_called_once_with( certificate_chain=expected_cert, private_key=expected_key ) @pytest.mark.parametrize( "transport_name", [ "grpc", "grpc_asyncio", ], ) def test_versions_host_no_port(transport_name): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="dialogflow.googleapis.com" ), transport=transport_name, ) assert client.transport._host == ("dialogflow.googleapis.com:443") @pytest.mark.parametrize( "transport_name", [ "grpc", "grpc_asyncio", ], ) def test_versions_host_with_port(transport_name): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="dialogflow.googleapis.com:8000" ), transport=transport_name, ) assert client.transport._host == ("dialogflow.googleapis.com:8000") def test_versions_grpc_transport_channel(): channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.VersionsGrpcTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None def test_versions_grpc_asyncio_transport_channel(): channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.VersionsGrpcAsyncIOTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. 
@pytest.mark.parametrize( "transport_class", [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport], ) def test_versions_transport_channel_mtls_with_client_cert_source(transport_class): with mock.patch( "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel cred = ga_credentials.AnonymousCredentials() with pytest.warns(DeprecationWarning): with mock.patch.object(google.auth, "default") as adc: adc.return_value = (cred, None) transport = transport_class( host="squid.clam.whelk", api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=client_cert_source_callback, ) adc.assert_called_once() grpc_ssl_channel_cred.assert_called_once_with( certificate_chain=b"cert bytes", private_key=b"key bytes" ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport], ) def test_versions_transport_channel_mtls_with_adc(transport_class): mock_ssl_cred = mock.Mock() with mock.patch.multiple( "google.auth.transport.grpc.SslCredentials", __init__=mock.Mock(return_value=None), ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel mock_cred = mock.Mock() with pytest.warns(DeprecationWarning): transport = transport_class( host="squid.clam.whelk", credentials=mock_cred, api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=None, ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel def test_version_path(): project = "squid" version = "clam" expected = "projects/{project}/agent/versions/{version}".format( project=project, version=version, ) actual = VersionsClient.version_path(project, version) assert expected == actual def test_parse_version_path(): expected = { "project": "whelk", "version": "octopus", } path = VersionsClient.version_path(**expected) # Check that the path construction is reversible. 
actual = VersionsClient.parse_version_path(path) assert expected == actual def test_common_billing_account_path(): billing_account = "oyster" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) actual = VersionsClient.common_billing_account_path(billing_account) assert expected == actual def test_parse_common_billing_account_path(): expected = { "billing_account": "nudibranch", } path = VersionsClient.common_billing_account_path(**expected) # Check that the path construction is reversible. actual = VersionsClient.parse_common_billing_account_path(path) assert expected == actual def test_common_folder_path(): folder = "cuttlefish" expected = "folders/{folder}".format( folder=folder, ) actual = VersionsClient.common_folder_path(folder) assert expected == actual def test_parse_common_folder_path(): expected = { "folder": "mussel", } path = VersionsClient.common_folder_path(**expected) # Check that the path construction is reversible. actual = VersionsClient.parse_common_folder_path(path) assert expected == actual def test_common_organization_path(): organization = "winkle" expected = "organizations/{organization}".format( organization=organization, ) actual = VersionsClient.common_organization_path(organization) assert expected == actual def test_parse_common_organization_path(): expected = { "organization": "nautilus", } path = VersionsClient.common_organization_path(**expected) # Check that the path construction is reversible. actual = VersionsClient.parse_common_organization_path(path) assert expected == actual def test_common_project_path(): project = "scallop" expected = "projects/{project}".format( project=project, ) actual = VersionsClient.common_project_path(project) assert expected == actual def test_parse_common_project_path(): expected = { "project": "abalone", } path = VersionsClient.common_project_path(**expected) # Check that the path construction is reversible. actual = VersionsClient.parse_common_project_path(path) assert expected == actual def test_common_location_path(): project = "squid" location = "clam" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) actual = VersionsClient.common_location_path(project, location) assert expected == actual def test_parse_common_location_path(): expected = { "project": "whelk", "location": "octopus", } path = VersionsClient.common_location_path(**expected) # Check that the path construction is reversible. 
actual = VersionsClient.parse_common_location_path(path) assert expected == actual def test_client_with_default_client_info(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object( transports.VersionsTransport, "_prep_wrapped_messages" ) as prep: client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) with mock.patch.object( transports.VersionsTransport, "_prep_wrapped_messages" ) as prep: transport_class = VersionsClient.get_transport_class() transport = transport_class( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) @pytest.mark.asyncio async def test_transport_close_async(): client = VersionsAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) with mock.patch.object( type(getattr(client.transport, "grpc_channel")), "close" ) as close: async with client: close.assert_not_called() close.assert_called_once() def test_transport_close(): transports = { "grpc": "_grpc_channel", } for transport, close_name in transports.items(): client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) with mock.patch.object( type(getattr(client.transport, close_name)), "close" ) as close: with client: close.assert_not_called() close.assert_called_once() def test_client_ctx(): transports = [ "grpc", ] for transport in transports: client = VersionsClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) # Test client calls underlying transport. with mock.patch.object(type(client.transport), "close") as close: close.assert_not_called() with client: pass close.assert_called() @pytest.mark.parametrize( "client_class,transport_class", [ (VersionsClient, transports.VersionsGrpcTransport), (VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport), ], ) def test_api_key_credentials(client_class, transport_class): with mock.patch.object( google.auth._default, "get_api_key_credentials", create=True ) as get_api_key_credentials: mock_cred = mock.Mock() get_api_key_credentials.return_value = mock_cred options = client_options.ClientOptions() options.api_key = "api_key" with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, )
git-utils.spec.ts
// Name must start with "mock" so the hoisted jest.mock() factory below is
// allowed to reference this module-scope variable.
const mockExecCmd = jest.fn();
jest.mock('./cmd-utils', () => ({ execCmd: mockExecCmd }));
import { getBranchName, getCommitHash } from './git-utils';

describe('git-utils', () => {
  describe('getBranchName', () => {
    beforeEach(() => {
      // jest.fn() mocks are cleared with mockReset(); mockRestore() only
      // applies to spies created via jest.spyOn().
      mockExecCmd.mockReset();
      jest.restoreAllMocks();
    });

    test('getBranchName should return BRANCH_NAME process env variable value if available', async () => {
      const branchName = await getBranchName({ BRANCH_NAME: 'test-name' });
      expect(branchName).toEqual('test-name');
    });
    test('getBranchName should execute git command if BRANCH_NAME process env variable is not available', async () => {
      mockExecCmd.mockImplementation(() => Promise.resolve('test-git-name'));
      const branchName = await getBranchName({});
      expect(branchName).toEqual('test-git-name');
      expect(mockExecCmd).toHaveBeenCalledWith('git rev-parse --abbrev-ref HEAD');
    });
  });

  describe('getCommitHash', () => {
    beforeEach(() => {
      mockExecCmd.mockReset();
      jest.restoreAllMocks();
    });

    test('getCommitHash should execute git command and return result', async () => {
      mockExecCmd.mockImplementation(() => Promise.resolve('test-git-hash'));
      const commitHash = await getCommitHash();
      expect(commitHash).toEqual('test-git-hash');
      expect(mockExecCmd).toHaveBeenCalledWith('git rev-parse --short HEAD');
    });
  });
});
ssh_public_key.py
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables __all__ = ['SshPublicKey'] class SshPublicKey(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, location: Optional[pulumi.Input[str]] = None, public_key: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, ssh_public_key_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None, __name__=None, __opts__=None): """ Specifies information about the SSH public key. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] location: Resource location :param pulumi.Input[str] public_key: SSH public key used to authenticate to a virtual machine through ssh. If this property is not initially provided when the resource is created, the publicKey property will be populated when generateKeyPair is called. If the public key is provided upon resource creation, the provided public key needs to be at least 2048-bit and in ssh-rsa format. :param pulumi.Input[str] resource_group_name: The name of the resource group. :param pulumi.Input[str] ssh_public_key_name: The name of the SSH public key. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if location is None: raise TypeError("Missing required property 'location'") __props__['location'] = location __props__['public_key'] = public_key if resource_group_name is None: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name if ssh_public_key_name is None: raise TypeError("Missing required property 'ssh_public_key_name'") __props__['ssh_public_key_name'] = ssh_public_key_name __props__['tags'] = tags __props__['name'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute/v20191201:SshPublicKey"), pulumi.Alias(type_="azure-nextgen:compute/v20200601:SshPublicKey")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(SshPublicKey, __self__).__init__( 'azure-nextgen:compute/latest:SshPublicKey', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'SshPublicKey': """ Get an existing SshPublicKey resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. 
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return SshPublicKey(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def location(self) -> pulumi.Output[str]: """ Resource location """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource name """ return pulumi.get(self, "name") @property @pulumi.getter(name="publicKey") def public_key(self) -> pulumi.Output[Optional[str]]: """ SSH public key used to authenticate to a virtual machine through ssh. If this property is not initially provided when the resource is created, the publicKey property will be populated when generateKeyPair is called. If the public key is provided upon resource creation, the provided public key needs to be at least 2048-bit and in ssh-rsa format. """ return pulumi.get(self, "public_key") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Resource tags """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Resource type """ return pulumi.get(self, "type") def
translate_output_property
(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
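# Illustrative usage sketch (not part of the generated file); the resource
# name, location, group, and key value below are hypothetical placeholders.
# `public_key` may be omitted, in which case it is populated once
# generateKeyPair is called on the resource.
#
#   example_key = SshPublicKey(
#       "example-key",
#       location="westus2",
#       resource_group_name="example-rg",
#       ssh_public_key_name="example-key",
#       public_key="ssh-rsa AAAAB3Nza... user@host",
#       tags={"env": "dev"},
#   )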
gifs_get_gif_async.rs
use std::env; use dotenv::dotenv; use giphy::v1::gifs::GetGifRequest; use giphy::v1::r#async::*; #[tokio::main] pub async fn
main
() { dotenv().ok(); let api_key = env::var("GIPHY_API_KEY_TEST") .unwrap_or_else(|e| panic!("Error retrieving env variable: {:?}", e)); let client = reqwest::Client::new(); let api = AsyncApi::new(api_key, client); let response = GetGifRequest::new("xT4uQulxzV39haRFjG") .send_to(&api) .await .unwrap(); println!("Response: {:?}", response); }
Story.js
import css from "styled-components"; import React from "react"; import { array, func, number, shape, string } from "prop-types"; import { format } from "date-fns"; import { Action, Avatar, Container, Dropdown, DropdownContent, Icon, Text, Tip, color, radius, setSpace, time, disselect, } from "interviewjs-styleguide"; import { DeleteModal, DetailsModal, ErrorBoundary } from "../"; const StoryEl = css(Container)` ${disselect}; ${setSpace("mhh")}; border-radius: ${radius.l}; cursor: pointer; transition: box-shadow ${time.m}, transform ${time.m}; &:active { box-shadow: 0 1px 2px ${color.shadowHL}; transform: translateY(1px); } `; const StoryTitle = css(Text.withComponent("h2"))` ${disselect}; ${setSpace("mbx")}; color: ${color.blueM}; `; const StorySummary = css(Text.withComponent("p"))` ${disselect}; color: ${color.greyHD}; `; const StoryDate = css(Text)` ${disselect}; color: ${color.greyM}; `; const StoryMenu = css.div` ${setSpace("mrm")}; position: absolute; right: 0; top: 50%; transform: translateY(-50%); `; const AvatarList = css.ul` text-align: right; white-space: nowrap; `; const AvatarListItem = css.li` border-radius: ${radius.a}; border: 2px solid ${color.white}; display: inline-block; line-height: 0; margin-left: -10px; position: relative; `; export default class
Story
extends React.Component { constructor(props) { super(props); this.state = { deleteModal: false, detailsModal: "", settingsDropdown: false, }; this.toggleDeleteModal = this.toggleDeleteModal.bind(this); this.toggleDetailsModal = this.toggleDetailsModal.bind(this); this.toggleDropdown = this.toggleDropdown.bind(this); this.updateStory = this.updateStory.bind(this); } toggleDetailsModal(tab) { return tab ? this.setState({ detailsModal: tab, settingsDropdown: false }) : this.setState({ detailsModal: "" }); } toggleDeleteModal() { this.setState({ deleteModal: !this.state.deleteModal, settingsDropdown: false, }); } toggleDropdown(dropdown) { this.setState({ [dropdown]: !this.state[dropdown] }); } updateStory(data) { this.props.updateStory(data, this.props.storyIndex); } render() { const { deleteModal, detailsModal } = this.state; return [ <ErrorBoundary key="boundary"> <Container key="body"> <StoryEl {...this.props} dir="row" fill="white" onClick={() => this.props.openStory()} padded shift> <Container flex={[1, 2, "60%"]}> <StoryTitle typo="h2">{this.props.story.title}</StoryTitle> <StorySummary typo="p5">{this.props.story.intro}</StorySummary> </Container> <Container flex={[2, 1, "20%"]} align="center" hide="phone"> <StoryDate typo="p5">{format(this.props.story.modDate, "D MMM YYYY")}</StoryDate> </Container> <Container flex={[2, 1, "20%"]} align="right"> <AvatarList> {this.props.story.interviewees ? this.props.story.interviewees.map((el, i) => ( <AvatarListItem key={i}> <Tip animation="fade" arrow arrowSize="small" hideDelay={350} interactiveBorder={5} position="bottom" sticky theme="dark" title={el.name ? el.name : ""} > <Avatar size="m" image={el.avatar} /> </Tip> </AvatarListItem> )) : null} </AvatarList> </Container> </StoryEl> <StoryMenu> <Dropdown onRequestClose={() => this.toggleDropdown("settingsDropdown")} open={this.state.settingsDropdown} html={ <DropdownContent> <ul> <li> <Action onClick={() => this.toggleDetailsModal("meta")}>Story Elements</Action> </li> <li> <Action tone="negative" onClick={this.toggleDeleteModal}> Delete </Action> </li> </ul> </DropdownContent> } > <Action iconic onClick={() => this.toggleDropdown("settingsDropdown")}> <Icon name="pen" /> </Action> </Dropdown> </StoryMenu> </Container> </ErrorBoundary>, detailsModal !== "" ? ( <ErrorBoundary> <DetailsModal {...this.props} handleClose={() => this.toggleDetailsModal()} isOpen key="DetailsModal" story={this.props.story} storyIndex={this.props.storyIndex} tab={this.state.detailsModal} updateStory={this.updateStory} /> </ErrorBoundary> ) : null, deleteModal ? ( <ErrorBoundary> <DeleteModal {...this.props} deleteStory={() => this.props.deleteStory(this.props.storyIndex)} handleClose={() => this.toggleDeleteModal()} isOpen key="DeleteModal" story={this.props.story} /> </ErrorBoundary> ) : null, ]; } } Story.propTypes = { story: shape({ id: string.isRequried, interviewees: array.isRequired, intro: string.isRequired, pubDate: string.isRequired, title: string.isRequired, }).isRequired, deleteStory: func.isRequired, storyIndex: number.isRequired, openStory: func.isRequired, updateStory: func.isRequired, }; Story.defaultProps = {};
signature.go
package crypto import ( "fmt" "golang.org/x/crypto/ed25519" ) // Verify reports whether sig is a valid signature of message by publicKey. func Verify(publicKey ed25519.PublicKey, message, sig []byte) error { if len(publicKey) != ed25519.PublicKeySize { return fmt.Errorf("public key has the wrong size") } if !ed25519.Verify(publicKey, message, sig) { return fmt.Errorf("signature verification failed") } return nil } // Sign signs the message with privateKey and returns a signature. func
Sign(privateKey ed25519.PrivateKey, message []byte) ([]byte, error) {
	if len(privateKey) != ed25519.PrivateKeySize {
		return nil, fmt.Errorf("private key has the wrong size")
	}
	return ed25519.Sign(privateKey, message), nil
}
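// Example round trip (a sketch, not part of this package's API):
//
//	pub, priv, _ := ed25519.GenerateKey(rand.Reader) // rand is crypto/rand
//	sig, _ := Sign(priv, []byte("hello"))
//	err := Verify(pub, []byte("hello"), sig)         // nil on success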
default_endorsement_test.go
/* Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package builtin_test import ( "testing" "github.com/hyperledger/fabric-protos-go/peer" "github.com/paul-lee-attorney/fabric-2.1-gm/core/endorser/mocks" "github.com/paul-lee-attorney/fabric-2.1-gm/core/handlers/endorsement/builtin" mocks2 "github.com/paul-lee-attorney/fabric-2.1-gm/core/handlers/endorsement/builtin/mocks" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) func TestDefaultEndorsement(t *testing.T)
{ factory := &builtin.DefaultEndorsementFactory{} endorser := factory.New() // Scenario I: Don't pass any dependencies, and observe that the initialization fails err := endorser.Init() assert.Equal(t, "could not find SigningIdentityFetcher in dependencies", err.Error()) // Scenario II: Pass into init a SigningIdentityFetcher sif := &mocks.SigningIdentityFetcher{} // Pass also another item just to ensure it is ignored err = endorser.Init("foo", sif) assert.NoError(t, err) // Scenario III: Obtaining a signing identity fails sif.On("SigningIdentityForRequest", mock.Anything).Return(nil, errors.New("foo")).Once() _, _, err = endorser.Endorse(nil, nil) assert.Contains(t, err.Error(), "foo") // Scenario IV: Obtaining a signing identity succeeds but serializing the identity fails sid := &mocks2.SigningIdentity{} sid.On("Serialize").Return(nil, errors.New("bar")).Once() sif.On("SigningIdentityForRequest", mock.Anything).Return(sid, nil) _, _, err = endorser.Endorse(nil, nil) assert.Contains(t, err.Error(), "bar") // Scenario V: Serializing the identity succeeds but signing fails sid.On("Serialize").Return([]byte{1, 2, 3}, nil) sid.On("Sign", mock.Anything).Return(nil, errors.New("baz")).Once() _, _, err = endorser.Endorse([]byte{1, 1, 1, 1, 1}, nil) assert.Contains(t, err.Error(), "baz") // Scenario VI: Signing succeeds sid.On("Serialize").Return([]byte{1, 2, 3}, nil) sid.On("Sign", mock.Anything).Return([]byte{10, 20, 30}, nil).Once() endorsement, resp, err := endorser.Endorse([]byte{1, 1, 1, 1, 1}, nil) assert.NoError(t, err) assert.Equal(t, resp, []byte{1, 1, 1, 1, 1}) assert.Equal(t, &peer.Endorsement{ Signature: []byte{10, 20, 30}, Endorser: []byte{1, 2, 3}, }, endorsement) }
utils.js
const fs = require('fs'); const path = require('path'); const has = require('lodash.has'); const readPkgUp = require('read-pkg-up'); const which = require('which'); const spawn = require('cross-spawn'); const {pkg, path: pkgPath} = readPkgUp.sync({ cwd: fs.realpathSync(process.cwd()), }); const appDirectory = path.dirname(pkgPath); function resolveBin(modName, {executable = modName, cwd = process.cwd()} = {}) { let pathFromWhich; try { pathFromWhich = fs.realpathSync(which.sync(executable)); } catch (_error) { // ignore _error } try { const modPkgPath = require.resolve(`${modName}/package.json`); const modPkgDir = path.dirname(modPkgPath); const {bin} = require(modPkgPath); const binPath = typeof bin === 'string' ? bin : bin[executable]; const fullPathToBin = path.join(modPkgDir, binPath); if (fullPathToBin === pathFromWhich) return executable; return fullPathToBin.replace(cwd, '.'); } catch (error) { if (pathFromWhich) return executable; throw error; } } const fromRoot = (...p) => path.join(appDirectory, ...p); const hasFile = (...p) => fs.existsSync(fromRoot(...p)); const hasPkgProp = props => Array.from(props).some(prop => has(pkg, prop)); function
getBufferContent
(chunks) { return Buffer.isBuffer(chunks[0]) ? Buffer.concat(chunks).toString('utf8') : null; } function asyncSpawn(cmd, args, options) { const stdout = []; return new Promise((resolve, reject) => { const child = spawn(cmd, args, { cwd: process.cwd(), detached: true, stdio: 'inherit', ...options, }); if (child.stdout) { child.stdout.on('data', chunk => { stdout.push(chunk); }); } child.on('close', code => { if (code !== 0) { // eslint-disable-next-line no-console const err = new Error(`${cmd} exited with an error (code ${code}).`); err.log = getBufferContent(stdout); reject(err); return; } resolve(getBufferContent(stdout)); }); child.on('error', err => { reject(err); }); }); } module.exports = { hasFile, hasPkgProp, resolveBin, asyncSpawn, };
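// Example usage (a sketch; assumes "eslint" is installed as a dependency of
// the current package):
//   const eslintBin = resolveBin('eslint');
//   asyncSpawn(eslintBin, ['--fix', 'src/']).catch(err => console.error(err.log));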
App.js
import React from 'react'; import './App.css'; import Head from "./Components/head"
import Footer from "./Components/footer"
import List from "./Components/list"

function App() {
  return (
    <div>
      <Head/>
      <div className="App">
        <br/><br/>
        <List/>
      </div>
      <Footer/>
    </div>
  );
}

export default App;
0008_auto_20180907_1348.py
# Generated by Django 2.1 on 2018-09-07 13:48 from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration):
dependencies = [("analytics", "0007_dependencyusage_version")] operations = [ migrations.AddField( model_name="dependency", name="timestamp", field=models.DateTimeField(default=django.utils.timezone.now), ), migrations.AddField( model_name="dependencyusage", name="timestamp", field=models.DateTimeField(default=django.utils.timezone.now), ), migrations.AlterField( model_name="dependencyusage", name="dependency", field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name="depusage", to="analytics.Dependency", ), ), migrations.AlterField( model_name="dependencyusage", name="major_version", field=models.BigIntegerField(blank=True, null=True), ), migrations.AlterField( model_name="dependencyusage", name="minor_version", field=models.BigIntegerField(blank=True, null=True), ), migrations.AlterField( model_name="dependencyusage", name="patch_version", field=models.BigIntegerField(blank=True, null=True), ), ]
estimator.py
# Copyright 2020 The FedLearner Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # coding: utf-8 # pylint: disable=protected-access import os import logging import time import tensorflow.compat.v1 as tf from tensorflow.compat import as_str_any from tensorflow.compat.v1.train import Optimizer from tensorflow.compat.v1.estimator import ModeKeys from tensorflow_estimator.python.estimator import model_fn as model_fn_lib from fedlearner.common.mysql_client import DBClient from fedlearner.common.summary_hook import SummaryHook from fedlearner.trainer import patch # pylint: disable=unused-import from fedlearner.common import metrics from fedlearner.data_join.common import get_kvstore_config SYNC_PATH = '/sync/' DATA_CHECKPOINT_INIT_VALUE = "_init_value" class DataCheckpointSaverListener(tf.estimator.CheckpointSaverListener): def __init__(self, tm, appid): self._trainer_master = tm self._application_id = appid def begin(self): ckpt = tf.placeholder(tf.string, name="data_checkpoint_plhd") var_tmp = tf.Variable(DATA_CHECKPOINT_INIT_VALUE, \ name="data_checkpoint") self._ckpt_tensor = var_tmp.assign(ckpt) def before_save(self, session, global_step_value): logging.info('About to write a checkpoint at step %d', \ global_step_value) data_checkpoint = self._trainer_master.get_data_block_checkpoint( self._application_id) #if empty block from checkpoint fetched due to exception or # master not ready, no need to save. if len(data_checkpoint) == 0: return res = session.run(self._ckpt_tensor, {"data_checkpoint_plhd:0": ",".join(data_checkpoint)}) logging.info("data checkpoint saved result: %s", res) class FLModel(object): def __init__(self, role, bridge, example_ids, exporting=False): self._role = role self._bridge = bridge self._example_ids = example_ids self._exporting = exporting self._train_ops = [] self._recvs = [] self._sends = [] self._outputs = [] @property def
train_ops
(self): return self._train_ops @property def sends(self): return [(n, t) for n, t, _ in self._sends] @property def recvs(self): return [(n, t) for n, t, _ in self._recvs] def verify_example_ids(self): tensor = tf.strings.to_hash_bucket_fast(self._example_ids, 2**31 - 1) if self._role == 'leader': self.send('_verify_example_ids', tensor) else: recv_tensor = self.recv('_verify_example_ids', tensor.dtype) op = tf.assert_equal(tensor, recv_tensor) self._train_ops.append(op) def send(self, name, tensor, require_grad=False): with tf.control_dependencies([self._example_ids]): op = self._bridge.send_op(name, tensor) self._train_ops.append(op) self._sends.append((name, tensor, require_grad)) if require_grad: return self.recv(name + '_grad', tensor.dtype) return None def recv(self, name, dtype=tf.float32, require_grad=False): with tf.control_dependencies([self._example_ids]): tensor = self._bridge.receive_op(name, dtype) self._recvs.append((name, tensor, require_grad)) return tensor def minimize(self, optimizer, loss, global_step=None, var_list=None, gate_gradients=Optimizer.GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, name=None, grad_loss=None): recv_grads = [i for i in self._recvs if i[2]] if var_list is None: var_list = \ tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) + \ tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES) var_list = [v for _, v, _ in recv_grads] + var_list grads_and_vars = optimizer.compute_gradients( loss, var_list=var_list, gate_gradients=gate_gradients, aggregation_method=aggregation_method, colocate_gradients_with_ops=colocate_gradients_with_ops, grad_loss=grad_loss) send_grads = grads_and_vars[:len(recv_grads)] for (n, _, _), (grad, _) in zip(recv_grads, send_grads): if grad is not None: self.send(n + '_grad', grad) if grads_and_vars[len(recv_grads):]: train_op = optimizer.apply_gradients( grads_and_vars[len(recv_grads):], global_step=global_step, name=name) else: train_op = tf.no_op() return train_op def _append_summary_hook(self, training_hooks): if not training_hooks: training_hooks = [] summary_hook = SummaryHook.get_hook() if summary_hook: training_hooks.append(summary_hook) return training_hooks def make_spec(self, mode, predictions=None, loss=None, train_op=None, eval_metric_ops=None, training_chief_hooks=None, training_hooks=None, evaluation_hooks=None, prediction_hooks=None): if isinstance(predictions, tf.Tensor): predictions = {'output': predictions} if mode == ModeKeys.TRAIN: train_op = tf.group([train_op] + self._train_ops) training_hooks = self._append_summary_hook(training_hooks) return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=eval_metric_ops, training_chief_hooks=training_chief_hooks, training_hooks=training_hooks, evaluation_hooks=evaluation_hooks, prediction_hooks=prediction_hooks) class FLEstimator(object): def __init__(self, model_fn, bridge, trainer_master, role, worker_rank=0, application_id=None, cluster_spec=None): self._model_fn = model_fn self._bridge = bridge self._trainer_master = trainer_master self._role = role self._worker_rank = worker_rank self._cluster_spec = cluster_spec self._application_id = application_id def _get_features_and_labels_from_input_fn(self, input_fn, mode): dataset = input_fn(self._bridge, self._trainer_master) features, labels = dataset.make_one_shot_iterator().get_next() return features, labels def _get_model_spec(self, features, labels, mode): model = FLModel(self._role, self._bridge, features.get('example_id', 
None), exporting=(mode == ModeKeys.PREDICT)) spec = self._model_fn(model, features, labels, mode) return spec, model def _restore_datablock(self, blk_ids): # only chief worker restores from checkpoint. if self._worker_rank != 0 or blk_ids is None: return True block_id_str = as_str_any(blk_ids) block_ids = [] if block_id_str != DATA_CHECKPOINT_INIT_VALUE: block_ids = block_id_str.split(",") logging.info("restore: %s", block_id_str) return self._trainer_master.restore_data_block_checkpoint( self._application_id, block_ids) def _cheif_barriar(self, is_chief=False, sync_times=300): worker_replicas = os.environ.get('REPLICA_NUM', 0) kvstore_type = os.environ.get('KVSTORE_TYPE', 'etcd') db_database, db_addr, db_username, db_password, _ = \ get_kvstore_config(kvstore_type) kvstore_client = DBClient(db_database, db_addr, db_username, db_password, SYNC_PATH) sync_path = '%s/%s' % (os.environ['APPLICATION_ID'], os.environ['WORKER_RANK']) logging.info('Creating a sync flag at %s', sync_path) kvstore_client.set_data(sync_path, "1") if is_chief: for _ in range(sync_times): sync_list = kvstore_client.get_prefix_kvs( os.environ['APPLICATION_ID']) logging.info('Sync file pattern is: %s', sync_list) if len(sync_list) < worker_replicas: logging.info('Count of ready workers is %d', len(sync_list)) time.sleep(6) else: break def train(self, input_fn, checkpoint_path=None, save_checkpoint_steps=None, save_checkpoint_secs=None): if self._cluster_spec is not None: device_fn = tf.train.replica_device_setter( worker_device="/job:worker/task:%d" % self._worker_rank, merge_devices=True, cluster=self._cluster_spec) cluster_def = self._cluster_spec.as_cluster_def() local_address = self._cluster_spec.job_tasks('worker')[ self._worker_rank] server = tf.train.Server(tf.train.ClusterSpec( {'local': { 0: local_address }}), job_name='local', task_index=0) target = 'grpc://' + local_address else: device_fn = None cluster_def = None target = None config = tf.ConfigProto(cluster_def=cluster_def) config.inter_op_parallelism_threads = 4 config.intra_op_parallelism_threads = 4 config.experimental.share_session_state_in_clusterspec_propagation \ = True tf.config.set_soft_device_placement(False) with tf.Graph().as_default() as g: with tf.device(device_fn): features, labels = self._get_features_and_labels_from_input_fn( input_fn, ModeKeys.TRAIN) spec, _ = self._get_model_spec(features, labels, ModeKeys.TRAIN) # Explicitly add a Saver if not tf.get_collection(tf.GraphKeys.SAVERS): saver = tf.train.Saver( sharded=True, defer_build=True, save_relative_paths=True) # Must set for portability tf.add_to_collection(tf.GraphKeys.SAVERS, saver) listener = DataCheckpointSaverListener(self._trainer_master, self._application_id) saver_hook = tf.estimator.CheckpointSaverHook( checkpoint_path, save_secs=save_checkpoint_secs, save_steps=save_checkpoint_steps, listeners=[listener]) self._bridge.connect() try: with tf.train.MonitoredTrainingSession( master=target, config=config, is_chief=(self._worker_rank == 0), chief_only_hooks=[saver_hook], checkpoint_dir=checkpoint_path, save_checkpoint_steps=save_checkpoint_steps, save_checkpoint_secs=save_checkpoint_secs, hooks=spec.training_hooks) as sess: iter_id = 0 data_checkpoint_value = None if hasattr(saver_hook, "data_checkpoint"): data_checkpoint_value = saver_hook.data_checkpoint if not self._restore_datablock(data_checkpoint_value): raise ValueError("Restore data checkpoint error") while not sess.should_stop(): self._bridge.start(iter_id) logging.debug('after bridge start.') start_time = time.time() 
sess.run(spec.train_op, feed_dict={}) end_time = time.time() metrics.emit_timer( name="iter_timer", value=end_time-start_time, tags={}) logging.debug('after session run.') self._bridge.commit() logging.debug('after bridge commit.') iter_id += 1 finally: self._bridge.terminate() return self def evaluate(self, input_fn, checkpoint_path=None): if not tf.train.latest_checkpoint(checkpoint_path): raise ValueError( "Could not find trained model at %s" % checkpoint_path) with tf.Graph().as_default(): features, labels = self._get_features_and_labels_from_input_fn( input_fn, ModeKeys.EVAL) spec, model = self._get_model_spec(features, labels, ModeKeys.EVAL) # Track the average loss in default eval_metric_ops = spec.eval_metric_ops or {} if model_fn_lib.LOSS_METRIC_KEY not in eval_metric_ops: loss_metric = tf.metrics.mean(spec.loss) eval_metric_ops[model_fn_lib.LOSS_METRIC_KEY] = loss_metric # Create the real eval op update_ops, eval_dict = _extract_metric_update_ops(eval_metric_ops) update_ops.extend(model._train_ops) eval_op = tf.group(*update_ops) # Also track the global step if tf.GraphKeys.GLOBAL_STEP in eval_dict: raise ValueError( 'Metric with name `global_step` is not allowed, because ' 'Estimator already defines a default metric with the ' 'same name.') eval_dict[tf.GraphKeys.GLOBAL_STEP] = \ tf.train.get_or_create_global_step() # Prepare the session creator. scaffold = tf.train.Scaffold() session_creator = tf.train.ChiefSessionCreator( scaffold=scaffold, checkpoint_dir=checkpoint_path) # Prepare hooks all_hooks = list(spec.evaluation_hooks) or [] final_ops_hook = tf.train.FinalOpsHook(eval_dict) all_hooks.append(final_ops_hook) # Evaluate over dataset self._bridge.connect() try: with tf.train.MonitoredSession( session_creator=session_creator, hooks=all_hooks) as sess: if not self._restore_datablock(DATA_CHECKPOINT_INIT_VALUE): raise ValueError("Restore data checkpoint error") iter_id = 0 while not sess.should_stop(): self._bridge.start(iter_id) logging.debug('after bridge start.') start_time = time.time() sess.run(eval_op) end_time = time.time() metrics.emit_timer( name="iter_timer", value=end_time-start_time, tags={}) logging.debug('after session run.') self._bridge.commit() logging.debug('after bridge commit.') iter_id += 1 finally: self._bridge.terminate() # Print result logging.info('Metrics for iteration %d: %s', iter_id, _dict_to_str(final_ops_hook.final_ops_values)) return final_ops_hook.final_ops_values def export_saved_model(self, export_dir_base, serving_input_receiver_fn, checkpoint_path=None): with tf.Graph().as_default(): receiver = serving_input_receiver_fn() spec, model = self._get_model_spec(receiver.features, None, ModeKeys.PREDICT) assert not model.sends, "Exported model cannot send" assert not model.recvs, "Exported model cannot receive" with tf.Session() as sess: saver_for_restore = tf.train.Saver(sharded=True) saver_for_restore.restore( sess, tf.train.latest_checkpoint(checkpoint_path)) tf.saved_model.simple_save(sess, export_dir_base, receiver.receiver_tensors, spec.predictions, None) return export_dir_base def _extract_metric_update_ops(eval_dict): """Separate update operations from metric value operations.""" update_ops = [] value_ops = {} # Sort metrics lexicographically so graph is identical every time. for name in sorted(eval_dict.keys()): metric_tensor, update_op = eval_dict[name] value_ops[name] = metric_tensor update_ops.append(update_op) return update_ops, value_ops def _dict_to_str(dictionary): """Get a `str` representation of a `dict`. 
Args: dictionary: The `dict` to be represented as `str`. Returns: A `str` representing the `dictionary`. """ return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()) if not isinstance(v, bytes))
test_datetime.py
# SPDX-License-Identifier: (Apache-2.0 OR MIT) import datetime import sys import unittest import pytest import pytz from dateutil import tz import orjson try: import pendulum except ImportError: pendulum = None # type: ignore if sys.version_info >= (3, 9): import zoneinfo class DatetimeTests(unittest.TestCase): def test_datetime_naive(self): """ datetime.datetime naive prints without offset """ self.assertEqual( orjson.dumps([datetime.datetime(2000, 1, 1, 2, 3, 4, 123)]), b'["2000-01-01T02:03:04.000123"]', ) def test_datetime_naive_utc(self): """ datetime.datetime naive with opt assumes UTC """ self.assertEqual( orjson.dumps( [datetime.datetime(2000, 1, 1, 2, 3, 4, 123)], option=orjson.OPT_NAIVE_UTC, ), b'["2000-01-01T02:03:04.000123+00:00"]', ) def test_datetime_min(self): """ datetime.datetime min range """ self.assertEqual( orjson.dumps( [datetime.datetime(datetime.MINYEAR, 1, 1, 0, 0, 0, 0)], option=orjson.OPT_NAIVE_UTC, ), b'["0001-01-01T00:00:00+00:00"]', ) def test_datetime_max(self): """ datetime.datetime max range """ self.assertEqual( orjson.dumps( [datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 50, 999999)], option=orjson.OPT_NAIVE_UTC, ), b'["9999-12-31T23:59:50.999999+00:00"]', ) def test_datetime_three_digits(self): """ datetime.datetime three digit year """ self.assertEqual( orjson.dumps( [datetime.datetime(312, 1, 1)], option=orjson.OPT_NAIVE_UTC, ), b'["0312-01-01T00:00:00+00:00"]', ) def test_datetime_two_digits(self): """ datetime.datetime two digit year """ self.assertEqual( orjson.dumps( [datetime.datetime(46, 1, 1)], option=orjson.OPT_NAIVE_UTC, ), b'["0046-01-01T00:00:00+00:00"]', ) def test_datetime_tz_assume(self): """ datetime.datetime tz with assume UTC uses tz """ self.assertEqual( orjson.dumps( [ datetime.datetime( 2018, 1, 1, 2, 3, 4, 0, tzinfo=tz.gettz("Asia/Shanghai") ) ], option=orjson.OPT_NAIVE_UTC, ), b'["2018-01-01T02:03:04+08:00"]', ) def test_datetime_timezone_utc(self): """ datetime.datetime UTC """ self.assertEqual( orjson.dumps( [ datetime.datetime( 2018, 6, 1, 2, 3, 4, 0, tzinfo=datetime.timezone.utc ) ] ), b'["2018-06-01T02:03:04+00:00"]', ) def test_datetime_pytz_utc(self): """ datetime.datetime UTC """ self.assertEqual( orjson.dumps([datetime.datetime(2018, 6, 1, 2, 3, 4, 0, tzinfo=pytz.UTC)]), b'["2018-06-01T02:03:04+00:00"]', ) @unittest.skipIf( sys.version_info < (3, 9) or sys.platform.startswith("win"), "zoneinfo not available", ) def test_datetime_zoneinfo_positive(self): self.assertEqual( orjson.dumps( [ datetime.datetime( 2018, 1, 1, 2, 3, 4, 0, tzinfo=zoneinfo.ZoneInfo("Asia/Shanghai"), ) ] ), b'["2018-01-01T02:03:04+08:00"]', ) @unittest.skipIf( sys.version_info < (3, 9) or sys.platform.startswith("win"), "zoneinfo not available", ) def test_datetime_zoneinfo_negative(self): self.assertEqual( orjson.dumps( [ datetime.datetime( 2018, 6, 1, 2, 3, 4, 0, tzinfo=zoneinfo.ZoneInfo("America/New_York"), ) ] ), b'["2018-06-01T02:03:04-04:00"]', ) @pytest.mark.skipif(pendulum is None, reason="pendulum install broken on win") def test_datetime_pendulum_utc(self): """ datetime.datetime UTC """ self.assertEqual( orjson.dumps( [datetime.datetime(2018, 6, 1, 2, 3, 4, 0, tzinfo=pendulum.UTC)] ), b'["2018-06-01T02:03:04+00:00"]', ) def test_datetime_arrow_positive(self): """ datetime.datetime positive UTC """ self.assertEqual( orjson.dumps( [ datetime.datetime( 2018, 1, 1, 2, 3, 4, 0, tzinfo=tz.gettz("Asia/Shanghai") ) ] ), b'["2018-01-01T02:03:04+08:00"]', ) def test_datetime_pytz_positive(self): """ datetime.datetime positive UTC """ 
self.assertEqual( orjson.dumps( [ datetime.datetime( 2018, 1, 1, 2, 3, 4, 0, tzinfo=pytz.timezone("Asia/Shanghai") ) ] ), b'["2018-01-01T02:03:04+08:00"]', ) @pytest.mark.skipif(pendulum is None, reason="pendulum install broken on win") def test_datetime_pendulum_positive(self): """ datetime.datetime positive UTC """ self.assertEqual( orjson.dumps( [ datetime.datetime( 2018, 1, 1, 2, 3, 4, 0, tzinfo=pendulum.timezone("Asia/Shanghai"), ) ] ), b'["2018-01-01T02:03:04+08:00"]', ) def test_datetime_pytz_negative_dst(self): """ datetime.datetime negative UTC DST """ self.assertEqual( orjson.dumps( [ datetime.datetime( 2018, 6, 1, 2, 3, 4, 0, tzinfo=pytz.timezone("America/New_York") ) ] ), b'["2018-06-01T02:03:04-04:00"]', ) @pytest.mark.skipif(pendulum is None, reason="pendulum install broken on win") def test_datetime_pendulum_negative_dst(self): """ datetime.datetime negative UTC DST """ self.assertEqual( orjson.dumps( [ datetime.datetime( 2018, 6, 1, 2, 3, 4, 0, tzinfo=pendulum.timezone("America/New_York"), ) ] ), b'["2018-06-01T02:03:04-04:00"]', ) def test_datetime_pytz_negative_non_dst(self): """ datetime.datetime negative UTC non-DST """ self.assertEqual( orjson.dumps( [ datetime.datetime( 2018, 12, 1, 2, 3, 4, 0, tzinfo=pytz.timezone("America/New_York"), ) ] ), b'["2018-12-01T02:03:04-05:00"]', ) @pytest.mark.skipif(pendulum is None, reason="pendulum install broken on win") def test_datetime_pendulum_negative_non_dst(self): """ datetime.datetime negative UTC non-DST """ self.assertEqual( orjson.dumps( [ datetime.datetime( 2018, 12, 1, 2, 3, 4, 0, tzinfo=pendulum.timezone("America/New_York"), ) ] ), b'["2018-12-01T02:03:04-05:00"]', ) def test_datetime_partial_hour(self): """ datetime.datetime UTC offset partial hour """ self.assertEqual( orjson.dumps( [ datetime.datetime( 2018, 12, 1, 2, 3, 4, 0, tzinfo=pytz.timezone("Australia/Adelaide"), ) ] ), b'["2018-12-01T02:03:04+10:30"]', ) def test_datetime_pytz_partial_hour(self): """ datetime.datetime UTC offset partial hour """ self.assertEqual( orjson.dumps( [ datetime.datetime( 2018, 12, 1, 2, 3, 4, 0, tzinfo=pytz.timezone("Australia/Adelaide"), ) ] ), b'["2018-12-01T02:03:04+10:30"]', ) @pytest.mark.skipif(pendulum is None, reason="pendulum install broken on win") def test_datetime_pendulum_partial_hour(self): """ datetime.datetime UTC offset partial hour """ self.assertEqual( orjson.dumps( [ datetime.datetime( 2018, 12, 1, 2, 3, 4, 0, tzinfo=pendulum.timezone("Australia/Adelaide"), ) ] ), b'["2018-12-01T02:03:04+10:30"]', ) @pytest.mark.skipif(pendulum is None, reason="pendulum install broken on win") def test_datetime_partial_second_pendulum_supported(self): """ datetime.datetime UTC offset round seconds https://tools.ietf.org/html/rfc3339#section-5.8 """ self.assertEqual( orjson.dumps( [ datetime.datetime( 1937, 1, 1, 12, 0, 27, 87, tzinfo=pendulum.timezone("Europe/Amsterdam"), ) ] ), b'["1937-01-01T12:00:27.000087+00:20"]', ) def test_datetime_partial_second_pytz(self): """ datetime.datetime UTC offset round seconds https://tools.ietf.org/html/rfc3339#section-5.8 """ self.assertEqual( orjson.dumps( [ datetime.datetime( 1937, 1, 1, 12, 0, 27, 87, tzinfo=pytz.timezone("Europe/Amsterdam"), ) ] ), b'["1937-01-01T12:00:27.000087+00:20"]', ) def test_datetime_partial_second_dateutil(self): """ datetime.datetime UTC offset round seconds https://tools.ietf.org/html/rfc3339#section-5.8 """ self.assertEqual( orjson.dumps( [ datetime.datetime( 1937, 1, 1, 12, 0, 27, 87, tzinfo=tz.gettz("Europe/Amsterdam") ) ] ), 
b'["1937-01-01T12:00:27.000087+00:20"]', ) def test_datetime_microsecond_max(self): """ datetime.datetime microsecond max """ self.assertEqual( orjson.dumps(datetime.datetime(2000, 1, 1, 0, 0, 0, 999999)), b'"2000-01-01T00:00:00.999999"', ) def test_datetime_microsecond_min(self): """ datetime.datetime microsecond min """ self.assertEqual( orjson.dumps(datetime.datetime(2000, 1, 1, 0, 0, 0, 1)), b'"2000-01-01T00:00:00.000001"', ) def test_datetime_omit_microseconds(self): """ datetime.datetime OPT_OMIT_MICROSECONDS """ self.assertEqual( orjson.dumps( [datetime.datetime(2000, 1, 1, 2, 3, 4, 123)], option=orjson.OPT_OMIT_MICROSECONDS, ), b'["2000-01-01T02:03:04"]', ) def test_datetime_omit_microseconds_naive(self): """ datetime.datetime naive OPT_OMIT_MICROSECONDS """ self.assertEqual( orjson.dumps( [datetime.datetime(2000, 1, 1, 2, 3, 4, 123)], option=orjson.OPT_NAIVE_UTC | orjson.OPT_OMIT_MICROSECONDS, ), b'["2000-01-01T02:03:04+00:00"]', ) def test_time_omit_microseconds(self): """ datetime.time OPT_OMIT_MICROSECONDS """ self.assertEqual( orjson.dumps( [datetime.time(2, 3, 4, 123)], option=orjson.OPT_OMIT_MICROSECONDS ), b'["02:03:04"]', ) def test_datetime_utc_z_naive_omit(self): """ datetime.datetime naive OPT_UTC_Z """ self.assertEqual( orjson.dumps( [datetime.datetime(2000, 1, 1, 2, 3, 4, 123)], option=orjson.OPT_NAIVE_UTC | orjson.OPT_UTC_Z | orjson.OPT_OMIT_MICROSECONDS, ), b'["2000-01-01T02:03:04Z"]', ) def test_datetime_utc_z_naive(self): """ datetime.datetime naive OPT_UTC_Z """ self.assertEqual( orjson.dumps( [datetime.datetime(2000, 1, 1, 2, 3, 4, 123)], option=orjson.OPT_NAIVE_UTC | orjson.OPT_UTC_Z, ), b'["2000-01-01T02:03:04.000123Z"]', ) def test_datetime_utc_z_without_tz(self): """ datetime.datetime naive OPT_UTC_Z """ self.assertEqual( orjson.dumps( [datetime.datetime(2000, 1, 1, 2, 3, 4, 123)], option=orjson.OPT_UTC_Z ), b'["2000-01-01T02:03:04.000123"]', ) def test_datetime_utc_z_with_tz(self): """ datetime.datetime naive OPT_UTC_Z """ self.assertEqual( orjson.dumps( [ datetime.datetime( 2000, 1, 1, 0, 0, 0, 1, tzinfo=datetime.timezone.utc ) ], option=orjson.OPT_UTC_Z, ), b'["2000-01-01T00:00:00.000001Z"]', ) self.assertEqual( orjson.dumps( [ datetime.datetime( 1937, 1, 1, 12, 0, 27, 87, tzinfo=tz.gettz("Europe/Amsterdam") ) ], option=orjson.OPT_UTC_Z, ), b'["1937-01-01T12:00:27.000087+00:20"]', ) @pytest.mark.skipif(pendulum is None, reason="pendulum install broken on win") def test_datetime_roundtrip(self): """ datetime.datetime parsed by pendulum """ obj = datetime.datetime(2000, 1, 1, 0, 0, 0, 1, tzinfo=datetime.timezone.utc) serialized = orjson.dumps(obj).decode("utf-8").replace('"', "") parsed = pendulum.parse(serialized) for attr in ("year", "month", "day", "hour", "minute", "second", "microsecond"): self.assertEqual(getattr(obj, attr), getattr(parsed, attr)) class DateTests(unittest.TestCase): def test_date(self): """ datetime.date """ self.assertEqual(orjson.dumps([datetime.date(2000, 1, 13)]), b'["2000-01-13"]') def test_date_min(self): """ datetime.date MINYEAR """ self.assertEqual( orjson.dumps([datetime.date(datetime.MINYEAR, 1, 1)]), b'["0001-01-01"]' ) def test_date_max(self): """ datetime.date MAXYEAR """ self.assertEqual( orjson.dumps([datetime.date(datetime.MAXYEAR, 12, 31)]), b'["9999-12-31"]' ) def
(self): """ datetime.date three digit year """ self.assertEqual( orjson.dumps( [datetime.date(312, 1, 1)], ), b'["0312-01-01"]', ) def test_date_two_digits(self): """ datetime.date two digit year """ self.assertEqual( orjson.dumps( [datetime.date(46, 1, 1)], ), b'["0046-01-01"]', ) class TimeTests(unittest.TestCase): def test_time(self): """ datetime.time """ self.assertEqual( orjson.dumps([datetime.time(12, 15, 59, 111)]), b'["12:15:59.000111"]' ) self.assertEqual(orjson.dumps([datetime.time(12, 15, 59)]), b'["12:15:59"]') def test_time_tz(self): """ datetime.time with tzinfo error """ with self.assertRaises(orjson.JSONEncodeError): orjson.dumps( [datetime.time(12, 15, 59, 111, tzinfo=tz.gettz("Asia/Shanghai"))] ) def test_time_microsecond_max(self): """ datetime.time microsecond max """ self.assertEqual( orjson.dumps(datetime.time(0, 0, 0, 999999)), b'"00:00:00.999999"' ) def test_time_microsecond_min(self): """ datetime.time microsecond min """ self.assertEqual(orjson.dumps(datetime.time(0, 0, 0, 1)), b'"00:00:00.000001"') class DateclassPassthroughTests(unittest.TestCase): def test_passthrough_datetime(self): with self.assertRaises(orjson.JSONEncodeError): orjson.dumps( datetime.datetime(1970, 1, 1), option=orjson.OPT_PASSTHROUGH_DATETIME ) def test_passthrough_date(self): with self.assertRaises(orjson.JSONEncodeError): orjson.dumps( datetime.date(1970, 1, 1), option=orjson.OPT_PASSTHROUGH_DATETIME ) def test_passthrough_time(self): with self.assertRaises(orjson.JSONEncodeError): orjson.dumps( datetime.time(12, 0, 0), option=orjson.OPT_PASSTHROUGH_DATETIME ) def test_passthrough_datetime_default(self): def default(obj): return obj.strftime("%a, %d %b %Y %H:%M:%S GMT") self.assertEqual( orjson.dumps( datetime.datetime(1970, 1, 1), option=orjson.OPT_PASSTHROUGH_DATETIME, default=default, ), b'"Thu, 01 Jan 1970 00:00:00 GMT"', )
scour_comment.rs
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. use oxidized::{i_set::ISet, pos::Pos, prim_defs::Comment, scoured_comments::ScouredComments}; use parser_core_types::{ indexed_source_text::IndexedSourceText, lexable_token::LexablePositionedToken, lexable_trivia::LexableTrivium, positioned_syntax::PositionedSyntaxTrait, positioned_trivia::PositionedTrivium, source_text::SourceText, syntax::SyntaxVariant::*, syntax::*, trivia_kind::TriviaKind, }; use regex::bytes::Regex; /** The full fidelity parser considers all comments "simply" trivia. Some * comments have meaning, though. This meaning can either be relevant for the * type checker (like HH_FIXME, etc.), but also for other uses, like * Codex, where comments are used for documentation generation. * * Inlining the scrape for comments in the lowering code would be prohibitively * complicated, but a separate pass is fine. */ pub struct ScourComment<'a, T, V> { pub indexed_source_text: &'a IndexedSourceText<'a>, pub collect_fixmes: bool, pub include_line_comments: bool, pub allowed_decl_fixme_codes: &'a ISet, pub phantom: std::marker::PhantomData<(*const T, *const V)>, pub disable_hh_ignore_error: bool, } impl<'src, T, V> ScourComment<'src, T, V> where T: LexablePositionedToken, V: SyntaxValueType<T>, Syntax<T, V>: PositionedSyntaxTrait, { pub fn scour_comments<'a>(&self, top_node: &'a Syntax<T, V>) -> ScouredComments { let mut acc = ScouredComments::new(); let mut stack: Vec<(&'a Syntax<T, V>, bool)> = vec![(top_node, false)]; while let Some((node, mut in_block)) = stack.pop() { match &node.syntax { CompoundStatement(_) => in_block = true, Token(t) => { if t.has_trivia_kind(TriviaKind::DelimitedComment) || (self.include_line_comments && t.has_trivia_kind(TriviaKind::SingleLineComment)) || (self.collect_fixmes && (t.has_trivia_kind(TriviaKind::FixMe) || (t.has_trivia_kind(TriviaKind::IgnoreError) && !self.disable_hh_ignore_error))) { for tr in t .positioned_leading() .as_ref() .iter() .chain(t.positioned_trailing().as_ref().iter()) { self.on_trivia(in_block, node, tr, &mut acc); } } continue; } _ => {} } stack.extend(node.iter_children().rev().map(|c| (c, in_block))); } acc.comments.reverse(); acc } fn
on_trivia
( &self, in_block: bool, node: &Syntax<T, V>, t: &PositionedTrivium, acc: &mut ScouredComments, ) { use oxidized::relative_path::Prefix; use TriviaKind::*; match t.kind() { WhiteSpace | EndOfLine | FallThrough | ExtraTokenError => {} DelimitedComment => { let start = t.start_offset() + 2; let end = t.end_offset(); let len = end - start - 1; let p = self.pos_of_offset(end - 1, end); let text = self.source_text().sub_as_str(start, len).to_string(); acc.comments.push((p, Comment::CmtBlock(text))); } SingleLineComment => { if self.include_line_comments { let text = self.source_text().text(); let start = t.start_offset(); let start = start + if text[start] == b'#' { 1 } else { 2 }; let end = t.end_offset(); let len = end - start + 1; let p = self.pos_of_offset(start, end); let mut text = self.source_text().sub_as_str(start, len).to_string(); text.push('\n'); acc.comments.push((p, Comment::CmtLine(text))); } } FixMe | IgnoreError => { lazy_static! { static ref IGNORE_ERROR: Regex = Regex::new(r#"HH_(?:FIXME|IGNORE_ERROR)[ \t\n]*\[([0-9]+)\]"#).unwrap(); } if self.collect_fixmes { let text = t.text_raw(self.source_text()); let pos = self.p_pos(node); let line = pos.line() as isize; let p = self.pos_of_offset(t.start_offset(), t.end_offset()); match IGNORE_ERROR .captures(text) .and_then(|c| c.get(1)) .map(|m| m.as_bytes()) { Some(code) => { let code = std::str::from_utf8(code).unwrap(); let code: isize = std::str::FromStr::from_str(code).unwrap(); let in_hhi = pos.filename().prefix() == Prefix::Hhi; if !(in_block || in_hhi || self.allowed_decl_fixme_codes.contains(&code)) { acc.add_to_misuses(line, code, p); } else { acc.add_to_fixmes(line, code, p); } } None => { // Errors.fixme_format pos; acc.add_format_error(pos); } } } } } } fn source_text(&self) -> &'src SourceText<'src> { self.indexed_source_text.source_text() } fn p_pos(&self, node: &Syntax<T, V>) -> Pos { node.position_exclusive(self.indexed_source_text) .unwrap_or_else(Pos::make_none) } fn pos_of_offset(&self, start: usize, end: usize) -> Pos { self.indexed_source_text.relative_pos(start, end) } }
tx_change.go
/* * Copyright (c) 2019 QLC Chain Team * * This software is released under the MIT License. * https://opensource.org/licenses/MIT */ package commands import ( "encoding/hex" "errors" "fmt" "github.com/abiosoft/ishell" rpc "github.com/qlcchain/jsonrpc2" "github.com/spf13/cobra" "github.com/qlcchain/go-qlc/cmd/util" "github.com/qlcchain/go-qlc/common/types" ) func addTxChangeCmdByShell(parentCmd *ishell.Cmd) { priKeyFlag := util.Flag{ Name: "priKey", Must: true, Usage: "account private key", Value: "", } repAddrFlag := util.Flag{ Name: "repAddr", Must: true, Usage: "representative address", Value: "", } args := []util.Flag{priKeyFlag, repAddrFlag} c := &ishell.Cmd{ Name: "change", Help: "change representative", CompleterWithPrefix: util.OptsCompleter(args), Func: func(c *ishell.Context) { if util.HelpText(c, args) { return } if err := util.CheckArgs(c, args); err != nil { util.Warn(err) return } accountP := util.StringVar(c.Args, priKeyFlag) repP := util.StringVar(c.Args, repAddrFlag) err := changeAction(accountP, repP) if err != nil { util.Warn(err) return } util.Info("change representative success!") }, } parentCmd.AddCmd(c) } func addTxChangeCmdByCobra(parentCmd *cobra.Command) { var accountP string var repP string var sendCmd = &cobra.Command{ Use: "change", Short: "change representative", Run: func(cmd *cobra.Command, args []string) { err := changeAction(accountP, repP) if err != nil { cmd.Println(err) return } fmt.Println("change representative success!") }, } sendCmd.Flags().StringVarP(&accountP, "priKey", "k", "", "account private key") sendCmd.Flags().StringVarP(&repP, "repAddr", "r", "", "representative address") parentCmd.AddCommand(sendCmd) } func changeAction(accountP string, repP string) error
{
	if accountP == "" || repP == "" {
		return errors.New("err change param values")
	}
	bytes, err := hex.DecodeString(accountP)
	if err != nil {
		return err
	}
	fromAccount := types.NewAccount(bytes)

	repAddr, err := types.HexToAddress(repP)
	if err != nil {
		return err
	}

	if err := sendChangeTx(fromAccount, repAddr); err != nil {
		return err
	}
	return nil
}
func sendChangeTx(account *types.Account, repAddr types.Address) error { client, err := rpc.Dial(endpointP) if err != nil { return err } defer client.Close() var changeBlock types.StateBlock err = client.Call(&changeBlock, "ledger_generateChangeBlock", account.Address(), repAddr, hex.EncodeToString(account.PrivateKey())) if err != nil { fmt.Println(err) return err } var changeHash types.Hash err = client.Call(&changeHash, "ledger_process", &changeBlock) if err != nil { fmt.Println(err) return err } fmt.Println("address", account.Address(), "changeHash", changeHash) return nil }
geolocation.ts
export function getLocation(): Promise<Position> {
  return new Promise((resolve, reject) => {
    navigator.geolocation.getCurrentPosition(resolve, reject);
  });
}
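// Example usage (a sketch; assumes a browser context where the Geolocation
// API is available and the user grants permission):
//   getLocation()
//     .then((pos) => console.log(pos.coords.latitude, pos.coords.longitude))
//     .catch((err) => console.error('geolocation failed', err));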
rulingStepOne.ts
import { defineMessage, defineMessages } from 'react-intl'

export const icRulingStepOne = {
  title: defineMessage({
    id: 'judicial.system.investigation_cases:ruling_step_one.title',
    defaultMessage: 'Úrskurður',
    description: 'Notaður sem titill á úrskurðar skrefi í rannsóknarheimildum.',
  }),
  sections: {
    prosecutorDemands: defineMessages({
      title: {
        id: 'judicial.system.investigation_cases:ruling_step_one.prosecutor_demands.title',
        defaultMessage: 'Dómkröfur',
        description: 'Notaður sem titill fyrir "dómkröfur" hlutann á úrskurðar skrefi í rannsóknarheimildum.',
      },
      label: {
        id: 'judicial.system.investigation_cases:ruling_step_one.prosecutor_demands.label',
        defaultMessage: 'Krafa lögreglu',
        description: 'Notaður sem titill fyrir í "Krafa lögreglu" textaboxi á úrskurðar skrefi í rannsóknarheimildum.',
      },
      placeholder: {
        id: 'judicial.system.investigation_cases:ruling_step_one.prosecutor_demands.placeholder',
        defaultMessage: 'Hvað hafði ákæruvaldið að segja?',
        description: 'Notaður sem skýritexti fyrir í "Hvað hafði ákæruvaldið að segja?" textabox á úrskurðar skrefi í rannsóknarheimildum.',
      },
    }),
    courtCaseFacts: defineMessages({
      title: {
        id: 'judicial.system.investigation_cases:ruling_step_one.court_case_facts.title',
        defaultMessage: 'Greinargerð um málsatvik',
        description: 'Notaður sem titill fyrir "greinargerð um málsatvik" hlutann á úrskurðar skrefi í rannsóknarheimildum.',
      },
      tooltip: {
        id: 'judicial.system.investigation_cases:ruling_step_one.court_case_facts.tooltip',
        defaultMessage: 'Greinargerð lögreglu er forbókuð hér fyrir neðan. Hægt er að breyta textanum og mun hann birtast með þeim hætti í úrskurði dómara.',
        description: 'Notaður sem upplýsingatexti í upplýsingasvæði við "greinargerð um málsatvik" titlinn á úrskurðar skrefi í rannsóknarheimildum.',
      },
      label: {
        id: 'judicial.system.investigation_cases:ruling_step_one.court_case_facts.label',
        defaultMessage: 'Greinargerð um málsatvik',
        description: 'Notaður sem titill fyrir "greinargerð um málsatvik" innsláttarsvæðið á úrskurðar skrefi í rannsóknarheimildum.',
      },
      placeholder: {
        id: 'judicial.system.investigation_cases:ruling_step_one.court_case_facts.placeholder',
        defaultMessage: 'Hvað hefur átt sér stað hingað til? Hver er framburður sakborninga og vitna? Hver er staða rannsóknar og næstu skref?',
        description: 'Notaður sem skýritexti fyrir "greinargerð um málsatvik" innsláttarsvæðið á úrskurðar skrefi í rannsóknarheimildum.',
      },
    }),
    courtLegalArguments: defineMessages({
      title: {
        id: 'judicial.system.investigation_cases:ruling_step_one.court_legal_arguments.title',
        defaultMessage: 'Greinargerð um lagarök',
        description: 'Notaður sem titill fyrir "Greinargerð um lagarök" hlutann á úrskurðar skrefi í rannsóknarheimildum.',
      },
      tooltip: {
        id: 'judicial.system.investigation_cases:ruling_step_one.court_legal_arguments.tooltip',
        defaultMessage: 'Greinargerð lögreglu er forbókuð hér fyrir neðan. Hægt er að breyta textanum og mun hann birtast með þeim hætti í úrskurði dómara.',
        description: 'Notaður sem upplýsingatexti í upplýsingasvæði við "greinargerð um lagarök" titlinn á úrskurðar skrefi í rannsóknarheimildum.',
      },
      label: {
        id: 'judicial.system.investigation_cases:ruling_step_one.court_legal_arguments.label',
        defaultMessage: 'Greinargerð um lagarök',
        description: 'Notaður sem titill fyrir "Lagarök" innsláttarsvæðið á úrskurðar skrefi í rannsóknarheimildum.',
      },
      placeholder: {
        id: 'judicial.system.investigation_cases:ruling_step_one.court_legal_arguments.placeholder',
        defaultMessage: 'Hvað hefur átt sér stað hingað til? Hver er framburður sakborninga og vitna? Hver er staða rannsóknar og næstu skref?',
        description: 'Notaður sem skýritexti fyrir "greinargerð um málsatvik" innsláttarsvæðið á úrskurðar skrefi í rannsóknarheimildum.',
      },
    }),
    decision: defineMessages({
      title: {
        id: 'judicial.system.investigation_cases:ruling_step_one.decision.title',
        defaultMessage: 'Úrskurður',
        description: 'Notaður sem titill fyrir "Úrskurður" hlutann á úrskurðar skrefi í rannsóknarheimildum.',
      },
      acceptLabel: {
        id: 'judicial.system.investigation_cases:ruling_step_one.decision.accept_label',
        defaultMessage: 'Krafa samþykkt',
        description: 'Notaður sem texti við radio takka með vali um að samþykkja rannsóknarheimild á úrskurðar skrefi í rannsóknarheimildum.',
      },
      rejectLabel: {
        id: 'judicial.system.investigation_cases:ruling_step_one.decision.reject_label',
        defaultMessage: 'Kröfu hafnað',
        description: 'Notaður sem texti við radio takka með vali um að hafna rannsóknarheimild á úrskurðar skrefi í rannsóknarheimildum.',
      },
      partiallyAcceptLabel: {
        id: 'judicial.system.investigation_cases:ruling_step_one.decision.partially_accept_label',
        defaultMessage: 'Krafa tekin til greina að hluta',
        description: 'Notaður sem texti við radio takka með vali um að samþykkja rannsóknarheimild að hluta á úrskurðar skrefi í rannsóknarheimildum.',
      },
      dismissLabel: {
        id: 'judicial.system.investigation_cases:ruling_step_one.decision.dismiss_label',
        defaultMessage: 'Kröfu vísað frá',
        description: 'Notaður sem texti við radio takka með vali um að vísa máli frá á úrskurðar skrefi í rannsóknarheimildum.',
      },
    }),
    ruling: defineMessages({
      title: {
        id: 'judicial.system.investigation_cases:ruling_step_one.ruling.title',
        defaultMessage: 'Niðurstaða',
        description: 'Notaður sem titill fyrir "Niðurstaða" hlutann á úrskurðar skrefi í rannsóknarheimildum.',
      },
    }),
  },
}
test_fpo_gram_panchayat.js
/* eslint-disable */ // rename this file from _test_[name] to test_[name] to activate // and remove above this line QUnit.test("test: FPO Gram Panchayat", function (assert) { let done = assert.async(); // number of asserts assert.expect(1); frappe.run_serially([
		// insert a new FPO Gram Panchayat
		() => frappe.tests.make('FPO Gram Panchayat', [
			// values to be set
			{key: 'value'}
		]),
		() => {
			assert.equal(cur_frm.doc.key, 'value');
		},
		() => done()
	]);
});
user_error.go
package errors import "fmt" // UserError is the error interface that can be returned by the API type UserError interface { Cause() error Status() int Code() string Message() string Error() string } // ConcreteUserError is a concrete type that implements the UserError // interface. type ConcreteUserError struct { ErrCause error `json:"-"` ErrStatus int `json:"-"` ErrCode string `json:"code"` ErrMessage string `json:"message"` } // Build constructs a ConcreteUserError from a UserError. func Build(err UserError) *ConcreteUserError { return &ConcreteUserError{ ErrCause: err.Cause(), ErrStatus: err.Status(), ErrCode: err.Code(), ErrMessage: err.Message(), } } // Error complies to the UserError and error interface. func (e *ConcreteUserError) Error() string { if e.ErrCause != nil
return "" } // Cause complies to the UserError interface. func (e *ConcreteUserError) Cause() error { return e.ErrCause } // Status complies to the UserError interface. func (e *ConcreteUserError) Status() int { return e.ErrStatus } // Code complies to the UserError interface. func (e *ConcreteUserError) Code() string { return e.ErrCode } // Message complies to the UserError interface. func (e *ConcreteUserError) Message() string { return e.ErrMessage } // NewUserError is an helper function to construct a new UserError. func NewUserError( err error, status int, code string, message string, ) UserError { return &ConcreteUserError{ ErrCause: err, ErrStatus: status, ErrCode: code, ErrMessage: message, } } // NewUserErrorf is an helper function to construct a new UserError. func NewUserErrorf( err error, status int, code string, format string, args ...interface{}, ) UserError { message := fmt.Sprintf(format, args...) return NewUserError(err, status, code, message) }
{ return e.ErrCause.Error() }
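// Example (a sketch; the status and code values below are illustrative, and
// errNotFound / id are hypothetical variables, not part of this package):
//
//	err := NewUserErrorf(errNotFound, 404, "user_not_found",
//		"no user with id %q", id)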
gencython.py
from functools import lru_cache from pathlib import Path import os from pprint import pprint import re import sys from textwrap import indent import pyparsing as pp from pyparsing import ( Suppress, Word, alphas, alphanums, nums, Optional, Group, ZeroOrMore, empty, restOfLine, Keyword, cStyleComment, Empty, Literal ) HEADER_DIR = (Path(__file__).parent / '../../../include/datoviz').resolve() INTERNAL_HEADER_DIR = (Path(__file__).parent / '../../../src').resolve() EXTERNAL_HEADER_DIR = HEADER_DIR / '../../external' CYTHON_OUTPUT = (Path(__file__).parent / '../datoviz/cydatoviz.pxd').resolve() HEADER_FILES = ( 'app.h', 'vklite.h', 'context.h', 'canvas.h', 'keycode.h', 'transforms.h', 'colormaps.h', 'array.h', 'mesh.h', 'controls.h', 'graphics.h', 'builtin_visuals.h', 'panel.h', 'visuals.h', 'scene.h', 'transfers.h') STRUCTS = ( 'DvzEvent', 'DvzEventUnion', 'DvzFrameEvent', 'DvzKeyEvent', 'DvzMouseButtonEvent', 'DvzMouseClickEvent', 'DvzMouseDragEvent', 'DvzMouseMoveEvent', 'DvzMouseWheelEvent', 'DvzRefillEvent', 'DvzGuiEvent', 'DvzResizeEvent', 'DvzScreencastEvent', 'DvzSubmitEvent', 'DvzTimerEvent', 'DvzViewport', ) ENUM_START = '# ENUM START' ENUM_END = '# ENUM END' STRUCT_START = '# STRUCT START' STRUCT_END = '# STRUCT END' UNION_START = '# UNION START' UNION_END = '# UNION END' FUNCTION_START = '# FUNCTION START' FUNCTION_END = '# FUNCTION END' # File explorer and manipulation # ------------------------------------------------------------------------------------------------- def iter_header_files(): for h in sorted(HEADER_DIR.glob('*.h')): yield h for h in sorted(INTERNAL_HEADER_DIR.glob('*.h')): yield h # for h in (INTERNAL_HEADER_DIR / 'log.h',): # yield h def read_file(filename): text = filename.read_text() return _remove_comments(text) def insert_into_file(filename, start, end, insert): text = filename.read_text() i0 = text.index(start) i1 = text.index(end) out = text[:i0 + len(start) + 1] out += indent(insert, ' ') out += text[i1 - 5:] filename.write_text(out) def _remove_comments(text): return '\n'.join([l.split('//')[0] for l in text.splitlines()]) # C header parsing # ------------------------------------------------------------------------------------------------- def parse_defines(text): defines = re.findall( r"#define (C[A-Z\_0-9]+)\s+([^\n]+)", text, re.MULTILINE) defines = dict(defines) defines = {k: v.replace('(', '').replace(')', '') for k, v in defines.items()} for k, v in defines.items(): if v.isdigit(): defines[k] = int(v) for k, v in defines.items(): if isinstance(v, str) and '+' not in v: defines[k] = defines[v] for k, v in defines.items(): if isinstance(v, str) and '+' in v: defines[k] = defines[v.split(' + ')[0]] + \ defines[v.split(' + ')[1]] return defines # _STRUCT_NAMES = ('DvzPrivateEvent', 'DvzEvent') def _parse_enum(text): enums = {} # syntax we don't want to see in the final parse tree LBRACE, RBRACE, EQ, COMMA, SEMICOLON = map(Suppress, "{}=,;") _enum = Suppress("typedef enum") identifier = Word(alphanums + "_+-") enumValue = Group( identifier("name") + Optional(EQ + identifier("value")) + Optional(COMMA) + Optional(Suppress(cStyleComment))) enumList = Group(enumValue + ZeroOrMore(enumValue)) enum = _enum + LBRACE + \ enumList("names") + RBRACE + identifier("enum") + SEMICOLON for item, start, stop in enum.scanString(text): l = [] for i, entry in enumerate(item.names): if entry.value.isdigit(): entry.value = int(entry.value) elif not entry.value: entry.value = i elif entry.value in ('false', 'true'): entry.value = entry.value.capitalize() 
l.append((entry.name, entry.value)) enums[item.enum] = l return enums def _gen_enum(enums): out = '' for name, l in enums.items(): out += f'ctypedef enum {name}:\n' for identifier, value in l: out += f' {identifier} = {value}\n' out += '\n' return out def _parse_struct(text): structs = {} # syntax we don't want to see in the final parse tree LBRACE, RBRACE, COMMA, SEMICOLON = map(Suppress, "{},;") _struct = Literal("struct") ^ Literal("union") const = Keyword("const") dtype = Word(alphanums + "_*") identifier = Word(alphanums + "_[]") structDecl = Group(Optional(const("const")) + dtype("dtype") + identifier("name") + SEMICOLON) structList = Group(structDecl + ZeroOrMore(structDecl)) struct = _struct('struct') + identifier("struct_name") + LBRACE + \ structList("names") + RBRACE + SEMICOLON for item, start, stop in struct.scanString(text): l = [] for i, entry in enumerate(item.names): l.append((entry.const, entry.dtype, entry.name)) structs[item.struct_name] = (item.struct, l) return structs def _gen_struct(structs): out = '' for name, (struct, l) in structs.items(): if name in STRUCTS or name.startswith('DvzGui'): out += f'ctypedef {struct} {name}:\n' for const, dtype, identifier in l: if dtype == 'bool': dtype = 'bint' if const: dtype = "const " + dtype out += f' {dtype} {identifier}\n' out += '\n' return out def _parse_func(text, is_output=False):
    if is_output:
        text = text[text.index(FUNCTION_START):text.index(FUNCTION_END)]

    funcs = {}

    # syntax we don't want to see in the final parse tree
    LPAR, RPAR, LBRACE, RBRACE, COMMA, SEMICOLON = map(Suppress, "(){},;")

    const = Keyword("const")
    dtype = Word(alphanums + "_*")
    identifier = Word(alphanums + "_")
    argDecl = Group(
        Optional(const("const")) +
        dtype("dtype") +
        Optional(identifier("name")) +
        Optional(COMMA))
    args = Group(ZeroOrMore(argDecl))
    if not is_output:
        func = Suppress("DVZ_EXPORT")
    else:
        func = Empty()
    func = func + \
        dtype("out") + \
        identifier("name") + \
        LPAR + args("args") + RPAR + \
        Optional(SEMICOLON)

    for item, start, stop in func.scanString(text):
        args = []
        for i, entry in enumerate(item.args):
            args.append((entry.const, entry.dtype, entry.name))
        funcs[item.name] = (item.out, tuple(args))
    return funcs
@lru_cache(maxsize=64) def _camel_to_snake(name): name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower() def _gen_cython_func(name, func): out, args = func args_s = [] for const, dtype, argname in args: if not argname: if 'int' in dtype: argname = 'n%d' % len(args_s) elif dtype == 'float': argname = 'value' elif 'vec' in dtype: argname = 'vec' elif dtype == 'void*': argname = 'buf' elif 'char' in dtype: argname = 's' elif dtype == 'void': dtype = '' argname = '' elif dtype == 'bool': argname = 'value' elif dtype == 'size_t': argname = 'size' elif 'Dvz' in dtype: argname = _camel_to_snake( dtype.replace('Dvz', '')).replace('*', '') else: raise ValueError(dtype) if const: dtype = "const " + dtype if dtype == 'bool': dtype = 'bint' args_s.append(f'{dtype} {argname}') args = ', '.join(args_s) return f'{out} {name}({args})' if __name__ == '__main__': enums_to_insert = '' structs_to_insert = '' funcs_to_insert = '' # Parse already-defined functions in the pxd already_defined_funcs = _parse_func( read_file(CYTHON_OUTPUT), is_output=True) for filename in iter_header_files(): if filename.name not in HEADER_FILES: continue text = read_file(filename) # Parse the enums enums = _parse_enum(text) # Generate the Cython enum definitions generated = _gen_enum(enums) if generated: enums_to_insert += f'# from file: {filename.name}\n\n{generated}' if 'color' in str(filename): defines = parse_defines(text) for key, val in defines.items(): enums_to_insert = enums_to_insert.replace(key, str(val)) # Parse the structs structs = _parse_struct(text) # Generate the Cython struct definitions generated = _gen_struct(structs) if generated: structs_to_insert += f'# from file: {filename.name}\n\n{generated}' if 'controls' in str(filename): defines = parse_defines(text) for key, val in defines.items(): structs_to_insert = structs_to_insert.replace(key, str(val)) # Parse the functions funcs = _parse_func(text) h = f'# from file: {filename.name}\n' funcs_to_insert += h generated = '' for name, func in funcs.items(): existing = already_defined_funcs.pop(name, None) if not existing: continue generated = _gen_cython_func(name, func) funcs_to_insert += generated + '\n' if not generated: funcs_to_insert = funcs_to_insert[:-len(h)] else: funcs_to_insert += '\n' if already_defined_funcs.keys(): print(already_defined_funcs) raise RuntimeError( "Some Cython function bindings are missing, check gencython.py") # Insert into the Cython file insert_into_file( CYTHON_OUTPUT, ENUM_START, ENUM_END, enums_to_insert) insert_into_file( CYTHON_OUTPUT, STRUCT_START, STRUCT_END, structs_to_insert) # insert_into_file( # CYTHON_OUTPUT, UNION_START, UNION_END, unions_to_insert) insert_into_file( CYTHON_OUTPUT, FUNCTION_START, FUNCTION_END, funcs_to_insert)
if is_output: text = text[text.index(FUNCTION_START):text.index(FUNCTION_END)] funcs = {} # syntax we don't want to see in the final parse tree LPAR, RPAR, LBRACE, RBRACE, COMMA, SEMICOLON = map(Suppress, "(){},;") const = Keyword("const") dtype = Word(alphanums + "_*") identifier = Word(alphanums + "_") argDecl = Group( Optional(const("const")) + dtype("dtype") + Optional(identifier("name") ) + Optional(COMMA)) args = Group(ZeroOrMore(argDecl)) if not is_output: func = Suppress("DVZ_EXPORT") else: func = Empty() func = func + \ dtype("out") + \ identifier("name") + \ LPAR + args("args") + RPAR + \ Optional(SEMICOLON) for item, start, stop in func.scanString(text): args = [] for i, entry in enumerate(item.args): args.append((entry.const, entry.dtype, entry.name)) funcs[item.name] = (item.out, tuple(args)) return funcs
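# A quick illustration (not part of the generator itself) of what
# `_parse_func` produces. For a hypothetical header declaration such as
#
#     DVZ_EXPORT void dvz_app_destroy(DvzApp* app);
#
# the returned dict maps the function name to its output type and a tuple of
# (const, dtype, name) triples for the arguments:
#
#     {'dvz_app_destroy': ('void', (('', 'DvzApp*', 'app'),))}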
datagen.py
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Data generators for cuML benchmarks

The main entry point for consumers is gen_data, which wraps the underlying
data generators.

Notes when writing new generators:

Each generator is a function that accepts:
 * n_samples (set to 0 for 'default')
 * n_features (set to 0 for 'default')
 * random_state
 * (and optional generator-specific parameters)

The function should return a 2-tuple (X, y), where X is a cudf DataFrame
and y is a cudf Series. If the generator does not produce labels, it can
return (X, None)

A set of helper functions (convert_*) can convert these to alternative
formats. Future revisions may support generating GPU arrays directly
instead.

"""

import cudf
import gzip
import functools
import numpy as np
import os
import pandas as pd

import cuml.datasets
import sklearn.model_selection

from urllib.request import urlretrieve
from cuml.common import input_utils
from numba import cuda


def _gen_data_regression(n_samples, n_features, random_state=42):
    """Wrapper for cuml make_regression"""
    if n_samples == 0:
        n_samples = int(1e6)
    if n_features == 0:
        n_features = 100
    X_arr, y_arr = cuml.datasets.make_regression(
        n_samples=n_samples, n_features=n_features,
        random_state=random_state)
    return cudf.DataFrame(X_arr), cudf.Series(y_arr)


def _gen_data_blobs(n_samples, n_features, random_state=42, centers=None):
    """Wrapper for cuml make_blobs"""
    if n_samples == 0:
        n_samples = int(1e6)
    if n_features == 0:
        n_features = 100
    X_arr, y_arr = cuml.datasets.make_blobs(
        n_samples=n_samples, n_features=n_features, centers=centers,
        random_state=random_state)
    return (
        cudf.DataFrame(X_arr.astype(np.float32)),
        cudf.Series(y_arr.astype(np.float32)),
    )


def _gen_data_zeros(n_samples, n_features, random_state=42):
    """Dummy generator for use in testing - returns all 0s"""
    return (
        cudf.DataFrame(np.zeros((n_samples, n_features), dtype=np.float32)),
        cudf.Series(np.zeros(n_samples, dtype=np.float32)),
    )


def _gen_data_classification(
    n_samples, n_features, random_state=42, n_classes=2
):
    """Wrapper for cuml make_classification"""
    if n_samples == 0:
        n_samples = int(1e6)
    if n_features == 0:
        n_features = 100
    X_arr, y_arr = cuml.datasets.make_classification(
        n_samples=n_samples, n_features=n_features, n_classes=n_classes,
        random_state=random_state)
    return (
        cudf.DataFrame(X_arr.astype(np.float32)),
        cudf.Series(y_arr.astype(np.float32)),
    )


def _gen_data_higgs(n_samples=0, n_features=0, random_state=42):
    """Wrapper returning the Higgs dataset in cudf format"""
    X_df, y_df = load_higgs()
    if n_samples == 0:
        n_samples = X_df.shape[0]
    if n_features == 0:
        n_features = X_df.shape[1]
    if n_features > X_df.shape[1]:
        raise ValueError(
            "Higgs dataset has only %d features, cannot support %d"
            % (X_df.shape[1], n_features)
        )
    if n_samples > X_df.shape[0]:
        raise ValueError(
            "Higgs dataset has only %d rows, cannot support %d"
            % (X_df.shape[0], n_samples)
        )
    return X_df.iloc[:n_samples, :n_features], y_df.iloc[:n_samples]
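# A minimal sketch of an additional generator following the contract from the
# module docstring. This generator is illustrative only and is not registered
# in _data_generators below:
def _gen_data_uniform(n_samples, n_features, random_state=42):
    """Dummy generator returning uniform random features and binary labels"""
    if n_samples == 0:
        n_samples = int(1e6)
    if n_features == 0:
        n_features = 100
    rng = np.random.RandomState(random_state)
    X_arr = rng.uniform(size=(n_samples, n_features)).astype(np.float32)
    y_arr = rng.randint(0, 2, size=n_samples).astype(np.float32)
    return cudf.DataFrame(X_arr), cudf.Series(y_arr)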
def _download_and_cache(url, compressed_filepath, decompressed_filepath): if not os.path.isfile(compressed_filepath): urlretrieve(url, compressed_filepath) if not os.path.isfile(decompressed_filepath): cf = gzip.GzipFile(compressed_filepath) with open(decompressed_filepath, 'wb') as df: df.write(cf.read()) return decompressed_filepath # Default location to cache datasets DATASETS_DIRECTORY = '.' def load_higgs(): """Returns the Higgs Boson dataset as an X, y tuple of dataframes.""" higgs_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz' # noqa decompressed_filepath = _download_and_cache( higgs_url, os.path.join(DATASETS_DIRECTORY, "HIGGS.csv.gz"), os.path.join(DATASETS_DIRECTORY, "HIGGS.csv"), ) col_names = ['label'] + [ "col-{}".format(i) for i in range(2, 30) ] # Assign column names dtypes_ls = [np.int32] + [ np.float32 for _ in range(2, 30) ] # Assign dtypes to each column data_df = pd.read_csv( decompressed_filepath, names=col_names, dtype={k: v for k, v in zip(col_names, dtypes_ls)} ) X_df = data_df[data_df.columns.difference(['label'])] y_df = data_df['label'] return cudf.DataFrame.from_pandas(X_df), cudf.Series.from_pandas(y_df) def _convert_to_numpy(data): """Returns tuple data with all elements converted to numpy ndarrays""" if data is None: return None elif isinstance(data, tuple): return tuple([_convert_to_numpy(d) for d in data]) elif isinstance(data, np.ndarray): return data elif isinstance(data, cudf.DataFrame): return data.as_matrix() elif isinstance(data, cudf.Series): return data.to_array() elif isinstance(data, (pd.DataFrame, pd.Series)): return data.to_numpy() else: raise Exception("Unsupported type %s" % str(type(data))) def _convert_to_cudf(data): if data is None: return None elif isinstance(data, tuple): return tuple([_convert_to_cudf(d) for d in data]) elif isinstance(data, (cudf.DataFrame, cudf.Series)): return data elif isinstance(data, pd.DataFrame): return cudf.DataFrame.from_pandas(data) elif isinstance(data, pd.Series): return cudf.Series.from_pandas(data) else: raise Exception("Unsupported type %s" % str(type(data))) def _convert_to_pandas(data): if data is None: return None elif isinstance(data, tuple): return tuple([_convert_to_pandas(d) for d in data]) elif isinstance(data, (pd.DataFrame, pd.Series)): return data elif isinstance(data, (cudf.DataFrame, cudf.Series)): return data.to_pandas() else: raise Exception("Unsupported type %s" % str(type(data))) def _convert_to_gpuarray(data, order='F'): if data is None: return None elif isinstance(data, tuple): return tuple([_convert_to_gpuarray(d, order=order) for d in data]) elif isinstance(data, pd.DataFrame): return _convert_to_gpuarray(cudf.DataFrame.from_pandas(data), order=order) elif isinstance(data, pd.Series): gs = cudf.Series.from_pandas(data) return cuda.as_cuda_array(gs) else: return input_utils.input_to_dev_array(data, order=order)[0] def _convert_to_gpuarray_c(data): return _convert_to_gpuarray(data, order='C') _data_generators = { 'blobs': _gen_data_blobs, 'zeros': _gen_data_zeros, 'classification': _gen_data_classification, 'regression': _gen_data_regression, 'higgs': _gen_data_higgs, } _data_converters = { 'numpy': _convert_to_numpy, 'cudf': _convert_to_cudf, 'pandas': _convert_to_pandas, 'gpuarray': _convert_to_gpuarray, 'gpuarray-c': _convert_to_gpuarray_c, } def
():
    return _data_generators


@functools.lru_cache(maxsize=8)
def gen_data(
    dataset_name,
    dataset_format,
    n_samples=0,
    n_features=0,
    random_state=42,
    test_fraction=0.0,
    **kwargs
):
    """Returns a tuple of data from the specified generator.

    Returns
    -------
    (train_features, train_labels, test_features, test_labels) tuple
    containing matrices or dataframes of the requested format.
    test_features and test_labels may be None if no splitting was done.

    Parameters
    ----------
    dataset_name : str
        Dataset to use. Can be a synthetic generator (blobs, classification,
        regression, or zeros) or a specified dataset (higgs currently, others
        coming soon)
    dataset_format : str
        Type of data to return. (One of cudf, numpy, pandas, gpuarray,
        gpuarray-c)
    n_samples : int
        Number of samples to include in training set (regardless of test
        split)
    n_features : int
        Number of features to include (0 selects each generator's default)
    random_state : int
        Seed passed through to the underlying generator
    test_fraction : float
        Fraction of the dataset to partition randomly into the test set.
        If this is 0.0, no test set will be created.
    """
    data = _data_generators[dataset_name](
        int(n_samples / (1 - test_fraction)),
        n_features,
        random_state,
        **kwargs
    )
    if test_fraction != 0.0:
        if n_samples == 0:
            n_samples = int(data[0].shape[0] * (1 - test_fraction))
        X_train, X_test, y_train, y_test = tuple(
            sklearn.model_selection.train_test_split(
                *data, train_size=n_samples, random_state=random_state
            )
        )
        data = (X_train, y_train, X_test, y_test)
    else:
        data = (*data, None, None)  # No test set

    data = _data_converters[dataset_format](data)
    return data
all_datasets
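# Usage sketch (hypothetical call, shown for illustration only):
#
#   X_train, y_train, X_test, y_test = gen_data(
#       'blobs', 'cudf', n_samples=10000, n_features=20, test_fraction=0.2)
#
# The generator is asked for int(10000 / 0.8) = 12500 rows up front so that
# the training set still holds the requested 10000 samples after the split.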
config.go
package dagprocessor import ( "time" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/NextSmartChain/go-next-base/inter/dag" "github.com/NextSmartChain/go-next-base/utils/cachescale" ) type Config struct { EventsBufferLimit dag.Metric EventsSemaphoreTimeout time.Duration MaxUnorderedInsertions int } func (c Config) MaxTasks() int { return c.MaxUnorderedInsertions*2 + 1 } func DefaultConfig(scale cachescale.Func) Config { return Config{ EventsBufferLimit: dag.Metric{ // Shouldn't be too big because complexity is O(n) for each insertion in the EventsBuffer Num: 3000, Size: scale.U64(10 * opt.MiB), },
EventsSemaphoreTimeout: 10 * time.Second, } }
leader_schedule_utils.rs
use crate::leader_schedule::LeaderSchedule; use crate::staking_utils; use solana_runtime::bank::Bank; use solana_sdk::{ clock::{Epoch, Slot, NUM_CONSECUTIVE_LEADER_SLOTS}, pubkey::Pubkey, }; /// Return the leader schedule for the given epoch. pub fn leader_schedule(epoch: Epoch, bank: &Bank) -> Option<LeaderSchedule> { staking_utils::staked_nodes_at_epoch(bank, epoch).map(|stakes| { let mut seed = [0u8; 32]; seed[0..8].copy_from_slice(&epoch.to_le_bytes()); let mut stakes: Vec<_> = stakes.into_iter().collect(); sort_stakes(&mut stakes); LeaderSchedule::new( &stakes, seed, bank.get_slots_in_epoch(epoch), NUM_CONSECUTIVE_LEADER_SLOTS, ) }) } /// Return the leader for the given slot. pub fn slot_leader_at(slot: Slot, bank: &Bank) -> Option<Pubkey> { let (epoch, slot_index) = bank.get_epoch_and_slot_index(slot); leader_schedule(epoch, bank).map(|leader_schedule| leader_schedule[slot_index]) } // Returns the number of ticks remaining from the specified tick_height to the end of the // slot implied by the tick_height pub fn num_ticks_left_in_slot(bank: &Bank, tick_height: u64) -> u64 { bank.ticks_per_slot() - tick_height % bank.ticks_per_slot() } fn sort_stakes(stakes: &mut Vec<(Pubkey, u64)>) { // Sort first by stake. If stakes are the same, sort by pubkey to ensure a // deterministic result. // Note: Use unstable sort, because we dedup right after to remove the equal elements. stakes.sort_unstable_by(|(l_pubkey, l_stake), (r_pubkey, r_stake)| { if r_stake == l_stake { r_pubkey.cmp(&l_pubkey) } else { r_stake.cmp(&l_stake) } }); // Now that it's sorted, we can do an O(n) dedup. stakes.dedup(); } #[cfg(test)] mod tests { use super::*; use solana_runtime::genesis_utils::{ create_genesis_block_with_leader, BOOTSTRAP_LEADER_LAMPORTS, }; #[test] fn test_leader_schedule_via_bank() { let pubkey = Pubkey::new_rand(); let genesis_block = create_genesis_block_with_leader(0, &pubkey, BOOTSTRAP_LEADER_LAMPORTS).genesis_block; let bank = Bank::new(&genesis_block); let pubkeys_and_stakes: Vec<_> = staking_utils::staked_nodes(&bank).into_iter().collect(); let seed = [0u8; 32]; let leader_schedule = LeaderSchedule::new( &pubkeys_and_stakes, seed, genesis_block.epoch_schedule.slots_per_epoch, NUM_CONSECUTIVE_LEADER_SLOTS, ); assert_eq!(leader_schedule[0], pubkey); assert_eq!(leader_schedule[1], pubkey); assert_eq!(leader_schedule[2], pubkey); } #[test] fn test_leader_scheduler1_basic()
#[test] fn test_sort_stakes_basic() { let pubkey0 = Pubkey::new_rand(); let pubkey1 = Pubkey::new_rand(); let mut stakes = vec![(pubkey0, 1), (pubkey1, 2)]; sort_stakes(&mut stakes); assert_eq!(stakes, vec![(pubkey1, 2), (pubkey0, 1)]); } #[test] fn test_sort_stakes_with_dup() { let pubkey0 = Pubkey::new_rand(); let pubkey1 = Pubkey::new_rand(); let mut stakes = vec![(pubkey0, 1), (pubkey1, 2), (pubkey0, 1)]; sort_stakes(&mut stakes); assert_eq!(stakes, vec![(pubkey1, 2), (pubkey0, 1)]); } #[test] fn test_sort_stakes_with_equal_stakes() { let pubkey0 = Pubkey::default(); let pubkey1 = Pubkey::new_rand(); let mut stakes = vec![(pubkey0, 1), (pubkey1, 1)]; sort_stakes(&mut stakes); assert_eq!(stakes, vec![(pubkey1, 1), (pubkey0, 1)]); } }
{ let pubkey = Pubkey::new_rand(); let genesis_block = create_genesis_block_with_leader( BOOTSTRAP_LEADER_LAMPORTS, &pubkey, BOOTSTRAP_LEADER_LAMPORTS, ) .genesis_block; let bank = Bank::new(&genesis_block); assert_eq!(slot_leader_at(bank.slot(), &bank).unwrap(), pubkey); }
file.go
package parsers import "os" // FileParser is a configuration parser // which reads a backing configuration file type FileParser struct { fp string sep string } // NewFileParser returns a new FileParser. // // A FileParser reads a backing configuration file // with key/value pairs, separated by the specified separator func
(fp string, sep string) *FileParser { return &FileParser{fp, sep} } // Parse returns the key/value pairs as a map[string]interface{} func (p *FileParser) Parse() (map[string]interface{}, error) { h, err := os.Open(p.fp) if err != nil { return nil, err } defer h.Close() kvp := NewKeyValueParser(h, WithKeyValueSeparator(p.sep)) return kvp.Parse() }
NewFileParser
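// Usage sketch (the file path and key are hypothetical; assumes the
// KeyValueParser referenced above):
//
//	p := NewFileParser("/etc/app.conf", "=")
//	settings, err := p.Parse()
//	if err != nil {
//		// handle the error
//	}
//	_ = settings["listen_addr"] // values come back as map[string]interface{}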
nav.js
const navToggle = document.querySelector('.menu-button'); const nav = document.querySelector('nav'); const containerAll = document.querySelector('.container-all'); navToggle.addEventListener('click', _ => { containerAll.style.transition = 'transform 250ms ease-in-out'; document.body.classList.toggle('nav-is-open'); }) nav.addEventListener('click', _ => { containerAll.style.transition = '0ms'; document.body.classList.remove('nav-is-open');
})
json_unquote.rs
// Copyright 2017 TiKV Project Authors. Licensed under Apache-2.0. use std::{char, str, u32}; use super::super::Result; use super::{JsonRef, JsonType}; const ESCAPED_UNICODE_BYTES_SIZE: usize = 4; const CHAR_BACKSPACE: char = '\x08'; const CHAR_HORIZONTAL_TAB: char = '\x09'; const CHAR_LINEFEED: char = '\x0A'; const CHAR_FORMFEED: char = '\x0C'; const CHAR_CARRIAGE_RETURN: char = '\x0D'; impl<'a> JsonRef<'a> { /// `unquote` recognizes the escape sequences shown in: /// https://dev.mysql.com/doc/refman/5.7/en/json-modification-functions.html# /// json-unquote-character-escape-sequences /// /// See `Unquote()` in TiDB `json/binary_function.go` pub fn unquote(&self) -> Result<String> { match self.get_type() { JsonType::String => { let s = self.get_str()?; unquote_string(s) } _ => Ok(self.to_string()), } } } pub fn unquote_string(s: &str) -> Result<String> { let mut ret = String::with_capacity(s.len()); let mut chars = s.chars(); while let Some(ch) = chars.next() { if ch == '\\' { let c = match chars.next() { Some(c) => c, None => return Err(box_err!("Incomplete escaped sequence")), }; match c { '"' => ret.push('"'), 'b' => ret.push(CHAR_BACKSPACE), 'f' => ret.push(CHAR_FORMFEED), 'n' => ret.push(CHAR_LINEFEED), 'r' => ret.push(CHAR_CARRIAGE_RETURN), 't' => ret.push(CHAR_HORIZONTAL_TAB), '\\' => ret.push('\\'), 'u' => { let b = chars.as_str().as_bytes(); if b.len() < ESCAPED_UNICODE_BYTES_SIZE { return Err(box_err!("Invalid unicode, byte len too short: {:?}", b)); } let unicode = str::from_utf8(&b[0..ESCAPED_UNICODE_BYTES_SIZE])?; if unicode.len() != ESCAPED_UNICODE_BYTES_SIZE { return Err(box_err!("Invalid unicode, char len too short: {}", unicode)); } let utf8 = decode_escaped_unicode(unicode)?; ret.push(utf8); for _ in 0..ESCAPED_UNICODE_BYTES_SIZE { chars.next(); } } _ => { // For all other escape sequences, backslash is ignored. ret.push(c); } } } else { ret.push(ch); } } Ok(ret) } fn decode_escaped_unicode(s: &str) -> Result<char> { let u = box_try!(u32::from_str_radix(s, 16)); char::from_u32(u).ok_or(box_err!("invalid char from: {}", s)) } #[cfg(test)] mod tests { use super::super::Json; use super::*; use std::collections::BTreeMap; #[test] fn test_decode_escaped_unicode() { let mut test_cases = vec![ ("5e8a", 'ๅบŠ'), ("524d", 'ๅ‰'), ("660e", 'ๆ˜Ž'), ("6708", 'ๆœˆ'), ("5149", 'ๅ…‰'), ]; for (i, (escaped, expected)) in test_cases.drain(..).enumerate() { let d = decode_escaped_unicode(escaped); assert!(d.is_ok(), "#{} expect ok but got err {:?}", i, d); let got = d.unwrap(); assert_eq!( got, expected, "#{} expect {:?} but got {:?}", i, expected, got );
#[test] fn test_json_unquote() { // test unquote json string let mut test_cases = vec![ ("\\b", true, Some("\x08")), ("\\f", true, Some("\x0C")), ("\\n", true, Some("\x0A")), ("\\r", true, Some("\x0D")), ("\\t", true, Some("\x09")), ("\\\\", true, Some("\x5c")), ("\\u597d", true, Some("ๅฅฝ")), ("0\\u597d0", true, Some("0ๅฅฝ0")), ("\\a", true, Some("a")), ("[", true, Some("[")), // invalid input ("\\", false, None), ("\\u59", false, None), ]; for (i, (input, no_error, expected)) in test_cases.drain(..).enumerate() { let j = Json::from_string(String::from(input)).unwrap(); let r = j.as_ref().unquote(); if no_error { assert!(r.is_ok(), "#{} expect unquote ok but got err {:?}", i, r); let got = r.unwrap(); let expected = String::from(expected.unwrap()); assert_eq!( got, expected, "#{} expect {:?} but got {:?}", i, expected, got ); } else { assert!(r.is_err(), "#{} expected error but got {:?}", i, r); } } // test unquote other json types let mut test_cases = vec![ Json::from_object(BTreeMap::new()).unwrap(), Json::from_array(vec![]).unwrap(), Json::from_i64(2017).unwrap(), Json::from_f64(19.28).unwrap(), Json::from_bool(true).unwrap(), Json::none().unwrap(), ]; for (i, j) in test_cases.drain(..).enumerate() { let expected = j.to_string(); let r = j.as_ref().unquote(); assert!(r.is_ok(), "#{} expect unquote ok but got err {:?}", i, r); let got = r.unwrap(); assert_eq!( got, expected, "#{} expect {:?} but got {:?}", i, expected, got ); } } }
} }
lib.rs
use anchor_lang::prelude::*; use instructions::*; use state::game::Tile; pub mod errors; pub mod instructions; pub mod state; // this key needs to be changed to whatever public key is returned by "anchor keys list" declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); #[program] pub mod tic_tac_toe { use super::*; pub fn
(ctx: Context<SetupGame>, player_two: Pubkey) -> Result<()> { instructions::setup_game::setup_game(ctx, player_two) } pub fn play(ctx: Context<Play>, tile: Tile) -> Result<()> { instructions::play::play(ctx, tile) } }
setup_game
lib.rs
// indexeddb_from_rust lib.rs

// region: auto_md_to_doc_comments include README.md A //!
//!
//! # indexeddb_from_rust
//!
//! **experimenting with indexeddb in rust wasm PWA**
//! ***version: 2022.512.814 date: 2022-05-12 author: [bestia.dev](https://bestia.dev) repository: [Github](https://github.com/bestia-dev/indexeddb_from_rust)***
//!
//! [![Lines in Rust code](https://img.shields.io/badge/Lines_in_Rust-1067-green.svg)](https://github.com/bestia-dev/indexeddb_from_rust/)
//! [![Lines in Doc comments](https://img.shields.io/badge/Lines_in_Doc_comments-318-blue.svg)](https://github.com/bestia-dev/indexeddb_from_rust/)
//! [![Lines in Comments](https://img.shields.io/badge/Lines_in_comments-153-purple.svg)](https://github.com/bestia-dev/indexeddb_from_rust/)
//! [![Lines in examples](https://img.shields.io/badge/Lines_in_examples-0-yellow.svg)](https://github.com/bestia-dev/indexeddb_from_rust/)
//! [![Lines in tests](https://img.shields.io/badge/Lines_in_tests-0-orange.svg)](https://github.com/bestia-dev/indexeddb_from_rust/)
//!
//! [![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/bestia-dev/indexeddb_from_rust/blob/master/LICENSE)
//! [![Rust](https://github.com/bestia-dev/indexeddb_from_rust/workflows/RustAction/badge.svg)](https://github.com/bestia-dev/indexeddb_from_rust/)
//! [![Hits](https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https%3A%2F%2Fgithub.com%2Fbestia-dev%2Findexeddb_from_rust&count_bg=%2379C83D&title_bg=%23555555&icon=&icon_color=%23E7E7E7&title=hits&edge_flat=false)](https://hits.seeyoufarm.com)
//!
//! Hashtags: #rustlang #experiment
//!
//! ## experimenting
//!
//! Indexeddb is the standard database storage inside the browser. It is not Sql. It is a document database.
//! It is more or less a key-value storage, but the value can be a javascript object, and that can be complex.
//! The api is in javascript and uses old-fashioned callbacks and events; it is completely async, but without async/await or Promises.
//! How to use it efficiently from rust? This is the question.
//! In this project I only experiment with indexeddb. In my next project <https://github.com/bestia-dev/pwa_currency_converter> I actually use these findings for something useful.
//!
//! ## Development
//!
//! In Visual Studio Code press `ctrl+j` to open the Terminal window. Use `cargo make` to see the prepared steps for deployment like:
//! `$ cargo make release`
//! And then follow the instructions on the screen like:
//!
//! - Run the web server in a separate terminal: cd ~/rustprojects/indexeddb_from_rust/web_server_folder/;basic-http-server
//! - Run the web app in your browser: <http://127.0.0.1:4000/indexeddb_from_rust/>
//!
//! Oh, today I did everything right, but the browser said "This site can't be reached". After many attempts I discovered that the WSL2 `localhost` or `127.0.0.1` connection to Win10 is broken after putting the laptop to sleep. I have to restart the WSL in PowerShell (Run as administrator) with
//! `Get-Service LxssManager | Restart-Service`.
//! Not nice, and very difficult to discover, because WSL2 is running just fine except for this.
//!
//! In the browser (chrome, edge, firefox) use the F12 developer tools to easily see the content of indexeddb. Very convenient.
//!
//! ## plantuml diagrams
//!
//! I will give `plantuml` a try for making documentation diagrams. Diagrams are defined using a simple and intuitive language.
//! It follows the philosophy "everything as code". So it can be easily embedded in the code or documentation.
//!
//! The diagram can be created online at <http://www.plantuml.com/plantuml/umla/SoWkIImgAStDuNBAJrBGjLDmpCbCJbMmKiX8pSd9vt98pKi1IW80>
//! Then it can be exported as svg and included as an image.
//! cargo-auto contains the automation task to create the svg image and embed it inside the README.md by adding the markers.
//!
//! ```plantuml
//! @startuml
//! [rust code] ..> [idbr]
//! [idbr] ..> [idbr_imports]
//! [idbr_imports] ..> [idb_exports]
//! [idb_exports] ..> [idb]
//! [idb] ..> [indexeddb]
//!
//! note right of (rust code): with async/await without jsValue
//! note right of (idbr): rust library for indexeddb
//! note right of (idb): javascript library with async/await\nfrom jakearchibald
//! note right of (indexeddb): supported by all major browsers\nbut no support for async/await
//! @enduml
//! ```
//!
//! ![svg_YdyxlCJ7feG0pg_JDSeImB-y0msDFvya54qYMUrJO_Q](https://github.com/bestia-dev/indexeddb_from_rust/raw/main/images/svg_YdyxlCJ7feG0pg_JDSeImB-y0msDFvya54qYMUrJO_Q.svg)
//!
//! ## indexeddb, idb, idbr
//!
//! The original api for `indexeddb` is too hard and very old-fashioned, without async/await.
//! I will use the [idb](https://github.com/jakearchibald/idb) javascript library that makes `indexeddb` easier to use.
//! Javascript has changed over time; it is now at the ES2020 edition.
//! I will make a typescript/javascript module to export functions from `idb`, and then one rust module to import functions from idb_export.
//! The rust library `idbr` will use the imported functions and create rust objects/structs and methods/functions.
//! My rust code will then use only the `idbr` crate and hopefully there will be no more JsValue or other javascript peculiarities.
//!
//! ![idbrDiagram](https://github.com/bestia-dev/indexeddb_from_rust/raw/main/images/idbrDiagram.svg)
//!
//! ## Typescript adventure
//!
//! For my only javascript module, `idb_export`, I will rather use Typescript. I will write some typescript code, transpile it to javascript and invoke that from rust.
//! At runtime my code will use only the javascript file. Typescript is used only in development.
//! The Typescript compiler must be installed with `npm`, which is a part of `nodejs`. I must first install `nodejs`.
//! On Debian the package from `sudo apt install nodejs` is the old version 10. The recommended version is 14, but it is from another package source.
//! nodesource.com provides a script to add the new package source and install `nodejs`.
//! These are the commands:
//!
//! ```bash
//! cd ~
//! curl -sL https://deb.nodesource.com/setup_14.x -o nodesource_setup.sh
//! nano nodesource_setup.sh
//! sudo bash nodesource_setup.sh
//! sudo apt install nodejs
//! node -v
//! npm -v
//! sudo apt install build-essential
//! ```
//!
//! Now I can install Typescript:
//!
//! ```bash
//! npm install -g typescript
//! tsc --version
//! tsc --help
//! ```
//!
//! In the terminal I just use `tsc` to transpile my source code with settings from `tsconfig.json`.
//! I added this to my `cargo make` for easy development.
//! The typescript file is inside the `src` folder like the rust source code files.
//! The resulting javascript file is stored in the `js` folder of the web app folder.
//!
//! ## typescript/javascript imports
//!
//! I had major problems with `import` statements.
//! I first tried `npm install --save idb`. It saves the files in a separate `node_modules` folder. That didn't work nicely with my `import` statements. I don't know why.
//!
//! At last I decided to create `idb` as a separate folder and copy `node_modules/idb/build/esm` into it.
//! I needed to play with `tsconfig.json` to make it work.
//! I added the keys: baseUrl, rootDir, outDir, esModuleInterop and, most importantly, paths.
//! On one side these import paths are just like a folder structure; on the other side they are like url paths.
//! Confusing. But after long experimentation I made it work. I hope I never need to change these settings.
//!
//! ## code flow
//!
//! The browser opens `index.html`.
//! There it runs `import init from "./pkg/indexeddb_from_rust.js";`
//! and `init("./pkg/indexeddb_from_rust_bg.wasm");`
//! This is the wasm code compiled from `lib.rs`, and wasm-bindgen creates the magic to start the designated function.
//! The `idb_exports.js` is the result of typescript transpilation of `idb_exports.ts`, my only typescript module.
//! Inside that module I need to import the `idb` module with:
//! `import * as idb from '/indexeddb_from_rust/idb/index.js';`
//! Then the rust code `idbr_imports_mod.rs` imports the `idb_exports.js` javascript module and functions.
//! From here on we are in (more or less) pure rust code.
//!
//! ## missing unsafe
//! ```config
//! "rust-analyzer.diagnostics.disabled": [
//!     "missing-unsafe"
//! ]
//! ```
//!
//! ## extern "C" - importing javascript functions
//!
//! Javascript functions are imported using the `extern "C"` block.
//! For now `rustfmt` has a bug that removes the word async, because here we have javascript functions and not C functions.
//! The workaround is to add `rustfmt::skip`:
//!
//! ```rust
//! #[rustfmt::skip]
//! #[wasm_bindgen(raw_module = "/indexeddb_from_rust/js/idb_exports.js")]
//! extern "C" {
//!     fn check_browser_capability();
//!     #[wasm_bindgen(catch)]
//!     fn init_db() -> Result<(), JsValue>;
//!     #[wasm_bindgen(catch)]
//!     fn add_key_value(store: String, key: String, value: String) -> Result<(), JsValue>;
//! }
//! ```
//!
//! Importing a javascript function with no return value is simple:
//! `pub(crate) fn check_browser_capability();`
//! A javascript async function can return one JSValue.
//! `pub(crate) async fn get_key_value(key: String, ) -> JsValue;`
//! If we want to catch errors in the Promise, add the attribute `wasm_bindgen(catch)`; then the function returns `Result<JsValue, JsValue>`:
//!
//! ```rust
//! #[wasm_bindgen(catch)]
//! pub(crate) async fn init_db() -> Result<JsValue, JsValue>;
//! ```
//!
//! The imported async fn needs to be awaited just like rust async functions. The macro wasm_bindgen makes some magic to transform Promises into futures on import:
//! `let currdb = open_db().await.unwrap();`
//! Some of the functions are async and others are not. This can lead to strange problems if an async function is used as a normal function, so it is a thing to be careful about. Rust will hopefully show a warning, but javascript will not.
//!
//! ## Currency exchange rates
//!
//! I will get the daily exchange rate in json format from:
//! <http://www.floatrates.com/daily/eur.json>
//! and fill it into indexeddb.
//!
//! ## pages
//!
//! This PWA will have more pages. Pages are complete static html files inside the pages folder. They use the same css as index.html.
//! It is easy to edit and preview pages because they are complete.
//! The rust code will fetch the html, extract only the body content and set_inner_html it into div_for_wasm_html_injecting.
//! A page is a template, and some placeholders will be replaced with data.
//!
//! ## serde-wasm-bindgen
//!
//! The indexeddb is key-value: the key is a string and the value is any javascript object.
//! That is really practical for javascript, but not so for rust.
//! I will use [serde-wasm-bindgen](https://github.com/cloudflare/serde-wasm-bindgen) to work directly with javascript values from rust, because indexeddb stores javascript objects.
//! From Rust to javascript:
//! `serde_wasm_bindgen::to_value(&some_supported_rust_value)`
//! From javascript to rust:
//! `let value: SomeSupportedRustType = serde_wasm_bindgen::from_value(value)?;`
//!
//! ## idbr rust functions
//!
//! ### init_upgrade_db
//!
//! First of all the db must be initialized and upgraded.
//! `idbr_mod::Database::init_upgrade_db("currdb", 2, &rust_closure_for_upgrade).await;`
//! When the version is greater than the existing db version, it calls the rust closure.
//! The closure looks like this:
//!
//! ```rust
//! let rust_closure_for_upgrade = Closure::wrap(Box::new(
//!     move |db: JsValue, old_version: JsValue, new_version: JsValue, transaction: JsValue| {
//!         upgrade_currdb(db, old_version, new_version, transaction);
//!     },
//! ) as Box<dyn Fn(JsValue, JsValue, JsValue, JsValue)>);
//! ```
//!
//! We create a new store with: `db.create_object_store("Currency");`.
//!
//! To add/modify data in the store we must use the given `Transaction` in mode `versionchange`.
//! First we define the object store and then put the data:
//! `let cfg = tx.get_object_store("Config");`
//! `cfg.put("base_currency", "EUR");`
//!
//! ### modify one value with an implicit transaction
//!
//! ```rust
//! let db = idb::Database::use_db("currdb").await;
//! db.put_key_value("store", "key", "value").await.unwrap();
//! ```
//!
//! ### modify many values in one transaction
//!
//! ```rust
//! let db = idb::Database::use_db("currdb").await;
//! let tx = db.transaction();
//! let store = tx.get_object_store_readwrite("Currency");
//! store.put("a", "a");
//! store.put("b", "b");
//! store.put("c", "c");
//! tx.close();
//! ```
//!
// endregion: auto_md_to_doc_comments include README.md A //!

//use unwrap::unwrap;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsValue;

mod currdb_config_mod;
mod currdb_currency_mod;
mod currdb_mod;
mod idbr_imports_mod;
mod idbr_mod;
mod page_input_currency_mod;
mod page_main_mod;
mod page_modal_about_mod;
mod page_output_currency_mod;
mod utils_mod;
mod web_sys_mod;

use wasm_bindgen_futures::spawn_local;

use crate::web_sys_mod as w;

#[wasm_bindgen(start)]
/// To start the Wasm application, wasm_bindgen runs this function
pub fn wasm_bindgen_start() -> Result<(), JsValue> {
    // Initialize debugging for when/if something goes wrong.
    console_error_panic_hook::set_once();
    // write the app version just for debug purposes
    w::debug_write(&format!(
        "indexeddb_from_rust v{}",
        env!("CARGO_PKG_VERSION")
    ));
    crate::idbr_mod::check_browser_capability();
    // async block
    spawn_local(async {
        crate::currdb_mod::init_upgrade_currdb().await;
        crate::page_main_mod::page_main().await;
    });
    // return
    Ok(())
}
//!
//! When importing javascript functions with `#[wasm_bindgen]` and `extern "C"`, rust-analyzer shows a warning about `missing unsafe`. This is not correct; the rustc compiler compiles it just fine. It is because the attribute macro wasm_bindgen uses magic and makes it safe, but rust-analyzer (for now) cannot understand attribute macros.
//! If you are using VS Code and want to disable the missing-unsafe rule until it is fixed, adding the following to your settings.json and reloading your editor will suppress these errors:
//!
math_test.go
/* * Copyright 2021 Chen Quan * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package math import ( "testing" ) func TestMaxInt(t *testing.T) { type args struct { a int b int } tests := []struct { name string args args want int }{ {"1", args{ a: 1, b: 2, }, 2, }, {"2", args{ a: 2, b: 1, }, 2, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := MaxInt(tt.args.a, tt.args.b); got != tt.want { t.Errorf("MaxInt() = %v, want %v", got, tt.want) } }) } } func TestMaxInt64(t *testing.T) { type args struct { a int64 b int64 } tests := []struct { name string args args want int64 }{ { "1", args{ a: 1, b: 2, }, 2, }, { "2", args{ a: 2, b: 1, }, 2, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := MaxInt64(tt.args.a, tt.args.b); got != tt.want { t.Errorf("MaxInt64() = %v, want %v", got, tt.want) } }) } } func TestMinInt(t *testing.T) { type args struct { a int b int } tests := []struct { name string args args want int }{ { "1", args{ a: 1, b: 2, }, 1, }, { "2", args{ a: 2, b: 1, }, 1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := MinInt(tt.args.a, tt.args.b); got != tt.want { t.Errorf("MinInt() = %v, want %v", got, tt.want) } }) } } func TestMinInt64(t *testing.T) { type args struct { a int64 b int64 }
name string args args want int64 }{ { "1", args{ a: 1, b: 2, }, 1, }, { "2", args{ a: 2, b: 1, }, 1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := MinInt64(tt.args.a, tt.args.b); got != tt.want { t.Errorf("MinInt64() = %v, want %v", got, tt.want) } }) } }
tests := []struct {
0010_auto_20210803_1026.py
# -*- coding: utf-8 -*- """ TencentBlueKing is pleased to support the open source community by making ่“้ฒธๆ™บไบ‘-็”จๆˆท็ฎก็†(Bk-User) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # Generated by Django 3.2.5 on 2021-08-03 02:26 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [
('categories', '0009_auto_20210413_1702'), ] operations = [ migrations.CreateModel( name='SyncProgress', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('create_time', models.DateTimeField(auto_now_add=True)), ('update_time', models.DateTimeField(auto_now=True)), ('task_id', models.UUIDField(db_index=True, verbose_name='ไปปๅŠกid')), ('step', models.CharField(choices=[('users', '็”จๆˆทๆ•ฐๆฎๆ›ดๆ–ฐ'), ('departments', '็ป„็ป‡ๆ•ฐๆฎๆ›ดๆ–ฐ'), ('users_relationship', '็”จๆˆท้—ดๅ…ณ็ณปๆ•ฐๆฎๆ›ดๆ–ฐ'), ('dept_user_relationship', '็”จๆˆทๅ’Œ็ป„็ป‡ๅ…ณ็ณปๆ•ฐๆฎๆ›ดๆ–ฐ')], max_length=32, verbose_name='ๅŒๆญฅๆญฅ้ชค')), ('status', models.CharField(choices=[('successful', 'ๆˆๅŠŸ'), ('failed', 'ๅคฑ่ดฅ'), ('running', 'ๅŒๆญฅไธญ')], default='running', max_length=16, verbose_name='็Šถๆ€')), ('successful_count', models.IntegerField(verbose_name='ๅŒๆญฅๆˆๅŠŸๆ•ฐ้‡', default=0)), ('failed_count', models.IntegerField(verbose_name='ๅŒๆญฅๅคฑ่ดฅๆ•ฐ้‡', default=0)), ], ), migrations.AlterField( model_name='profilecategory', name='type', field=models.CharField(choices=[('local', 'ๆœฌๅœฐ็›ฎๅฝ•'), ('mad', 'Microsoft Active Directory'), ('ldap', 'OpenLDAP'), ('tof', 'TOF'), ('custom', '่‡ชๅฎšไน‰็›ฎๅฝ•'), ('pluggable', 'ๅฏๆ’ๆ‹”็›ฎๅฝ•')], max_length=32, verbose_name='็ฑปๅž‹'), ), migrations.CreateModel( name='SyncProgressLog', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('create_time', models.DateTimeField(auto_now_add=True)), ('update_time', models.DateTimeField(auto_now=True)), ('logs', models.TextField(verbose_name='ๆ—ฅๅฟ—')), ('failed_records', models.JSONField(default=list)), ('progress', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='log', to='categories.syncprogress')), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='syncprogress', name='category', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='categories.profilecategory', verbose_name='็”จๆˆท็›ฎๅฝ•'), ), migrations.AlterUniqueTogether( name='syncprogress', unique_together={('category', 'step', 'task_id')}, ), ]
xprates.go
package jagex // normalXPRates for each skill var normalXPRates = [...]int{ 0, 90000, 90000, 90000, 300000, 150000, 200000, 100000, 400000, 70000, 250000, 70000, 200000, 150000, 250000, 60000, 200000, 44000, 100000, 50000, 100000, 50000, 120000, 400000, } // ironmanXPRates for each skill var ironmanXPRates = [...]int{ 0, 90000, 90000, 90000, 250000, 120000, 150000, 90000, 350000, 50000, 200000, 60000, 150000, 125000, 200000, 50000, 150000, 44000, 90000, 50000, 100000, 50000, 100000,
} // ultimateXPRates for each skill var ultimateXPRates = [...]int{ 0, 70000, 70000, 70000, 200000, 100000, 125000, 80000, 300000, 50000, 175000, 50000, 125000, 100000, 150000, 50000, 125000, 44000, 80000, 45000, 90000, 50000, 100000, 250000, }
300000,
pronunciation-normalization-issue.ts
import { WordPronunciation } from '../../pronunciation-sources.ts/pronunciation-source'; import { PronunciationNormalizationError } from '../normalization'; import { DictionaryCreationIssueBase } from './dictionary-creation-issue-base'; import { errorTypeToMessage } from './utils'; export class PronunciationNormalizationIssue extends DictionaryCreationIssueBase { constructor( private wordPronunciation: WordPronunciation, private error: PronunciationNormalizationError, ) { super(wordPronunciation.language); } get message() { return errorTypeToMessage(this.error.type); } get cells() { const { location } = this.error; return [ { title: 'Invalid IPA symbol', value: JSON.stringify( location.input.substring(location.start, location.end), ), }, { title: 'Pronunciation', value: this.wordPronunciation.pronunciation, }, { title: 'Word', value: this.wordPronunciation.word, }, ]; } }
issue195.go
// run package main import ( "fmt" ) func pointer() *int { a := 10 return &a } func main()
{ fmt.Print(*pointer()) }
path.py
""" sphinx.testing.path ~~~~~~~~~~~~~~~~~~~ :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ import os import shutil import sys if False: # For type annotation import builtins # NOQA from typing import Any, Callable, IO, List # NOQA FILESYSTEMENCODING = sys.getfilesystemencoding() or sys.getdefaultencoding() class path(str): """ Represents a path which behaves like a string. """ @property def parent(self): # type: () -> path """ The name of the directory the file or directory is in. """ return self.__class__(os.path.dirname(self)) def basename(self): # type: () -> str return os.path.basename(self) def abspath(self): # type: () -> path """ Returns the absolute path. """ return self.__class__(os.path.abspath(self)) def isabs(self): # type: () -> bool """ Returns ``True`` if the path is absolute. """ return os.path.isabs(self) def isdir(self): # type: () -> bool """ Returns ``True`` if the path is a directory. """ return os.path.isdir(self) def isfile(self): # type: () -> bool """ Returns ``True`` if the path is a file. """ return os.path.isfile(self) def islink(self): # type: () -> bool """ Returns ``True`` if the path is a symbolic link. """ return os.path.islink(self) def ismount(self): # type: () -> bool """ Returns ``True`` if the path is a mount point. """ return os.path.ismount(self) def rmtree(self, ignore_errors=False, onerror=None): # type: (bool, Callable) -> None """ Removes the file or directory and any files or directories it may contain. :param ignore_errors: If ``True`` errors are silently ignored, otherwise an exception is raised in case an error occurs. :param onerror: A callback which gets called with the arguments `func`, `path` and `exc_info`. `func` is one of :func:`os.listdir`, :func:`os.remove` or :func:`os.rmdir`. `path` is the argument to the function which caused it to fail and `exc_info` is a tuple as returned by :func:`sys.exc_info`. """ shutil.rmtree(self, ignore_errors=ignore_errors, onerror=onerror) def copytree(self, destination, symlinks=False): # type: (str, bool) -> None """ Recursively copy a directory to the given `destination`. If the given `destination` does not exist it will be created. :param symlinks: If ``True`` symbolic links in the source tree result in symbolic links in the destination tree otherwise the contents of the files pointed to by the symbolic links are copied. """ shutil.copytree(self, destination, symlinks=symlinks) def movetree(self, destination): # type: (str) -> None """ Recursively move the file or directory to the given `destination` similar to the Unix "mv" command. If the `destination` is a file it may be overwritten depending on the :func:`os.rename` semantics. """ shutil.move(self, destination) move = movetree def unlink(self): # type: () -> None """ Removes a file. """ os.unlink(self) def stat(self): # type: () -> Any """ Returns a stat of the file. """ return os.stat(self) def utime(self, arg): # type: (Any) -> None os.utime(self, arg) def open(self, mode='r', **kwargs): # type: (str, Any) -> IO return open(self, mode, **kwargs) def write_text(self, text, encoding='utf-8', **kwargs): # type: (str, str, Any) -> None """ Writes the given `text` to the file. """ with open(self, 'w', encoding=encoding, **kwargs) as f: f.write(text) def text(self, encoding='utf-8', **kwargs): # type: (str, Any) -> str """ Returns the text in the file. 
""" with open(self, encoding=encoding, **kwargs) as f: return f.read() def bytes(self): # type: () -> builtins.bytes """ Returns the bytes in the file. """ with open(self, mode='rb') as f: return f.read() def
(self, bytes, append=False):
        # type: (builtins.bytes, bool) -> None
        """
        Writes the given `bytes` to the file.

        :param append:
            If ``True`` given `bytes` are added at the end of the file.
        """
        if append:
            mode = 'ab'
        else:
            mode = 'wb'
        with open(self, mode=mode) as f:
            f.write(bytes)

    def exists(self):
        # type: () -> bool
        """
        Returns ``True`` if the path exists.
        """
        return os.path.exists(self)

    def lexists(self):
        # type: () -> bool
        """
        Returns ``True`` if the path exists, including broken symbolic
        links (unlike :meth:`exists`).
        """
        return os.path.lexists(self)

    def makedirs(self, mode=0o777, exist_ok=False):
        # type: (int, bool) -> None
        """
        Recursively create directories.
        """
        os.makedirs(self, mode, exist_ok=exist_ok)

    def joinpath(self, *args):
        # type: (Any) -> path
        """
        Joins the path with the argument given and returns the result.
        """
        return self.__class__(os.path.join(self, *map(self.__class__, args)))

    def listdir(self):
        # type: () -> List[str]
        return os.listdir(self)

    __div__ = __truediv__ = joinpath

    def __repr__(self):
        # type: () -> str
        return '%s(%s)' % (self.__class__.__name__, super().__repr__())
write_bytes
lib.rs
mod error; pub use crate::error::Error; pub fn getrandom(_: &mut [u8]) -> Result<(), Error> { Ok(())
}
bitcoin_el_GR.ts
<?xml version="1.0" ?><!DOCTYPE TS><TS language="el_GR" version="2.1"> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About MinersHeavenCoin</source> <translation type="unfinished"/> </message> <message> <location line="+39"/> <source>&lt;b&gt;MinersHeavenCoin&lt;/b&gt; version</source> <translation type="unfinished"/> </message> <message> <location line="+41"/> <source>Copyright ยฉ 2009-2014 The Bitcoin developers Copyright ยฉ 2012-2014 The NovaCoin developers Copyright ยฉ 2014 The MinersHeavenCoin developers</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source> <translation> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</translation> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <source>Double-click to edit address or label</source> <translation>ฮ”ฮนฯ€ฮปฯŒ-ฮบฮปฮนฮบ ฮณฮนฮฑ ฮตฯ€ฮตฮพฮตฯฮณฮฑฯƒฮฏฮฑ ฯ„ฮทฯ‚ ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮทฯ‚ ฮฎ ฯ„ฮทฯ‚ ฮตฯ„ฮนฮบฮญฯ„ฮฑฯ‚</translation> </message> <message> <location line="+27"/> <source>Create a new address</source> <translation>ฮ”ฮทฮผฮนฮฟฯฯฮณฮทฯƒฮต ฮฝฮญฮฑ ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮท</translation> </message> <message> <location line="+14"/> <source>Copy the currently selected address to the system clipboard</source> <translation>ฮ‘ฮฝฯ„ฮญฮณฯฮฑฯˆฮต ฯ„ฮทฮฝ ฮตฯ€ฮนฮปฮตฮณฮผฮญฮฝฮท ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮท ฯƒฯ„ฮฟ ฯ€ฯฯŒฯ‡ฮตฮนฯฮฟ ฯ„ฮฟฯ… ฯƒฯ…ฯƒฯ„ฮฎฮผฮฑฯ„ฮฟฯ‚</translation> </message> <message> <location line="-11"/> <source>&amp;New Address</source> <translation type="unfinished"/> </message> <message> <location line="-46"/> <source>These are your MinersHeavenCoin addresses for receiving payments. 
You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation type="unfinished"/> </message> <message> <location line="+60"/> <source>&amp;Copy Address</source> <translation>&amp;ฮ‘ฮฝฯ„ฮนฮณฯฮฑฯ†ฮฎ ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮทฯ‚</translation> </message> <message> <location line="+11"/> <source>Show &amp;QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Sign a message to prove you own a MinersHeavenCoin address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Delete the currently selected address from the list</source> <translation>ฮ‘ฮฝฯ„ฮนฮณฯฮฑฯ†ฮท ฯ„ฮทฯ‚ ฮตฯ€ฮนฮปฮตฮณฮผฮตฮฝฮทฯ‚ ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮทฯ‚ ฯƒฯ„ฮฟ ฯ€ฯฯŒฯ‡ฮตฮนฯฮฟ ฯ„ฮฟฯ… ฯƒฯ…ฯƒฯ„ฮทฮผฮฑฯ„ฮฟฯ‚</translation> </message> <message> <location line="-14"/> <source>Verify a message to ensure it was signed with a specified MinersHeavenCoin address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>&amp;Delete</source> <translation>&amp;ฮ”ฮนฮฑฮณฯฮฑฯ†ฮฎ</translation> </message> <message> <location filename="../addressbookpage.cpp" line="+65"/> <source>Copy &amp;Label</source> <translation>ฮ‘ฮฝฯ„ฮนฮณฯฮฑฯ†ฮฎ &amp;ฮตฯ€ฮนฮณฯฮฑฯ†ฮฎฯ‚</translation> </message> <message> <location line="+2"/> <source>&amp;Edit</source> <translation>&amp;ฮ•ฯ€ฮตฮพฮตฯฮณฮฑฯƒฮฏฮฑ</translation> </message> <message> <location line="+250"/> <source>Export Address Book Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>ฮ‘ฯฯ‡ฮตฮฏฮฟ ฮฟฯฮนฮฟฮธฮตฯ„ฮทฮผฮญฮฝฮฟ ฮผฮต ฮบฯŒฮผฮผฮฑฯ„ฮฑ (*.csv)</translation> </message> <message> <location line="+13"/> <source>Error exporting</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"/> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+144"/> <source>Label</source> <translation>ฮ•ฯ„ฮนฮบฮญฯ„ฮฑ</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>ฮ”ฮนฮตฯฮธฯ…ฮฝฯƒฮท</translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation>(ฯ‡ฯ‰ฯฮฏฯ‚ ฮตฯ„ฮนฮบฮญฯ„ฮฑ)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation>ฮฆฯฮฌฯƒฮท ฯ€ฯฯŒฯƒฮฒฮฑฯƒฮทฯ‚ </translation> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation>ฮ’ฮฌฮปฯ„ฮต ฮบฯ‰ฮดฮนฮบฯŒ ฯ€ฯฯŒฯƒฮฒฮฑฯƒฮทฯ‚</translation> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation>ฮฮญฮฟฯ‚ ฮบฯ‰ฮดฮนฮบฯŒฯ‚ ฯ€ฯฯŒฯƒฮฒฮฑฯƒฮทฯ‚</translation> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation>ฮ•ฯ€ฮฑฮฝฮญฮปฮฑฮฒฮต ฯ„ฮฟฮฝ ฮฝฮญฮฟ ฮบฯ‰ฮดฮนฮบฯŒ ฯ€ฯฯŒฯƒฮฒฮฑฯƒฮทฯ‚</translation> </message> <message> <location line="+33"/> <source>Serves to disable the trivial sendmoney when OS account compromised. 
Provides no real security.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>For staking only</source> <translation type="unfinished"/> </message> <message> <location filename="../askpassphrasedialog.cpp" line="+35"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;10 or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>ฮ•ฮนฯƒฮฌฮณฮตฯ„ฮต ฯ„ฮฟฮฝ ฮฝฮญฮฟ ฮบฯ‰ฮดฮนฮบฯŒ ฯ€ฯฯŒฯƒฮฒฮฑฯƒฮทฯ‚ ฯƒฯ„ฮฟฮฝ ฯ€ฮฟฯฯ„ฮฟฯ†ฯŒฮปฮน &lt;br/&gt; ฮ ฮฑฯฮฑฮบฮฑฮปฯŽ ฯ‡ฯฮทฯƒฮนฮผฮฟฯ€ฮฟฮนฮตฮฏฯƒฯ„ฮต ฮญฮฝฮฑ ฮบฯ‰ฮดฮนฮบฯŒ ฮผฮต &lt;b&gt; 10 ฮฎ ฯ€ฮตฯฮนฯƒฯƒฯŒฯ„ฮตฯฮฟฯ…ฯ‚ ฯ„ฯ…ฯ‡ฮฑฮฏฮฟฯ…ฯ‚ ฯ‡ฮฑฯฮฑฮบฯ„ฮฎฯฮตฯ‚&lt;/b&gt; ฮฎ &lt;b&gt; ฮฟฯ‡ฯ„ฯŽ ฮฎ ฯ€ฮฑฯฮฑฯ€ฮฌฮฝฯ‰ ฮปฮญฮพฮตฮนฯ‚&lt;/b&gt;.</translation> </message> <message> <location line="+1"/> <source>Encrypt wallet</source> <translation>ฮšฯฯ…ฯ€ฯ„ฮฟฮณฯฮฌฯ†ฮทฯƒฮต ฯ„ฮฟ ฯ€ฮฟฯฯ„ฮฟฯ†ฯŒฮปฮน</translation> </message> <message> <location line="+7"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>ฮ‘ฯ…ฯ„ฮท ฮท ฮตฮฝฮตฯฮณฮตฮฏฮฑ ฯ‡ฯฮตฮนฮฌฮถฮตฯ„ฮฑฮน ฯ„ฮฟฮฝ ฮบฯ‰ฮดฮนฮบฯŒ ฯ„ฮฟฯ… ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮนฮฟฯ ฮณฮนฮฑ ฮฝฮฑ ฮพฮตฮบฮปฮตฮนฮดฯŽฯƒฮตฮน ฯ„ฮฟ ฯ€ฮฟฯฯ„ฮฟฯ†ฯŒฮปฮน.</translation> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation>ฮžฮตฮบฮปฮตฮนฮดฯ‰ฯƒฮต ฯ„ฮฟ ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮน</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>ฮ‘ฯ…ฯ„ฮท ฮท ฮตฮฝฮตฯฮณฮตฮนฮฑ ฯ‡ฯฮตฮนฮฌฮถฮตฯ„ฮฑฮน ฯ„ฮฟฮฝ ฮบฯ‰ฮดฮนฮบฮฟ ฯ„ฮฟฯ… ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮนฮฟฯ… ฮณฮนฮฑ ฮฝฮฑ ฮฑฯ€ฮฟฮบฯฯ…ฯ€ฯ„ฮฟฮณฯฮฑฯ†ฮทฯƒฮตฮนฮน ฯ„ฮฟ ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮน.</translation> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation>ฮ‘ฯ€ฮฟฮบฯฯ…ฯ€ฯ„ฮฟฮณฯฮฌฯ†ฮทฯƒฮต ฯ„ฮฟ ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮน</translation> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation>ฮ†ฮปฮปฮฑฮพฮต ฮบฯ‰ฮดฮนฮบฮฟ ฯ€ฯฯŒฯƒฮฒฮฑฯƒฮทฯ‚</translation> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation>ฮ•ฮนฯƒฮฌฮณฮตฯ„ฮต ฯ„ฮฟฮฝ ฯ€ฮฑฮปฮนฯŒ ฮบฮฑฮน ฯ„ฮฟฮฝ ฮฝฮตฮฟ ฮบฯ‰ฮดฮนฮบฮฟ ฯƒฯ„ฮฟ ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮน.</translation> </message> <message> <location line="+46"/> <source>Confirm wallet encryption</source> <translation>ฮ•ฯ€ฮนฮฒฮตฮฒฮฑฮฏฯ‰ฯƒฮต ฯ„ฮทฮฝ ฮบฯฯ…ฯ€ฯ„ฮฟฮณฯฮฑฯ†ฮทฯƒฮท ฯ„ฮฟฯ… ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮนฮฟฯ</translation> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR COINS&lt;/b&gt;!</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation>ฮ•ฮฏฯƒฯ„ฮต ฯƒฮฏฮณฮฟฯ…ฯฮฟฮน ฯŒฯ„ฮน ฮธฮญฮปฮตฯ„ฮต ฮฝฮฑ ฮบฯฯ…ฯ€ฯ„ฮฟฮณฯฮฑฯ†ฮฎฯƒฮตฯ„ฮต ฯ„ฮฟ ฯ€ฮฟฯฯ„ฮฟฯ†ฯŒฮปฮน ฯƒฮฑฯ‚;</translation> </message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. 
For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>ฮฃฮ—ฮœฮ‘ฮฮคฮ™ฮšฮŸ: ฮคฮฑ ฯ€ฯฮฟฮทฮณฮฟฯฮผฮตฮฝฮฑ ฮฑฮฝฯ„ฮฏฮณฯฮฑฯ†ฮฑ ฮฑฯƒฯ†ฮฑฮปฮตฮฏฮฑฯ‚ ฯ€ฮฟฯ… ฮญฯ‡ฮตฯ„ฮต ฮบฮฌฮฝฮตฮน ฮฑฯ€ฯŒ ฯ„ฮฟ ฮฑฯฯ‡ฮตฮฏฮฟ ฯ„ฮฟฯ… ฯ€ฮฟฯฯ„ฮฟฯ†ฯŒฮปฮนฮฟฯ… ฯƒฮฑฯ‚ ฮธฮฑ ฯ€ฯฮญฯ€ฮตฮน ฮฝฮฑ ฮฑฮฝฯ„ฮนฮบฮฑฯ„ฮฑฯƒฯ„ฮฑฮธฮฟฯ…ฮฝ ฮผฮต ฯ„ฮฟ ฮฝฮญฮฟ ฯ€ฮฟฯ… ฮดฮทฮผฮนฮฟฯ…ฯฮณฮตฮฏฯ„ฮฑฮน, ฮบฯฯ…ฯ€ฯ„ฮฟฮณฯฮฑฯ†ฮทฮผฮญฮฝฮฟ ฮฑฯฯ‡ฮตฮฏฮฟ ฯ€ฮฟฯฯ„ฮฟฯ†ฯŒฮปฮนฮฟฯ…. ฮ“ฮนฮฑ ฮปฯŒฮณฮฟฯ…ฯ‚ ฮฑฯƒฯ†ฮฑฮปฮตฮฏฮฑฯ‚, ฯ„ฮฑ ฯ€ฯฮฟฮทฮณฮฟฯฮผฮตฮฝฮฑ ฮฑฮฝฯ„ฮฏฮณฯฮฑฯ†ฮฑ ฮฑฯƒฯ†ฮฑฮปฮตฮฏฮฑฯ‚ ฯ„ฮฟฯ… ฮผฮท ฮบฯฯ…ฯ€ฯ„ฮฟฮณฯฮฑฯ†ฮทฮผฮญฮฝฮฟฯ… ฮฑฯฯ‡ฮตฮฏฮฟฯ… ฯ€ฮฟฯฯ„ฮฟฯ†ฯŒฮปฮนฮฟฯ… ฮธฮฑ ฮบฮฑฯ„ฮฑฯƒฯ„ฮฟฯ…ฮฝ ฮฌฯ‡ฯฮทฯƒฯ„ฮฑ ฮผฯŒฮปฮนฯ‚ ฮฑฯฯ‡ฮฏฯƒฮตฯ„ฮต ฮฝฮฑ ฯ‡ฯฮทฯƒฮนฮผฮฟฯ€ฮฟฮนฮตฮฏฯ„ฮต ฯ„ฮฟ ฮฝฮญฮฟ ฮบฯฯ…ฯ€ฯ„ฮฟฮณฯฮฑฯ†ฮทฮผฮญฮฝฮฟ ฯ€ฮฟฯฯ„ฮฟฯ†ฯŒฮปฮน. </translation> </message> <message> <location line="+103"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation>ฮ ฯฮฟฯƒฮฟฯ‡ฮท: ฯ„ฮฟ ฯ€ฮปฮฎฮบฯ„ฯฮฟ Caps Lock ฮตฮฏฮฝฮฑฮน ฮตฮฝฮตฯฮณฮฟ.</translation> </message> <message> <location line="-133"/> <location line="+60"/> <source>Wallet encrypted</source> <translation>ฮšฯฯ…ฯ€ฯ„ฮฟฮณฯฮฑฯ†ฮทฮผฮตฮฝฮฟ ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮน</translation> </message> <message> <location line="-58"/> <source>MinersHeavenCoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+44"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation>ฮ— ฮบฯฯ…ฯ€ฯ„ฮฟฮณฯฮฑฯ†ฮทฯƒฮท ฯ„ฮฟฯ… ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮนฮฟฯ ฮฑฯ€ฮญฯ„ฯ…ฯ‡ฮต</translation> </message> <message> <location line="-56"/> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>ฮ— ฮบฯฯ…ฯ€ฯ„ฮฟฮณฯฮฌฯ†ฮทฯƒฮท ฯ„ฮฟฯ… ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮนฮฟฯ ฮฑฯ€ฮญฯ„ฯ…ฯ‡ฮต ฮปฮฟฮณฯ‰ ฮตฯƒฯ‰ฯ„ฮตฯฮนฮบฮฟฯ ฯƒฯ†ฮฌฮปฮผฮฑฯ„ฮฟฯ‚. 
ฮคฮฟ ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮน ฮดฮตฮฝ ฮบฯฯ…ฯ€ฯ„ฮฟฮณฯฮฑฯ†ฮทฮธฮทฮบฮต.</translation> </message> <message> <location line="+7"/> <location line="+50"/> <source>The supplied passphrases do not match.</source> <translation>ฮŸฮน ฮตฮนฯƒฮฑฯ‡ฮธฮญฮฝฯ„ฮตฯ‚ ฮบฯ‰ฮดฮนฮบฮฟฮฏ ฮดฮตฮฝ ฯ„ฮฑฮนฯฮนฮฌฮถฮฟฯ…ฮฝ.</translation> </message> <message> <location line="-38"/> <source>Wallet unlock failed</source> <translation>ฯ„ฮฟ ฮพฮตฮบฮปฮตฮฏฮดฯ‰ฮผฮฑ ฯ„ฮฟฯ… ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮนฮฟฯ ฮฑฯ€ฮญฯ„ฯ…ฯ‡ฮต</translation> </message> <message> <location line="+1"/> <location line="+12"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>ฮŸ ฮบฯ‰ฮดฮนฮบฮฟฯ‚ ฯ€ฮฟฯ… ฮตฮนฯƒฮฎฯ‡ฮธฮท ฮณฮนฮฑ ฯ„ฮทฮฝ ฮฑฯ€ฮฟฮบฯฯ…ฯ€ฯ„ฮฟฮณฯฮฑฯ†ฮทฯƒฮท ฯ„ฮฟฯ… ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮนฮฟฯ ฮฎฯ„ฮฑฮฝ ฮปฮฑฮธฮฟฯ‚.</translation> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation>ฮ— ฮฑฯ€ฮฟฮบฯฯ…ฯ€ฯ„ฮฟฮณฯฮฑฯ†ฮทฯƒฮท ฯ„ฮฟฯ… ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮนฮฟฯ ฮฑฯ€ฮญฯ„ฯ…ฯ‡ฮต</translation> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation>ฮŸ ฮบฯ‰ฮดฮนฮบฮฟฯ‚ ฯ„ฮฟฯ… ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮนฮฟฯ ฮฌฮปฮปฮฑฮพฮต ฮผฮต ฮตฯ€ฮนฯ„ฯ…ฯ‡ฮฏฮฑ.</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+280"/> <source>Sign &amp;message...</source> <translation>ฮฅฯ€ฮฟฮณฯฮฑฯ†ฮฎ &amp;ฮœฮทฮฝฯฮผฮฑฯ„ฮฟฯ‚...</translation> </message> <message> <location line="+242"/> <source>Synchronizing with network...</source> <translation>ฮฃฯ…ฮณฯ‡ฯฮฟฮฝฮนฯƒฮผฯŒฯ‚ ฮผฮต ฯ„ฮฟ ฮดฮฏฮบฯ„ฯ…ฮฟ...</translation> </message> <message> <location line="-308"/> <source>&amp;Overview</source> <translation>&amp;ฮ•ฯ€ฮนฯƒฮบฯŒฯ€ฮทฯƒฮท</translation> </message> <message> <location line="+1"/> <source>Show general overview of wallet</source> <translation>ฮ•ฮผฯ†ฮฌฮฝฮนฯƒฮต ฯ„ฮท ฮณฮตฮฝฮนฮบฮฎ ฮตฮนฮบฯŒฮฝฮฑ ฯ„ฮฟฯ… ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮนฮฟฯ</translation> </message> <message> <location line="+17"/> <source>&amp;Transactions</source> <translation>&amp;ฮฃฯ…ฮฝฮฑฮปฮปฮฑฮณฮญฯ‚</translation> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation>ฮ ฮตฯฮนฮฎฮณฮทฯƒฮท ฯƒฯ„ฮฟ ฮนฯƒฯ„ฮฟฯฮนฮบฯŒ ฯƒฯ…ฮฝฮฑฮปฮปฮฑฮณฯŽฮฝ</translation> </message> <message> <location line="+5"/> <source>&amp;Address Book</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Edit the list of stored addresses and labels</source> <translation type="unfinished"/> </message> <message> <location line="-13"/> <source>&amp;Receive coins</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show the list of addresses for receiving payments</source> <translation type="unfinished"/> </message> <message> <location line="-7"/> <source>&amp;Send coins</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>E&amp;xit</source> <translation>ฮˆ&amp;ฮพฮฟฮดฮฟฯ‚</translation> </message> <message> <location line="+1"/> <source>Quit application</source> <translation>ฮ•ฮพฮฟฮดฮฟฯ‚ ฮฑฯ€ฯŒ ฯ„ฮทฮฝ ฮตฯ†ฮฑฯฮผฮฟฮณฮฎ</translation> </message> <message> <location line="+4"/> <source>Show information about MinersHeavenCoin</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>About &amp;Qt</source> <translation>ฮฃฯ‡ฮตฯ„ฮนฮบฮฌ ฮผฮต &amp;Qt</translation> </message> <message> <location line="+1"/> <source>Show information about Qt</source> <translation>ฮ•ฮผฯ†ฮฌฮฝฮนฯƒฮต ฯ€ฮปฮทฯฮฟฯ†ฮฟฯฮฏฮตฯ‚ ฯƒฯ‡ฮตฯ„ฮนฮบฮฌ 
με Qt</translation> </message> <message> <location line="+2"/> <source>&amp;Options...</source> <translation>&amp;Επιλογές...</translation> </message> <message> <location line="+4"/> <source>&amp;Encrypt Wallet...</source> <translation>&amp;Κρυπτογράφησε το πορτοφόλι</translation> </message> <message> <location line="+3"/> <source>&amp;Backup Wallet...</source> <translation>&amp;Αντίγραφο ασφαλείας του πορτοφολιού</translation> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation>&amp;Άλλαξε κωδικο πρόσβασης</translation> </message> <message numerus="yes"> <location line="+250"/> <source>~%n block(s) remaining</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+6"/> <source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source> <translation type="unfinished"/> </message> <message> <location line="-247"/> <source>&amp;Export...</source> <translation type="unfinished"/> </message> <message> <location line="-62"/> <source>Send coins to a MinersHeavenCoin address</source> <translation type="unfinished"/> </message> <message> <location line="+45"/> <source>Modify configuration options for MinersHeavenCoin</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>Export the data in the current tab to a file</source> <translation type="unfinished"/> </message> <message> <location line="-14"/> <source>Encrypt or decrypt wallet</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Backup wallet to another location</source> <translation>Δημιουργία αντιγράφου ασφαλείας πορτοφολιού σε άλλη τοποθεσία</translation> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation>Αλλαγή του κωδικού κρυπτογράφησης του πορτοφολιού</translation> </message> <message> <location line="+10"/> <source>&amp;Debug window</source> <translation>&amp;Παράθυρο αποσφαλμάτωσης</translation> </message> <message> <location line="+1"/> <source>Open debugging and diagnostic console</source> <translation>Άνοιγμα κονσόλας αποσφαλμάτωσης και διαγνωστικών</translation> </message> <message> <location line="-5"/> <source>&amp;Verify message...</source> <translation>&amp;Επιβεβαίωση μηνύματος</translation> </message> <message> <location line="-200"/> <source>MinersHeavenCoin</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Wallet</source> <translation>Πορτοφόλι</translation> </message> <message> <location line="+178"/> <source>&amp;About MinersHeavenCoin</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>&amp;Show / Hide</source> <translation>&amp;Εμφάνισε/Κρύψε</translation> </message> <message> <location line="+9"/> <source>Unlock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>&amp;Lock Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Lock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+34"/> <source>&amp;File</source> 
<translation>&amp;Αρχείο</translation> </message> <message> <location line="+8"/> <source>&amp;Settings</source> <translation>&amp;Ρυθμίσεις</translation> </message> <message> <location line="+8"/> <source>&amp;Help</source> <translation>&amp;Βοήθεια</translation> </message> <message> <location line="+9"/> <source>Tabs toolbar</source> <translation>Εργαλειοθήκη καρτελών</translation> </message> <message> <location line="+8"/> <source>Actions toolbar</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+9"/> <source>[testnet]</source> <translation>[testnet]</translation> </message> <message> <location line="+0"/> <location line="+60"/> <source>MinersHeavenCoin client</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+70"/> <source>%n active connection(s) to MinersHeavenCoin network</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+40"/> <source>Downloaded %1 blocks of transaction history.</source> <translation type="unfinished"/> </message> <message> <location line="+413"/> <source>Staking.&lt;br&gt;Your weight is %1&lt;br&gt;Network weight is %2&lt;br&gt;Expected time to earn reward is %3</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Not staking because wallet is locked</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because wallet is offline</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because wallet is syncing</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because you don&apos;t have mature coins</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-403"/> <source>%n second(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="-284"/> <source>&amp;Unlock Wallet...</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+288"/> <source>%n minute(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n hour(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+6"/> <source>Up to date</source> <translation>Ενημερωμένο</translation> </message> <message> <location line="+7"/> <source>Catching up...</source> <translation>Ενημέρωση...</translation> </message> <message> <location line="+10"/> <source>Last received block was generated %1.</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. 
Do you want to pay the fee?</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm transaction fee</source> <translation type="unfinished"/> </message> <message> <location line="+27"/> <source>Sent transaction</source> <translation>Η συναλλαγή απεστάλη</translation> </message> <message> <location line="+1"/> <source>Incoming transaction</source> <translation>Εισερχόμενη συναλλαγή</translation> </message> <message> <location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation>Ημερομηνία: %1 Ποσό: %2 Τύπος: %3 Διεύθυνση: %4 </translation> </message> <message> <location line="+100"/> <location line="+15"/> <source>URI handling</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <location line="+15"/> <source>URI can not be parsed! This can be caused by an invalid MinersHeavenCoin address or malformed URI parameters.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>Το πορτοφόλι είναι &lt;b&gt;κρυπτογραφημένο&lt;/b&gt; και &lt;b&gt;ξεκλείδωτο&lt;/b&gt;</translation> </message> <message> <location line="+10"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>Το πορτοφόλι είναι &lt;b&gt;κρυπτογραφημένο&lt;/b&gt; και &lt;b&gt;κλειδωμένο&lt;/b&gt;</translation> </message> <message> <location line="+25"/> <source>Backup Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+76"/> <source>%n second(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n minute(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n hour(s)</source> <translation><numerusform>%n ώρες </numerusform><numerusform>%n ώρες </numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s)</source> <translation><numerusform>%n ημέρες </numerusform><numerusform>%n ημέρες </numerusform></translation> </message> <message> <location line="+18"/> <source>Not staking</source> <translation type="unfinished"/> </message> <message> <location filename="../bitcoin.cpp" line="+109"/> <source>A fatal error occurred. 
MinersHeavenCoin can no longer continue safely and will quit.</source> <translation type="unfinished"/> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+90"/> <source>Network Alert</source> <translation>Ειδοποίηση Δικτύου</translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <location filename="../forms/coincontroldialog.ui" line="+14"/> <source>Coin Control</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Quantity:</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Bytes:</source> <translation type="unfinished"/> </message> <message> <location line="+48"/> <source>Amount:</source> <translation>Ποσό:</translation> </message> <message> <location line="+32"/> <source>Priority:</source> <translation type="unfinished"/> </message> <message> <location line="+48"/> <source>Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation type="unfinished"/> </message> <message> <location filename="../coincontroldialog.cpp" line="+551"/> <source>no</source> <translation type="unfinished"/> </message> <message> <location filename="../forms/coincontroldialog.ui" line="+51"/> <source>After Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Change:</source> <translation type="unfinished"/> </message> <message> <location line="+69"/> <source>(un)select all</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Tree mode</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>List mode</source> <translation type="unfinished"/> </message> <message> <location line="+45"/> <source>Amount</source> <translation>Ποσό</translation> </message> <message> <location line="+5"/> <source>Label</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Address</source> <translation>Διεύθυνση</translation> </message> <message> <location line="+5"/> <source>Date</source> <translation>Ημερομηνία</translation> </message> <message> <location line="+5"/> <source>Confirmations</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Confirmed</source> <translation>Επικυρωμένες</translation> </message> <message> <location line="+5"/> <source>Priority</source> <translation type="unfinished"/> </message> <message> <location filename="../coincontroldialog.cpp" line="-515"/> <source>Copy address</source> <translation>Αντιγραφή διεύθυνσης</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Αντιγραφή επιγραφής</translation> </message> <message> <location line="+1"/> <location line="+26"/> <source>Copy amount</source> <translation>Αντιγραφή ποσού</translation> </message> <message> <location line="-25"/> <source>Copy transaction ID</source> <translation>Αντιγραφη του ID Συναλλαγής</translation> </message> <message> <location line="+24"/> <source>Copy quantity</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Copy fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation type="unfinished"/> </message> 
<message> <location line="+1"/> <source>Copy bytes</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy change</source> <translation type="unfinished"/> </message> <message> <location line="+317"/> <source>highest</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>high</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>medium-high</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>medium</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>low-medium</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>low</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>lowest</source> <translation type="unfinished"/> </message> <message> <location line="+155"/> <source>DUST</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>yes</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>This label turns red, if the transaction size is bigger than 10000 bytes. This means a fee of at least %1 per kb is required. Can vary +/- 1 Byte per input.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Transactions with higher priority get more likely into a block. This label turns red, if the priority is smaller than &quot;medium&quot;. This means a fee of at least %1 per kb is required.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if any recipient receives an amount smaller than %1. This means a fee of at least %2 is required. Amounts below 0.546 times the minimum relay fee are shown as DUST.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if the change is smaller than %1. This means a fee of at least %2 is required.</source> <translation type="unfinished"/> </message> <message> <location line="+37"/> <location line="+66"/> <source>(no label)</source> <translation>(χωρίς ετικέτα)</translation> </message> <message> <location line="-9"/> <source>change from %1 (%2)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>(change)</source> <translation type="unfinished"/> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation>Επεξεργασία Διεύθυνσης</translation> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation>&amp;Επιγραφή</translation> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation>&amp;Διεύθυνση</translation> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. 
This can only be modified for sending addresses.</source> <translation type="unfinished"/> </message> <message> <location filename="../editaddressdialog.cpp" line="+20"/> <source>New receiving address</source> <translation>Νέα διεύθυνση λήψης</translation> </message> <message> <location line="+4"/> <source>New sending address</source> <translation>Νέα διεύθυνση αποστολής</translation> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation>Επεξεργασία διεύθυνσης λήψης</translation> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation>Επεξεργασία διεύθυνσης αποστολής</translation> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation>Η διεύθυνση &quot;%1&quot; βρίσκεται ήδη στο βιβλίο διευθύνσεων.</translation> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid MinersHeavenCoin address.</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation>Δεν είναι δυνατό το ξεκλείδωμα του πορτοφολιού.</translation> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation>Η δημιουργία νέου κλειδιού απέτυχε.</translation> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+420"/> <location line="+12"/> <source>MinersHeavenCoin-Qt</source> <translation type="unfinished"/> </message> <message> <location line="-12"/> <source>version</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Usage:</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>UI options</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation type="unfinished"/> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation>Ρυθμίσεις</translation> </message> <message> <location line="+16"/> <source>&amp;Main</source> <translation>&amp;Κύριο</translation> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. 
Fee 0.01 recommended.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation>Αμοιβή &amp;συναλλαγής</translation> </message> <message> <location line="+31"/> <source>Reserved amount does not participate in staking and is therefore spendable at any time.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Reserve</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Automatically start MinersHeavenCoin after logging in to the system.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Start MinersHeavenCoin on system login</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Detach databases at shutdown</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>&amp;Network</source> <translation>&amp;Δίκτυο</translation> </message> <message> <location line="+6"/> <source>Automatically open the MinersHeavenCoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation>Απόδοση θυρών με χρήση &amp;UPnP</translation> </message> <message> <location line="+7"/> <source>Connect to the MinersHeavenCoin network through a SOCKS proxy (e.g. when connecting through Tor).</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS proxy:</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Proxy &amp;IP:</source> <translation>&amp;IP διαμεσολαβητή:</translation> </message> <message> <location line="+19"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Port:</source> <translation>&amp;Θύρα:</translation> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 9050)</source> <translation>Θύρα διαμεσολαβητή</translation> </message> <message> <location line="+7"/> <source>SOCKS &amp;Version:</source> <translation>SOCKS &amp;Έκδοση:</translation> </message> <message> <location line="+13"/> <source>SOCKS version of the proxy (e.g. 5)</source> <translation>SOCKS εκδοση του διαμεσολαβητη (e.g. 
5)</translation> </message> <message> <location line="+36"/> <source>&amp;Window</source> <translation>&amp;Παράθυρο</translation> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation>Εμφάνιση μόνο εικονιδίου στην περιοχή ειδοποιήσεων κατά την ελαχιστοποίηση</translation> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>&amp;Ελαχιστοποίηση στην περιοχή ειδοποιήσεων αντί της γραμμής εργασιών</translation> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation>Ελαχιστοποίηση αντί για έξοδο κατά το κλείσιμο του παραθύρου</translation> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation>Ε&amp;λαχιστοποίηση κατά το κλείσιμο</translation> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation>&amp;Απεικόνιση</translation> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation>Γλώσσα περιβάλλοντος εργασίας: </translation> </message> <message> <location line="+13"/> <source>The user interface language can be set here. This setting will take effect after restarting MinersHeavenCoin.</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation>&amp;Μονάδα μέτρησης:</translation> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Διαλέξτε την προεπιλεγμένη υποδιαίρεση που θα εμφανίζεται όταν στέλνετε νομίσματα.</translation> </message> <message> <location line="+9"/> <source>Whether to show MinersHeavenCoin addresses in the transaction list or not.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Display addresses in transaction list</source> <translation>Εμφάνιση διευθύνσεων στη λίστα συναλλαγών</translation> </message> <message> <location line="+7"/> <source>Whether to show coin control features or not.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Display coin &amp;control features (experts only!)</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;OK</source> <translation>&amp;ΟΚ</translation> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation>&amp;Ακύρωση</translation> </message> <message> <location line="+10"/> <source>&amp;Apply</source> <translation type="unfinished"/> </message> <message> <location filename="../optionsdialog.cpp" line="+55"/> <source>default</source> <translation>προεπιλογή</translation> </message> <message> <location line="+149"/> <location line="+9"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="-9"/> <location line="+9"/> 
<source>This setting will take effect after restarting MinersHeavenCoin.</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The supplied proxy address is invalid.</source> <translation>Δεν είναι έγκυρη η διεύθυνση διαμεσολαβητή</translation> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation>Φόρμα</translation> </message> <message> <location line="+33"/> <location line="+231"/> <source>The displayed information may be out of date. Your wallet automatically synchronizes with the MinersHeavenCoin network after a connection is established, but this process has not completed yet.</source> <translation type="unfinished"/> </message> <message> <location line="-160"/> <source>Stake:</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>Unconfirmed:</source> <translation type="unfinished"/> </message> <message> <location line="-107"/> <source>Wallet</source> <translation>Πορτοφόλι</translation> </message> <message> <location line="+49"/> <source>Spendable:</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Your current spendable balance</source> <translation>Το τρέχον διαθέσιμο υπόλοιπο</translation> </message> <message> <location line="+71"/> <source>Immature:</source> <translation>Ανώριμος</translation> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation>Εξορυγμενο υπόλοιπο που δεν έχει ακόμα ωριμάσει </translation> </message> <message> <location line="+20"/> <source>Total:</source> <translation>Σύνολο:</translation> </message> <message> <location line="+16"/> <source>Your current total balance</source> <translation>Το τρέχον συνολικό υπόλοιπο</translation> </message> <message> <location line="+46"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation>&lt;b&gt;Πρόσφατες συναλλαγές&lt;/b&gt;</translation> </message> <message> <location line="-108"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location line="-29"/> <source>Total of coins that was staked, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location filename="../overviewpage.cpp" line="+113"/> <location line="+1"/> <source>out of sync</source> <translation>εκτός συγχρονισμού</translation> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>Request Payment</source> <translation type="unfinished"/> </message> <message>
<source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="-44"/> <source>Label:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Message:</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;Save As...</source> <translation type="unfinished"/> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Save QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>PNG Images (*.png)</source> <translation type="unfinished"/> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> <translation>Όνομα Πελάτη</translation> </message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> <location line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+53"/> <location line="+23"/> <location line="+23"/> <location filename="../rpcconsole.cpp" line="+348"/> <source>N/A</source> <translation>Μη διαθέσιμο</translation> </message> <message> <location line="-217"/> <source>Client version</source> <translation>Έκδοση Πελάτη</translation> </message> <message> <location line="-45"/> <source>&amp;Information</source> <translation>&amp;Πληροφορία</translation> </message> <message> <location line="+68"/> <source>Using OpenSSL version</source> <translation>Χρησιμοποιηση της OpenSSL εκδοσης</translation> </message> <message> <location line="+49"/> <source>Startup time</source> <translation>Χρόνος εκκίνησης</translation> </message> <message> <location line="+29"/> <source>Network</source> <translation>Δίκτυο</translation> </message> <message> <location line="+7"/> <source>Number of connections</source> <translation>Αριθμός συνδέσεων</translation> </message> <message> <location line="+23"/> <source>On testnet</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Block chain</source> <translation>Αλυσίδα μπλοκ</translation> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation>Τρέχον αριθμός μπλοκ</translation> </message> <message> <location line="+23"/> <source>Estimated total blocks</source> <translation>Κατ&apos; εκτίμηση συνολικά μπλοκς</translation> </message> <message> <location line="+23"/> <source>Last block time</source> <translation>Χρόνος τελευταίου μπλοκ</translation> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation>&amp;Άνοιγμα</translation> </message> <message> <location line="+16"/> <source>Command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Show the MinersHeavenCoin-Qt help message to get a list with possible MinersHeavenCoin command-line options.</source> <translation 
type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Show</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation>&amp;ฮšฮฟฮฝฯƒฯŒฮปฮฑ</translation> </message> <message> <location line="-260"/> <source>Build date</source> <translation>ฮ—ฮผฮตฯฮฟฮผฮทฮฝฮฏฮฑ ฮบฮฑฯ„ฮฑฯƒฮบฮตฯ…ฮฎฯ‚</translation> </message> <message> <location line="-104"/> <source>MinersHeavenCoin - Debug window</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>MinersHeavenCoin Core</source> <translation type="unfinished"/> </message> <message> <location line="+279"/> <source>Debug log file</source> <translation>ฮ‘ฯฯ‡ฮตฮฏฮฟ ฮบฮฑฯ„ฮฑฮณฯฮฑฯ†ฮฎฯ‚ ฮตฮฝฯ„ฮฟฯ€ฮนฯƒฮผฮฟฯ ฯƒฯ†ฮฑฮปฮผฮฌฯ„ฯ‰ฮฝ </translation> </message> <message> <location line="+7"/> <source>Open the MinersHeavenCoin debug log file from the current data directory. This can take a few seconds for large log files.</source> <translation type="unfinished"/> </message> <message> <location line="+102"/> <source>Clear console</source> <translation>ฮšฮฑฮธฮฑฯฮนฯƒฮผฯŒฯ‚ ฮบฮฟฮฝฯƒฯŒฮปฮฑฯ‚</translation> </message> <message> <location filename="../rpcconsole.cpp" line="-33"/> <source>Welcome to the MinersHeavenCoin RPC console.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation>ฮงฯฮทฯƒฮนฮผฮฟฯ€ฮฟฮนฮฎฯƒฯ„ฮต ฯ„ฮฟ ฯ€ฮฌฮฝฯ‰ ฮบฮฑฮน ฮบฮฌฯ„ฯ‰ ฮฒฮญฮปฮฟฯ‚ ฮณฮนฮฑ ฮฝฮฑ ฯ€ฮตฯฮนฮทฮณฮทฮธฮตฮฏฯ„ฮต ฯƒฯ„ฮฟ ฮนฯƒฯ„ฮฟฯฮนฮบฮฟ, ฮบฮฑฮน &lt;b&gt;Ctrl-L&lt;/b&gt; ฮณฮนฮฑ ฮตฮบฮบฮฑฮธฮฑฯฮนฯƒฮท ฮฟฮธฮฟฮฝฮทฯ‚.</translation> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation>ฮ“ฯฮฌฯˆฯ„ฮต &lt;b&gt;help&lt;/b&gt; ฮณฮนฮฑ ฮผฮนฮฑ ฮตฯ€ฮนฯƒฮบฯŒฯ€ฮทฯƒฮท ฯ„ฯ‰ฮฝ ฮดฮนฮฑฮธฮญฯƒฮนฮผฯ‰ฮฝ ฮตฮฝฯ„ฮฟฮปฯŽฮฝ</translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+182"/> <location line="+5"/> <location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation>ฮ‘ฯ€ฮฟฯƒฯ„ฮฟฮปฮฎ ฮฝฮฟฮผฮนฯƒฮผฮฌฯ„ฯ‰ฮฝ</translation> </message> <message> <location line="+76"/> <source>Coin Control Features</source> <translation type="unfinished"/> </message> <message> <location line="+20"/> <source>Inputs...</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>automatically selected</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Insufficient funds!</source> <translation type="unfinished"/> </message> <message> <location line="+77"/> <source>Quantity:</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <location line="+35"/> <source>0</source> <translation type="unfinished"/> </message> <message> <location line="-19"/> <source>Bytes:</source> <translation type="unfinished"/> </message> <message> <location line="+51"/> <source>Amount:</source> <translation>ฮ ฮฟฯƒฯŒ:</translation> </message> <message> <location line="+22"/> <location line="+86"/> <location line="+86"/> <location line="+32"/> <source>0.00 BC</source> <translation type="unfinished"/> </message> <message> <location line="-191"/> <source>Priority:</source> <translation 
type="unfinished"/> </message> <message> <location line="+19"/> <source>medium</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>no</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>After Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Change</source> <translation type="unfinished"/> </message> <message> <location line="+50"/> <source>custom change address</source> <translation type="unfinished"/> </message> <message> <location line="+106"/> <source>Send to multiple recipients at once</source> <translation>ฮ‘ฯ€ฮฟฯƒฯ„ฮฟฮปฮฎ ฯƒฮต ฯ€ฮฟฮปฮปฮฟฯฯ‚ ฮฑฯ€ฮฟฮดฮญฮบฯ„ฮตฯ‚ ฯ„ฮฑฯ…ฯ„ฯŒฯ‡ฯฮฟฮฝฮฑ</translation> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation>&amp;ฮ ฯฮฟฯƒฮธฮฎฮบฮท ฮฑฯ€ฮฟฮดฮญฮบฯ„ฮท</translation> </message> <message> <location line="+20"/> <source>Remove all transaction fields</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation>ฮšฮฑฮธฮฑฯฮนฯƒฮผฯŒฯ‚ &amp;ฮŒฮปฯ‰ฮฝ</translation> </message> <message> <location line="+28"/> <source>Balance:</source> <translation>ฮฅฯ€ฯŒฮปฮฟฮนฯ€ฮฟ:</translation> </message> <message> <location line="+16"/> <source>123.456 BC</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Confirm the send action</source> <translation>ฮ•ฯ€ฮนฮฒฮตฮฒฮฑฮฏฯ‰ฯƒฮท ฮฑฯ€ฮฟฯƒฯ„ฮฟฮปฮฎฯ‚</translation> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation>ฮ‘ฯ€ฮฟฯƒฯ„ฮฟฮปฮท</translation> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-173"/> <source>Enter a MinersHeavenCoin address (e.g. 
M8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Copy quantity</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Αντιγραφή ποσού</translation> </message> <message> <location line="+1"/> <source>Copy fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy change</source> <translation type="unfinished"/> </message> <message> <location line="+86"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation>Επιβεβαίωση αποστολής νομισμάτων</translation> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source> and </source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The recipient address is not valid, please recheck.</source> <translation>Η διεύθυνση του αποδέκτη δεν είναι σωστή. Παρακαλώ ελέγξτε ξανά.</translation> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation>Το ποσό πληρωμής πρέπει να είναι μεγαλύτερο από 0.</translation> </message> <message> <location line="+5"/> <source>The amount exceeds your balance.</source> <translation>Το ποσό ξεπερνάει το διαθέσιμο υπόλοιπο</translation> </message> <message> <location line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>Το σύνολο υπερβαίνει το υπόλοιπό σας όταν συμπεριληφθεί και η αμοιβή %1</translation> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation>Βρέθηκε η ίδια διεύθυνση δύο φορές. Επιτρέπεται μία μόνο εγγραφή για κάθε διεύθυνση, σε κάθε διαδικασία αποστολής.</translation> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="+251"/> <source>WARNING: Invalid MinersHeavenCoin address</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>(no label)</source> <translation>(χωρίς ετικέτα)</translation> </message> <message> <location line="+4"/> <source>WARNING: unknown change address</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+14"/> <source>Form</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>A&amp;mount:</source> <translation>&amp;Ποσό:</translation> </message> <message> <location line="+13"/> <source>Pay &amp;To:</source> <translation>Πληρωμή &amp;σε:</translation> </message> <message> <location line="+24"/> <location filename="../sendcoinsentry.cpp" line="+25"/> <source>Enter a label for this address to add it to your address book</source> <translation>Εισάγετε μια επιγραφή για αυτή τη διεύθυνση ώστε να καταχωρηθεί στο βιβλίο διευθύνσεων</translation> </message> <message> <location line="+9"/> <source>&amp;Label:</source> <translation>&amp;Επιγραφή:</translation> </message> <message> <location line="+18"/> <source>The address to send the payment to (e.g. M8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Choose address from address book</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation>Επικόλληση διεύθυνσης από το πρόχειρο</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation type="unfinished"/> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a MinersHeavenCoin address (e.g. M8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation>Υπογραφές - Υπογραφή / Επαλήθευση μηνύματος </translation> </message> <message> <location line="+13"/> <location line="+124"/> <source>&amp;Sign Message</source> <translation>&amp;Υπογραφή Μηνύματος</translation> </message> <message> <location line="-118"/> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation>Μπορείτε να υπογράφετε μηνύματα με τις διευθύνσεις σας, ώστε ν&apos; αποδεικνύετε πως αυτές σας ανήκουν. 
ฮ‘ฯ€ฮฟฯ†ฮตฯฮณฮตฯ„ฮต ฮฝฮฑ ฯ…ฯ€ฮฟฮณฯฮฌฯ†ฮตฯ„ฮต ฮบฮฌฯ„ฮน ฮฑฯŒฯฮนฯƒฯ„ฮฟ ฮบฮฑฮธฯŽฯ‚ ฮตฮฝฮดฮญฯ‡ฮตฯ„ฮฑฮน ฮฝฮฑ ฮตฮพฮฑฯ€ฮฑฯ„ฮทฮธฮตฮฏฯ„ฮต. ฮฅฯ€ฮฟฮณฯฮฌฯ†ฮตฯ„ฮต ฮผฯŒฮฝฮฟ ฯ€ฮปฮฎฯฮทฯ‚ ฮดฮทฮปฯŽฯƒฮตฮนฯ‚ ฮผฮต ฯ„ฮนฯ‚ ฮฟฯ€ฮฟฮฏฮตฯ‚ ฯƒฯ…ฮผฯ†ฯ‰ฮฝฮตฮฏฯ„ฮต.</translation> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. M8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <location line="+203"/> <source>Choose an address from the address book</source> <translation type="unfinished"/> </message> <message> <location line="-193"/> <location line="+203"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="-193"/> <source>Paste address from clipboard</source> <translation>ฮ•ฯ€ฮนฮบฯŒฮปฮปฮทฯƒฮท ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮทฯ‚ ฮฑฯ€ฯŒ ฯ„ฮฟ ฮฒฮนฮฒฮปฮฏฮฟ ฮดฮนฮตฯ…ฮธฯฮฝฯƒฮตฯ‰ฮฝ</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation>ฮ•ฮนฯƒฮฌฮณฮตฯ„ฮต ฮตฮดฯŽ ฯ„ฮฟ ฮผฮฎฮฝฯ…ฮผฮฑ ฯ€ฮฟฯ… ฮธฮญฮปฮตฯ„ฮต ฮฝฮฑ ฯ…ฯ€ฮฟฮณฯฮฌฯˆฮตฯ„ฮต</translation> </message> <message> <location line="+24"/> <source>Copy the current signature to the system clipboard</source> <translation>ฮ‘ฮฝฯ„ฮญฮณฯฮฑฯ†ฮท ฯ„ฮทฯ‚ ฮตฯ€ฮนฮปฮตฮณฮผฮตฮฝฮทฯ‚ ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮทฯ‚ ฯƒฯ„ฮฟ ฯ€ฯฯŒฯ‡ฮตฮนฯฮฟ ฯ„ฮฟฯ… ฯƒฯ…ฯƒฯ„ฮทฮผฮฑฯ„ฮฟฯ‚</translation> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this MinersHeavenCoin address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all sign message fields</source> <translation>ฮ•ฯ€ฮฑฮฝฮฑฯ†ฮฟฯฮฌ ฯŒฮปฯ‰ฮฝ ฯ„ฯ‰ฮฝ ฯ€ฮตฮดฮฏฯ‰ฮฝ ฮผฮฎฮฝฯ…ฮผฮฑฯ„ฮฟฯ‚</translation> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation>ฮšฮฑฮธฮฑฯฮนฯƒฮผฯŒฯ‚ &amp;ฮŒฮปฯ‰ฮฝ</translation> </message> <message> <location line="-87"/> <location line="+70"/> <source>&amp;Verify Message</source> <translation>&amp;ฮ•ฯ€ฮนฮฒฮตฮฒฮฑฮฏฯ‰ฯƒฮท ฮผฮทฮฝฯฮผฮฑฯ„ฮฟฯ‚</translation> </message> <message> <location line="-64"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation>ฮ ฮปฮทฮบฯ„ฯฮฟฮปฮฟฮณฮฎฯƒฯ„ฮต ฯ„ฮทฮฝ ฯ…ฯ€ฮฟฮณฯฮฑฯ†ฮฎ ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮทฯ‚, ฮผฮฎฮฝฯ…ฮผฮฑ (ฮฒฮตฮฒฮฑฮนฯ‰ฮธฮตฮฏฯ„ฮต ฯŒฯ„ฮน ฮญฯ‡ฮตฯ„ฮต ฮฑฮฝฯ„ฮนฮณฯฮฌฯˆฮตฮน ฯ„ฮนฯ‚ ฮฑฮปฮปฮฑฮณฮญฯ‚ ฮณฯฮฑฮผฮผฮฎฯ‚, ฮบฮตฮฝฮฌ, tabs, ฮบ.ฮปฯ€. ฮฑฮบฯฮนฮฒฯŽฯ‚) ฮบฮฑฮน ฯ„ฮทฮฝ ฯ…ฯ€ฮฟฮณฯฮฑฯ†ฮฎ ฯ€ฮฑฯฮฑฮบฮฌฯ„ฯ‰, ฮณฮนฮฑ ฮฝฮฑ ฮตฮปฮญฮณฮพฮตฮน ฯ„ฮฟ ฮผฮฎฮฝฯ…ฮผฮฑ. ฮฮฑ ฮตฮฏฯƒฯ„ฮต ฯ€ฯฮฟฯƒฮตฮบฯ„ฮนฮบฮฟฮฏ ฮณฮนฮฑ ฮฝฮฑ ฮผฮทฮฝ ฮดฮนฮฑฮฒฮฌฯƒฮตฯ„ฮต ฯ€ฮตฯฮนฯƒฯƒฯŒฯ„ฮตฯฮฑ ฯƒฯ„ฮทฮฝ ฯ…ฯ€ฮฟฮณฯฮฑฯ†ฮฎ ฯŒ, ฯ„ฮน ฮตฮฏฮฝฮฑฮน ฯƒฯ„ฮทฮฝ ฯ…ฯ€ฮฟฮณฯฮฑฯ†ฮฎ ฮฏฮดฮนฮฟ ฯ„ฮฟ ฮผฮฎฮฝฯ…ฮผฮฑ , ฮณฮนฮฑ ฮฝฮฑ ฮผฮทฮฝ ฮตฮพฮฑฯ€ฮฑฯ„ฮทฮธฮฟฯฮฝ ฮฑฯ€ฯŒ ฮญฮฝฮฑฮฝ ฮฌฮฝฮธฯฯ‰ฯ€ฮฟ -in - the-middle ฮตฯ€ฮฏฮธฮตฯƒฮท.</translation> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. 
M8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified MinersHeavenCoin address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all verify message fields</source> <translation>Επαναφορά όλων των πεδίων επαλήθευσης μηνύματος </translation> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a MinersHeavenCoin address (e.g. M8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation>Κάντε κλικ στο &quot;Υπογραφή Μηνύματος&quot; για να λάβετε την υπογραφή</translation> </message> <message> <location line="+3"/> <source>Enter MinersHeavenCoin signature</source> <translation type="unfinished"/> </message> <message> <location line="+82"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation>Η διεύθυνση που εισήχθη είναι λάθος.</translation> </message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation>Παρακαλούμε ελέγξτε την διεύθυνση και δοκιμάστε ξανά.</translation> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation>Η διεύθυνση που έχει εισαχθεί δεν αναφέρεται σε ένα κλειδί.</translation> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation>Το ξεκλείδωμα του πορτοφολιού ακυρώθηκε.</translation> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation>Το προσωπικό κλειδί της εισαγμένης διεύθυνσης δεν είναι διαθέσιμο.</translation> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation>Η υπογραφή του μηνύματος απέτυχε.</translation> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation>Μήνυμα υπεγράφη.</translation> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation>Η υπογραφή δεν μπόρεσε να αποκρυπτογραφηθεί.</translation> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> <translation>Παρακαλούμε ελέγξτε την υπογραφή και δοκιμάστε ξανά.</translation> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation>Η υπογραφή δεν ταιριάζει με το μήνυμα. 
</translation> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation>Η επιβεβαίωση του μηνύματος απέτυχε</translation> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation>Μήνυμα επιβεβαιώθηκε.</translation> </message> </context> <context> <name>TransactionDesc</name> <message> <location filename="../transactiondesc.cpp" line="+19"/> <source>Open until %1</source> <translation>Ανοιχτό μέχρι %1</translation> </message> <message numerus="yes"> <location line="-2"/> <source>Open for %n block(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+8"/> <source>conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1/offline</source> <translation>%1/χωρίς σύνδεση</translation> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation>%1/χωρίς επιβεβαίωση</translation> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation>%1 επιβεβαιώσεις</translation> </message> <message> <location line="+18"/> <source>Status</source> <translation>Κατάσταση</translation> </message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> <translation><numerusform>, έχει μεταδοθεί μέσω %n κόμβων</numerusform><numerusform>, έχει μεταδοθεί μέσω %n κόμβων</numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation>Ημερομηνία</translation> </message> <message> <location line="+7"/> <source>Source</source> <translation>Πηγή</translation> </message> <message> <location line="+0"/> <source>Generated</source> <translation>Δημιουργία </translation> </message> <message> <location line="+5"/> <location line="+17"/> <source>From</source> <translation>Από</translation> </message> <message> <location line="+1"/> <location line="+22"/> <location line="+58"/> <source>To</source> <translation>Προς</translation> </message> <message> <location line="-77"/> <location line="+2"/> <source>own address</source> <translation> δική σας διεύθυνση </translation> </message> <message> <location line="-2"/> <source>label</source> <translation>επιγραφή</translation> </message> <message> <location line="+37"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+30"/> <source>Credit</source> <translation>Πίστωση </translation> </message> <message numerus="yes"> <location line="-102"/> <source>matures in %n more block(s)</source> <translation><numerusform>ωρίμανση σε %n επιπλέον μπλοκ</numerusform><numerusform>ωρίμανση σε %n επιπλέον μπλοκ</numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation>μη αποδεκτό</translation> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+30"/> <source>Debit</source> <translation>Χρέωση</translation> </message> <message> <location line="-39"/> <source>Transaction fee</source> <translation>Τέλος συναλλαγής </translation> </message> <message> <location line="+16"/> <source>Net amount</source> 
<translation>ฮšฮฑฮธฮฑฯฯŒ ฯ€ฮฟฯƒฯŒ</translation> </message> <message> <location line="+6"/> <source>Message</source> <translation>ฮœฮฎฮฝฯ…ฮผฮฑ</translation> </message> <message> <location line="+2"/> <source>Comment</source> <translation>ฮฃฯ‡ฯŒฮปฮนฮฟ:</translation> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation>ID ฮฃฯ…ฮฝฮฑฮปฮปฮฑฮณฮฎฯ‚:</translation> </message> <message> <location line="+3"/> <source>Generated coins must mature 60 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Debug information</source> <translation>ฮ ฮปฮทฯฮฟฯ†ฮฟฯฮฏฮตฯ‚ ฮฑฯ€ฮฟฯƒฯ†ฮฑฮปฮผฮฌฯ„ฯ‰ฯƒฮทฯ‚</translation> </message> <message> <location line="+8"/> <source>Transaction</source> <translation>ฮฃฯ…ฮฝฮฑฮปฮปฮฑฮณฮฎ</translation> </message> <message> <location line="+5"/> <source>Inputs</source> <translation>ฮตฮนฯƒฯฮฟฮญฯ‚ </translation> </message> <message> <location line="+23"/> <source>Amount</source> <translation>ฮ ฮฟฯƒฯŒ</translation> </message> <message> <location line="+1"/> <source>true</source> <translation>ฮฑฮปฮทฮธฮฎฯ‚</translation> </message> <message> <location line="+0"/> <source>false</source> <translation>ฮฑฮฝฮฑฮปฮทฮธฮฎฯ‚ </translation> </message> <message> <location line="-211"/> <source>, has not been successfully broadcast yet</source> <translation>, ฮดฮตฮฝ ฮญฯ‡ฮตฮน ฮฑฮบฯŒฮผฮฑ ฮผฮตฯ„ฮฑฮดฮฟฮธฮตฮฏ ฮผ&apos; ฮตฯ€ฮนฯ„ฯ…ฯ‡ฮฏฮฑ</translation> </message> <message> <location line="+35"/> <source>unknown</source> <translation>ฮฌฮณฮฝฯ‰ฯƒฯ„ฮฟ</translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation>ฮ›ฮตฯ€ฯ„ฮฟฮผฮญฯฮตฮนฮตฯ‚ ฯƒฯ…ฮฝฮฑฮปฮปฮฑฮณฮฎฯ‚</translation> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation>ฮ‘ฯ…ฯ„ฯŒ ฯ„ฮฟ ฯ€ฮฑฯฮฌฮธฯ…ฯฮฟ ฮดฮตฮฏฯ‡ฮฝฮตฮน ฮผฮนฮฑ ฮปฮตฯ€ฯ„ฮฟฮผฮตฯฮฎ ฯ€ฮตฯฮนฮณฯฮฑฯ†ฮฎ ฯ„ฮทฯ‚ ฯƒฯ…ฮฝฮฑฮปฮปฮฑฮณฮฎฯ‚</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+226"/> <source>Date</source> <translation>ฮ—ฮผฮตฯฮฟฮผฮทฮฝฮฏฮฑ</translation> </message> <message> <location line="+0"/> <source>Type</source> <translation>ฮคฯฯ€ฮฟฯ‚</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>ฮ”ฮนฮตฯฮธฯ…ฮฝฯƒฮท</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation>ฮ ฮฟฯƒฯŒ</translation> </message> <message> <location line="+60"/> <source>Open until %1</source> <translation>ฮ‘ฮฝฮฟฮนฯ‡ฯ„ฯŒ ฮผฮญฯ‡ฯฮน %1</translation> </message> <message> <location line="+12"/> <source>Confirmed (%1 confirmations)</source> <translation>ฮ•ฯ€ฮนฮบฯ…ฯฯ‰ฮผฮญฮฝฮท (%1 ฮตฯ€ฮนฮบฯ…ฯฯŽฯƒฮตฮนฯ‚)</translation> </message> <message numerus="yes"> <location line="-15"/> <source>Open for %n more block(s)</source> <translation><numerusform>ฮ‘ฮฝฮฟฮนฯ‡ฯ„ฯŒ ฮณฮนฮฑ %n ฮผฯ€ฮปฮฟฮบ</numerusform><numerusform>ฮ‘ฮฝฮฟฮนฯ‡ฯ„ฯŒ ฮณฮนฮฑ %n ฮผฯ€ฮปฮฟฮบ</numerusform></translation> </message> <message> <location line="+6"/> <source>Offline</source> 
<translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Unconfirmed</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Confirming (%1 of %2 recommended confirmations)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Immature (%1 confirmations, will be available after %2)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation>ฮ‘ฯ…ฯ„ฯŒ ฯ„ฮฟ ฮผฯ€ฮปฮฟฮบ ฮดฮตฮฝ ฮญฯ‡ฮตฮน ฯ€ฮฑฯฮฑฮปฮทฯ†ฮธฮตฮฏ ฮฑฯ€ฯŒ ฮบฮฑฮฝฮญฮฝฮฑฮฝ ฮฌฮปฮปฮฟ ฮบฯŒฮผฮฒฮฟ ฮบฮฑฮน ฮบฮฑฯ„ฮฌ ฯ€ฮฌฯƒฮฑ ฯ€ฮนฮธฮฑฮฝฯŒฯ„ฮทฯ„ฮฑ ฮธฮฑ ฮฑฯ€ฮฟฯฯฮนฯ†ฮธฮตฮฏ!</translation> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation>ฮ”ฮทฮผฮนฮฟฯ…ฯฮณฮฎฮธฮทฮบฮต ฮฑฮปฮปฮฌ ฮฑฯ€ฮฟฯฯฮฏฯ†ฮธฮทฮบฮต</translation> </message> <message> <location line="+42"/> <source>Received with</source> <translation>ฮ ฮฑฯฮฑฮปฮฑฮฒฮฎ ฮผฮต</translation> </message> <message> <location line="+2"/> <source>Received from</source> <translation>ฮ•ฮปฮฎฯ†ฮธฮท ฮฑฯ€ฯŒ</translation> </message> <message> <location line="+3"/> <source>Sent to</source> <translation>ฮ‘ฯ€ฮฟฯƒฯ„ฮฟฮปฮฎ ฯ€ฯฮฟฯ‚</translation> </message> <message> <location line="+2"/> <source>Payment to yourself</source> <translation>ฮ ฮปฮทฯฯ‰ฮผฮฎ ฯ€ฯฮฟฯ‚ ฮตฯƒฮฌฯ‚</translation> </message> <message> <location line="+2"/> <source>Mined</source> <translation>ฮ•ฮพฯŒฯฯ…ฮพฮท</translation> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation>(ฮด/ฮฑ)</translation> </message> <message> <location line="+190"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation>ฮšฮฑฯ„ฮฌฯƒฯ„ฮฑฯƒฮท ฯƒฯ…ฮฝฮฑฮปฮปฮฑฮณฮฎฯ‚. 
ฮ ฮทฮณฮฑฮฏฮฝฮตฯ„ฮต ฯ„ฮฟ ฯ€ฮฟฮฝฯ„ฮฏฮบฮน ฯ€ฮฌฮฝฯ‰ ฮฑฯ€ฯŒ ฮฑฯ…ฯ„ฯŒ ฯ„ฮฟ ฯ€ฮตฮดฮฏฮฟ ฮณฮนฮฑ ฮฝฮฑ ฮดฮตฮฏฯ„ฮต ฯ„ฮฟฮฝ ฮฑฯฮนฮธฮผฯŒ ฯ„ฯ‰ฮฝ ฮตฯ€ฮนฮบฯ…ฯฯŽฯƒฮตฯ‰ฮฝ</translation> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation>ฮ—ฮผฮตฯฮฟฮผฮทฮฝฮฏฮฑ ฮบฮน ฯŽฯฮฑ ฮปฮฎฯˆฮทฯ‚ ฯ„ฮทฯ‚ ฯƒฯ…ฮฝฮฑฮปฮปฮฑฮณฮฎฯ‚.</translation> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation>ฮ•ฮฏฮดฮฟฯ‚ ฯƒฯ…ฮฝฮฑฮปฮปฮฑฮณฮฎฯ‚.</translation> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation>ฮ”ฮนฮตฯฮธฯ…ฮฝฯƒฮท ฮฑฯ€ฮฟฯƒฯ„ฮฟฮปฮฎฯ‚ ฯ„ฮทฯ‚ ฯƒฯ…ฮฝฮฑฮปฮปฮฑฮณฮฎฯ‚.</translation> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation>ฮ ฮฟฯƒฯŒ ฯ€ฮฟฯ… ฮฑฯ†ฮฑฮนฯฮญฮธฮทฮบฮต ฮฎ ฯ€ฯฮฟฯƒฯ„ฮญฮธฮทฮบฮต ฯƒฯ„ฮฟ ฯ…ฯ€ฯŒฮปฮฟฮนฯ€ฮฟ.</translation> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+55"/> <location line="+16"/> <source>All</source> <translation>ฮŒฮปฮฑ</translation> </message> <message> <location line="-15"/> <source>Today</source> <translation>ฮฃฮฎฮผฮตฯฮฑ</translation> </message> <message> <location line="+1"/> <source>This week</source> <translation>ฮ‘ฯ…ฯ„ฮฎ ฯ„ฮทฮฝ ฮตฮฒฮดฮฟฮผฮฌฮดฮฑ</translation> </message> <message> <location line="+1"/> <source>This month</source> <translation>ฮ‘ฯ…ฯ„ฯŒฮฝ ฯ„ฮฟฮฝ ฮผฮฎฮฝฮฑ</translation> </message> <message> <location line="+1"/> <source>Last month</source> <translation>ฮคฮฟฮฝ ฯ€ฯฮฟฮทฮณฮฟฯฮผฮตฮฝฮฟ ฮผฮฎฮฝฮฑ</translation> </message> <message> <location line="+1"/> <source>This year</source> <translation>ฮ‘ฯ…ฯ„ฯŒ ฯ„ฮฟ ฮญฯ„ฮฟฯ‚</translation> </message> <message> <location line="+1"/> <source>Range...</source> <translation>ฮˆฮบฯ„ฮฑฯƒฮท...</translation> </message> <message> <location line="+11"/> <source>Received with</source> <translation>ฮ•ฮปฮฎฯ†ฮธฮท ฮผฮต</translation> </message> <message> <location line="+2"/> <source>Sent to</source> <translation>ฮ‘ฯ€ฮตฯƒฯ„ฮฌฮปฮท ฯ€ฯฮฟฯ‚</translation> </message> <message> <location line="+2"/> <source>To yourself</source> <translation>ฮ ฯฮฟฯ‚ ฮตฯƒฮฌฯ‚</translation> </message> <message> <location line="+1"/> <source>Mined</source> <translation>ฮ•ฮพฯŒฯฯ…ฮพฮท</translation> </message> <message> <location line="+1"/> <source>Other</source> <translation>ฮ†ฮปฮปฮฟ</translation> </message> <message> <location line="+7"/> <source>Enter address or label to search</source> <translation>ฮ‘ฮฝฮฑฮถฮฎฯ„ฮทฯƒฮท ฮผฮต ฮฒฮฌฯƒฮท ฯ„ฮท ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮท ฮฎ ฯ„ฮทฮฝ ฮตฯ€ฮนฮณฯฮฑฯ†ฮฎ</translation> </message> <message> <location line="+7"/> <source>Min amount</source> <translation>ฮ•ฮปฮฌฯ‡ฮนฯƒฯ„ฮฟ ฯ€ฮฟฯƒฯŒ</translation> </message> <message> <location line="+34"/> <source>Copy address</source> <translation>ฮ‘ฮฝฯ„ฮนฮณฯฮฑฯ†ฮฎ ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮทฯ‚</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>ฮ‘ฮฝฯ„ฮนฮณฯฮฑฯ†ฮฎ ฮตฯ€ฮนฮณฯฮฑฯ†ฮฎฯ‚</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>ฮ‘ฮฝฯ„ฮนฮณฯฮฑฯ†ฮฎ ฯ€ฮฟฯƒฮฟฯ</translation> </message> <message> <location line="+1"/> <source>Copy transaction ID</source> <translation>ฮ‘ฮฝฯ„ฮนฮณฯฮฑฯ†ฮท ฯ„ฮฟฯ… ID ฮฃฯ…ฮฝฮฑฮปฮปฮฑฮณฮฎฯ‚</translation> </message> <message> <location line="+1"/> <source>Edit label</source> <translation>ฮ•ฯ€ฮตฮพฮตฯฮณฮฑฯƒฮฏฮฑ ฮตฯ€ฮนฮณฯฮฑฯ†ฮฎฯ‚</translation> </message> <message> <location line="+1"/> <source>Show transaction details</source> 
<translation>ฮ•ฮผฯ†ฮฌฮฝฮนฯƒฮท ฮปฮตฯ€ฯ„ฮฟฮผฮตฯฮตฮนฯŽฮฝ ฯƒฯ…ฮฝฮฑฮปฮปฮฑฮณฮฎฯ‚</translation> </message> <message> <location line="+144"/> <source>Export Transaction Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>ฮ‘ฯฯ‡ฮตฮฏฮฟ ฮฟฯฮนฮฟฮธฮตฯ„ฮทฮผฮญฮฝฮฟ ฮผฮต ฮบฯŒฮผฮผฮฑฯ„ฮฑ (*.csv)</translation> </message> <message> <location line="+8"/> <source>Confirmed</source> <translation>ฮ•ฯ€ฮนฮบฯ…ฯฯ‰ฮผฮญฮฝฮตฯ‚</translation> </message> <message> <location line="+1"/> <source>Date</source> <translation>ฮ—ฮผฮตฯฮฟฮผฮทฮฝฮฏฮฑ</translation> </message> <message> <location line="+1"/> <source>Type</source> <translation>ฮคฯฯ€ฮฟฯ‚</translation> </message> <message> <location line="+1"/> <source>Label</source> <translation>ฮ•ฯ€ฮนฮณฯฮฑฯ†ฮฎ</translation> </message> <message> <location line="+1"/> <source>Address</source> <translation>ฮ”ฮนฮตฯฮธฯ…ฮฝฯƒฮท</translation> </message> <message> <location line="+1"/> <source>Amount</source> <translation>ฮ ฮฟฯƒฯŒ</translation> </message> <message> <location line="+1"/> <source>ID</source> <translation>ID</translation> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"/> </message> <message> <location line="+100"/> <source>Range:</source> <translation>ฮˆฮบฯ„ฮฑฯƒฮท:</translation> </message> <message> <location line="+8"/> <source>to</source> <translation>ฮญฯ‰ฯ‚</translation> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+206"/> <source>Sending...</source> <translation type="unfinished"/> </message> </context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+33"/> <source>MinersHeavenCoin version</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Usage:</source> <translation>ฮงฯฮฎฯƒฮท:</translation> </message> <message> <location line="+1"/> <source>Send command to -server or minersheavencoind</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>List commands</source> <translation>ฮ›ฮฏฯƒฯ„ฮฑ ฮตฮฝฯ„ฮฟฮปฯŽฮฝ</translation> </message> <message> <location line="+1"/> <source>Get help for a command</source> <translation>ฮ•ฯ€ฮตฮพฮฎฮณฮทฯƒฮท ฮตฮฝฯ„ฮฟฮปฮฎฯ‚</translation> </message> <message> <location line="+2"/> <source>Options:</source> <translation>ฮ•ฯ€ฮนฮปฮฟฮณฮญฯ‚:</translation> </message> <message> <location line="+2"/> <source>Specify configuration file (default: minersheavencoin.conf)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Specify pid file (default: minersheavencoind.pid)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Specify wallet file (within data directory)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation>ฮŸฯฮนฯƒฮผฯŒฯ‚ ฯ†ฮฑฮบฮญฮปฮฟฯ… ฮดฮตฮดฮฟฮผฮญฮฝฯ‰ฮฝ</translation> </message> <message> <location line="+2"/> <source>Set database cache size in megabytes (default: 25)</source> <translation>ฮŒฯฮนฯƒฮต ฯ„ฮฟ ฮผฮญฮณฮตฮธฮฟฯ‚ ฯ„ฮทฯ‚ ฮฒฮฌฯƒฮทฯ‚ ฯ€ฯฮฟฯƒฯ‰ฯฮนฮฝฮฎฯ‚ ฮฑฯ€ฮฟฮธฮฎฮบฮตฯ…ฯƒฮทฯ‚ ฯƒฮต megabytes(ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ:25)</translation> </message> <message> <location line="+1"/> <source>Set database disk log size in megabytes 
(default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Listen for connections on &lt;port&gt; (default: 38345 or testnet: 48345)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation>ฮœฮญฮณฮนฯƒฯ„ฮตฯ‚ ฮฑฯฮนฮธฮผฯŒฯ‚ ฯƒฯ…ฮฝฮดฮญฯƒฮตฯ‰ฮฝ ฮผฮต ฯ„ฮฟฯ…ฯ‚ peers &lt;n&gt; (ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ: 125)</translation> </message> <message> <location line="+3"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation>ฮฃฯฮฝฮดฮตฯƒฮท ฯƒฮต ฮญฮฝฮฑฮฝ ฮบฯŒฮผฮฒฮฟ ฮณฮนฮฑ ฯ„ฮทฮฝ ฮฑฮฝฮฌฮบฯ„ฮทฯƒฮท ฮดฮนฮตฯ…ฮธฯฮฝฯƒฮตฯ‰ฮฝ ฮฑฯ€ฯŒ ฮฟฮผฮฟฯ„ฮฏฮผฮฟฯ…ฯ‚, ฮบฮฑฮน ฮฑฯ€ฮฟฯƒฯ…ฮฝฮดฮญฯƒh</translation> </message> <message> <location line="+1"/> <source>Specify your own public address</source> <translation>ฮ”ฮนฮตฯ…ฮบฯฮนฮฝฮฏฯƒฯ„ฮต ฯ„ฮท ฮดฮนฮบฮนฮฌ ฯƒฮฑฯ‚ ฮดฮทฮผฯŒฯƒฮนฮฑ ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮท.</translation> </message> <message> <location line="+5"/> <source>Bind to given address. Use [host]:port notation for IPv6</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Stake your coins to support network and gain reward (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation>ฮŒฯฮนฮฟ ฮฑฯ€ฮฟฯƒฯฮฝฮดฮตฯƒฮทฯ‚ ฯ€ฯฮฟฮฒฮปฮทฮผฮฑฯ„ฮนฮบฯŽฮฝ peers (ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ: 100)</translation> </message> <message> <location line="+1"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation>ฮ”ฮตฯ…ฯ„ฮตฯฯŒฮปฮตฯ€ฯ„ฮฑ ฯ€ฯฮนฮฝ ฮตฯ€ฮนฯ„ฯฮฑฯ€ฮตฮฏ ฮพฮฑฮฝฮฌ ฮท ฯƒฯฮฝฮดฮตฯƒฮท ฯ„ฯ‰ฮฝ ฯ€ฯฮฟฮฒฮปฮทฮผฮฑฯ„ฮนฮบฯŽฮฝ peers (ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ: 86400)</translation> </message> <message> <location line="-44"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation>ฮˆฮฝฮฑ ฯƒฯ†ฮฌฮปฮผฮฑ ฯƒฯ…ฮฝฮญฮฒฮท ฮบฮฑฮธฯŽฯ‚ ฯ€ฯฮฟฮตฯ„ฮฟฮนฮผฮฑฮถฯŒฯ„ฮฑฮฝ ฮท ฯ€ฯŒฯฯ„ฮฑ RPC %u ฮณฮนฮฑ ฮฑฮฝฮฑฮผฮฟฮฝฮฎ IPv4: %s</translation> </message> <message> <location line="+51"/> <source>Detach block and address databases. Increases shutdown time (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+109"/> <source>Error: The transaction was rejected. 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source> <translation type="unfinished"/> </message> <message> <location line="-87"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 38346 or testnet: 48346)</source> <translation type="unfinished"/> </message> <message> <location line="-11"/> <source>Accept command line and JSON-RPC commands</source> <translation>ฮ‘ฯ€ฮฟฮดฮฟฯ‡ฮฎ ฮตฮฝฯ„ฮฟฮปฯŽฮฝ ฮบฮฟฮฝฯƒฯŒฮปฮฑฯ‚ ฮบฮฑฮน JSON-RPC</translation> </message> <message> <location line="+101"/> <source>Error: Transaction creation failed </source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>Error: Wallet locked, unable to create transaction </source> <translation type="unfinished"/> </message> <message> <location line="-8"/> <source>Importing blockchain data file.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Importing bootstrap blockchain data file.</source> <translation type="unfinished"/> </message> <message> <location line="-88"/> <source>Run in the background as a daemon and accept commands</source> <translation>ฮ•ฮบฯ„ฮญฮปฮตฯƒฮท ฯƒฯ„ฮฟ ฯ€ฮฑฯฮฑฯƒฮบฮฎฮฝฮนฮฟ ฮบฮน ฮฑฯ€ฮฟฮดฮฟฯ‡ฮฎ ฮตฮฝฯ„ฮฟฮปฯŽฮฝ</translation> </message> <message> <location line="+1"/> <source>Use the test network</source> <translation>ฮงฯฮฎฯƒฮท ฯ„ฮฟฯ… ฮดฮฟฮบฮนฮผฮฑฯƒฯ„ฮนฮบฮฟฯ ฮดฮนฮบฯ„ฯฮฟฯ…</translation> </message> <message> <location line="-24"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation>ฮฮฑ ฮดฮญฯ‡ฮตฯƒฮฑฮน ฯƒฯ…ฮฝฮดฮญฯƒฮตฮนฯ‚ ฮฑฯ€ฯŒ ฮญฮพฯ‰(ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ:1)</translation> </message> <message> <location line="-38"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation>ฮˆฮฝฮฑ ฯƒฯ†ฮฌฮปฮผฮฑ ฯƒฯ…ฮฝฮญฮฒฮท ฮบฮฑฮธฯŽฯ‚ ฯ€ฯฮฟฮตฯ„ฮฟฮนฮผฮฑฮถฯŒฯ„ฮฑฮฝ ฮท ฯ…ฯ€ฮฟฮดฮฟฯ‡ฮท RPC %u ฮณฮนฮฑ ฮฑฮฝฮฑฮผฮฟฮฝฮท ฯ„ฮฟฯ… IPv6, ฮตฯ€ฮตฯƒฮต ฯ€ฮนฯƒฯ‰ ฯƒฯ„ฮฟ IPv4:%s</translation> </message> <message> <location line="+117"/> <source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source> <translation type="unfinished"/> </message> <message> <location line="-20"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation>ฮ ฯฮฟฮตฮนฮดฮฟฯ€ฮฟฮฏฮทฯƒฮท: ฮ— ฯ€ฮฑฯฮฌฮผฮตฯ„ฯฮฟฯ‚ -paytxfee ฮตฮฏฮฝฮฑฮน ฯ€ฮฟฮปฯ ฯ…ฯˆฮทฮปฮฎ. ฮ ฯฯŒฮบฮตฮนฯ„ฮฑฮน ฮณฮนฮฑ ฯ„ฮทฮฝ ฮฑฮผฮฟฮนฮฒฮฎ ฯ€ฮฟฯ… ฮธฮฑ ฯ€ฮปฮทฯฯŽฮฝฮตฯ„ฮต ฮณฮนฮฑ ฮบฮฌฮธฮต ฯƒฯ…ฮฝฮฑฮปฮปฮฑฮณฮฎ ฯ€ฮฟฯ… ฮธฮฑ ฯƒฯ„ฮญฮปฮฝฮตฯ„ฮต.</translation> </message> <message> <location line="+61"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong MinersHeavenCoin will not work properly.</source> <translation type="unfinished"/> </message> <message> <location line="-31"/> <source>Warning: error reading wallet.dat! 
All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation>ฮ ฯฮฟฮตฮนฮดฮฟฯ€ฮฟฮฏฮทฯƒฮท : ฮฃฯ†ฮฌฮปฮผฮฑ wallet.dat ฮบฮฑฯ„ฮฑ ฯ„ฮทฮฝ ฮฑฮฝฮฌฮณฮฝฯ‰ฯƒฮท ! ฮŒฮปฮฑ ฯ„ฮฑ ฮบฮปฮตฮนฮดฮนฮฌ ฮฑฮฝฮฑฮณฮฝฯ‰ฯฮนฯƒฮธฮทฮบฮฑฮฝ ฯƒฯ‰ฯƒฯ„ฮฌ, ฮฑฮปฮปฮฌ ฯ„ฮฑ ฮดฮตฮดฮฟฮผฮญฮฝฮฑ ฯ„ฯ‰ฮฝ ฯƒฯ…ฮฝฮฑฮปฮปฮฑฮณฯŽฮฝ ฮฎ ฮบฮฑฯ„ฮฑฯ‡ฯ‰ฯฮฎฯƒฮตฮนฯ‚ ฯƒฯ„ฮฟ ฮฒฮนฮฒฮปฮฏฮฟ ฮดฮนฮตฯ…ฮธฯฮฝฯƒฮตฯ‰ฮฝ ฮผฯ€ฮฟฯฮตฮฏ ฮฝฮฑ ฮตฮฏฮฝฮฑฮน ฮตฮปฮปฮนฯ€ฮตฮฏฯ‚ ฮฎ ฮปฮฑฮฝฮธฮฑฯƒฮผฮญฮฝฮฑ. </translation> </message> <message> <location line="-18"/> <source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation>ฮ ฯฮฟฮตฮนฮดฮฟฯ€ฮฟฮฏฮทฯƒฮท : ฯ„ฮฟ ฮฑฯฯ‡ฮตฮนฮฟ wallet.dat ฮตฮนฮฝฮฑฮน ฮดฮนฮตฯ†ฮธฮฑฯฮผฮญฮฝฮฟ, ฯ„ฮฑ ฮดฮตฮดฮฟฮผฮญฮฝฮฑ ฯƒฯŽฮถฮฟฮฝฯ„ฮฑฮน ! Original wallet.dat ฮฑฯ€ฮฟฮธฮทฮบฮตฯฮฟฮฝฯ„ฮฑฮน ฯ‰ฯ‚ wallet.{timestamp}.bak ฯƒฯ„ฮฟ %s . ฮ‘ฮฝ ฯ„ฮฟ ฯ…ฯ€ฯŒฮปฮฟฮนฯ€ฮฟ ฯ„ฮฟฯ… ฮฎ ฯ„ฮนฯ‚ ฯƒฯ…ฮฝฮฑฮปฮปฮฑฮณฮญฯ‚ ฯƒฮฑฯ‚, ฮตฮฏฮฝฮฑฮน ฮปฮฌฮธฮฟฯ‚ ฮธฮฑ ฯ€ฯฮญฯ€ฮตฮน ฮฝฮฑ ฮตฯ€ฮฑฮฝฮฑฯ†ฮญฯฮตฯ„ฮต ฮฑฯ€ฯŒ ฮญฮฝฮฑ ฮฑฮฝฯ„ฮฏฮณฯฮฑฯ†ฮฟ ฮฑฯƒฯ†ฮฑฮปฮตฮฏฮฑฯ‚</translation> </message> <message> <location line="-30"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation>ฮ ฯฮฟฯƒฯ€ฮฌฮธฮตฮนฮฑ ฮณฮนฮฑ ฮฑฮฝฮฑฮบฯ„ฮทฯƒฮตฮน ฮนฮดฮนฯ‰ฯ„ฮนฮบฯ‰ฮฝ ฮบฮปฮตฮนฮดฮนฯ‰ฮฝ ฮฑฯ€ฯŒ ฮตฮฝฮฑ ฮดฮนฮตฯ†ฮธฮฑฯฮผฮญฮฝฮฟ ฮฑฯฯ‡ฮตฮนฮฟ wallet.dat </translation> </message> <message> <location line="+4"/> <source>Block creation options:</source> <translation>ฮ‘ฯ€ฮฟฮบฮปฮตฮนฯƒฮผฯŒฯ‚ ฮตฯ€ฮนฮปฮฟฮณฯ‰ฮฝ ฮดฮทฮผฮนฮฟฯ…ฯฮณฮฏฮฑฯ‚: </translation> </message> <message> <location line="-62"/> <source>Connect only to the specified node(s)</source> <translation>ฮฃฯฮฝฮดฮตฯƒฮท ฮผฯŒฮฝฮฟ ฮผฮต ฮฟฯฮนฯƒฮผฮญฮฝฮฟฯ…ฯ‚ ฮบฯŒฮผฮฒฮฟฯ…ฯ‚</translation> </message> <message> <location line="+4"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation>ฮ‘ฮฝฮฑฮบฮฑฮปฯฯˆฯ„ฮต ฯ„ฮทฮฝ ฮดฮนฮบฮท ฯƒฮฑฯ‚ IP ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮท (ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ: 1 ฯŒฯ„ฮฑฮฝ ฮฑฮบฮฟฯฮตฮน ฮบฮฑฮน ฮดฮตฮฝ - externalip) </translation> </message> <message> <location line="+94"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation>ฯ„ฮฑฮปฮฑฮนฯ€ฯ‰ฯฮทฮธฮตฮนฯ„ฮต ฮณฮนฮฑ ฮฝฮฑ ฮฑฮบฮฟฯฯƒฮตฯ„ฮต ฯƒฮต ฮฟฯ€ฮฟฮนฮฑฮดฮฎฯ€ฮฟฯ„ฮต ฮธฯฯฮฑ. 
ฮงฯฮฎฯƒฮท - ฮฑฮบฮฟฯฯƒฯ„ฮต = 0 , ฮฑฮฝ ฮธฮญฮปฮตฯ„ฮต ฮฑฯ…ฯ„ฯŒ.</translation> </message> <message> <location line="-90"/> <source>Find peers using DNS lookup (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Sync checkpoints policy (default: strict)</source> <translation type="unfinished"/> </message> <message> <location line="+83"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Invalid amount for -reservebalance=&lt;amount&gt;</source> <translation type="unfinished"/> </message> <message> <location line="-82"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation>ฮœฮญฮณฮนฯƒฯ„ฮฟฯ‚ buffer ฮปฮฎฯˆฮทฯ‚ ฮฑฮฝฮฌ ฯƒฯฮฝฮดฮตฯƒฮท, &lt;n&gt;*1000 bytes (ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ: 5000)</translation> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation>ฮœฮญฮณฮนฯƒฯ„ฮฟฯ‚ buffer ฮฑฯ€ฮฟฯƒฯ„ฮฟฮปฮฎฯ‚ ฮฑฮฝฮฌ ฯƒฯฮฝฮดฮตฯƒฮท, &lt;n&gt;*1000 bytes (ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ: 1000)</translation> </message> <message> <location line="-16"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation> ฮฃฯ…ฮฝฮดฮญฯƒฮท ฮผฯŒฮฝฮฟ ฯƒฮต ฮบฯŒฮผฮฒฮฟฯ…ฯ‚ ฯ„ฮฟฯ… ฮดฮนฮบฯ„ฯฮฟฯ… &lt;net&gt; (IPv4, IPv6 ฮฎ Tor) </translation> </message> <message> <location line="+28"/> <source>Output extra debugging information. Implies all other -debug* options</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Output extra network debugging information</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Prepend debug output with timestamp</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source> <translation>ฮกฯ…ฮธฮผฮฏฯƒฮตฮนฯ‚ SSL: (ฮฑฮฝฮฑฯ„ฯฮญฮพฯ„ฮต ฯƒฯ„ฮฟ Bitcoin Wiki ฮณฮนฮฑ ฮฟฮดฮทฮณฮฏฮตฯ‚ ฯฯ…ฮธฮผฮฏฯƒฮตฯ‰ฮฝ SSL)</translation> </message> <message> <location line="-74"/> <source>Select the version of socks proxy to use (4-5, default: 5)</source> <translation type="unfinished"/> </message> <message> <location line="+41"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation>ฮ‘ฯ€ฮฟฯƒฯ„ฮฟฮปฮฎ ฯ€ฮปฮทฯฮฟฯ†ฮฟฯฮนฯŽฮฝ ฮตฮฝฯ„ฮฟฯ€ฮนฯƒฮผฮฟฯ ฯƒฯ†ฮฑฮปฮผฮฌฯ„ฯ‰ฮฝ ฯƒฯ„ฮทฮฝ ฮบฮฟฮฝฯƒฯŒฮปฮฑ ฮฑฮฝฯ„ฮฏ ฯ„ฮฟฯ… ฮฑฯฯ‡ฮตฮฏฮฟฯ… debug.log</translation> </message> <message> <location line="+1"/> <source>Send trace/debug info to debugger</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Set minimum block size in bytes (default: 0)</source> <translation>ฮŸฯฮฏฯƒฯ„ฮต ฯ„ฮฟ ฮผฮญฮณฮนฯƒฯ„ฮฟ ฮผฮญฮณฮตฮธฮฟฯ‚ ฮผฯ€ฮปฮฟฮบ ฯƒฮต bytes (ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ: 0)</translation> </message> <message> <location line="-29"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation>ฮฃฯ…ฯฯฮฏฮบฮฝฯ‰ฯƒฮท ฯ„ฮฟฯ… ฮฑฯฯ‡ฮตฮฏฮฟ debug.log ฮบฮฑฯ„ฮฑ ฯ„ฮทฮฝ ฮตฮบฮบฮฏฮฝฮทฯƒฮท ฯ„ฮฟฯ… ฯ€ฮตฮปฮฌฯ„ฮท (ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ: 1 ฯŒฯ„ฮฑฮฝ ฮดฮตฮฝ-debug)</translation> </message> <message> <location line="-42"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation>ฮŸฯฮนฯƒฮผฯŒฯ‚ ฮปฮฎฮพฮทฯ‚ ฯ‡ฯฮฟฮฝฮนฮบฮฟฯ ฮฟฯฮฏฮฟฯ… ฯƒฮต 
ฯ‡ฮนฮปฮนฮฟฯƒฯ„ฮฌ ฯ„ฮฟฯ… ฮดฮตฯ…ฯ„ฮตฯฮฟฮปฮญฯ€ฯ„ฮฟฯ…(ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ:5000)</translation> </message> <message> <location line="+109"/> <source>Unable to sign checkpoint, wrong checkpointkey? </source> <translation type="unfinished"/> </message> <message> <location line="-80"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation>ฮงฯฮทฯƒฮนฮผฮฟฯ€ฮฟฮฏฮทฯƒฮท ฯ„ฮฟฯ… UPnP ฮณฮนฮฑ ฯ„ฮทฮฝ ฯ‡ฯฮฎฯƒฮท ฯ„ฮทฯ‚ ฯ€ฯŒฯฯ„ฮฑฯ‚ ฮฑฮฝฮฑฮผฮฟฮฝฮฎฯ‚ (ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ:0)</translation> </message> <message> <location line="-1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation>ฮงฯฮทฯƒฮนฮผฮฟฯ€ฮฟฮฏฮทฯƒฮท ฯ„ฮฟฯ… UPnP ฮณฮนฮฑ ฯ„ฮทฮฝ ฯ‡ฯฮฎฯƒฮท ฯ„ฮทฯ‚ ฯ€ฯŒฯฯ„ฮฑฯ‚ ฮฑฮฝฮฑฮผฮฟฮฝฮฎฯ‚ (ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ:1)</translation> </message> <message> <location line="-25"/> <source>Use proxy to reach tor hidden services (default: same as -proxy)</source> <translation type="unfinished"/> </message> <message> <location line="+42"/> <source>Username for JSON-RPC connections</source> <translation>ฮŒฮฝฮฟฮผฮฑ ฯ‡ฯฮฎฯƒฯ„ฮท ฮณฮนฮฑ ฯ„ฮนฯ‚ ฯƒฯ…ฮฝฮดฮญฯƒฮตฮนฯ‚ JSON-RPC</translation> </message> <message> <location line="+47"/> <source>Verifying database integrity...</source> <translation type="unfinished"/> </message> <message> <location line="+57"/> <source>WARNING: syncronized checkpoint violation detected, but skipped!</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Warning: Disk space is low!</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation>ฮ ฯฮฟฮตฮนฮดฮฟฯ€ฮฟฮฏฮทฯƒฮท: ฮ‘ฯ…ฯ„ฮฎ ฮท ฮญฮบฮดฮฟฯƒฮท ฮตฮฏฮฝฮฑฮน ฮพฮตฯ€ฮตฯฮฑฯƒฮผฮญฮฝฮท, ฮฑฯ€ฮฑฮนฯ„ฮตฮฏฯ„ฮฑฮน ฮฑฮฝฮฑฮฒฮฌฮธฮผฮนฯƒฮท </translation> </message> <message> <location line="-48"/> <source>wallet.dat corrupt, salvage failed</source> <translation>ฮคฮฟ ฮฑฯฯ‡ฮตฮนฮฟ wallet.dat ฮตฮนฮฝฮฑฮน ฮดฮนฮตฯ†ฮธฮฑฯฮผฮญฮฝฮฟ, ฮท ฮดฮนฮฌฯƒฯ‰ฯƒฮท ฮฑฯ€ฮญฯ„ฯ…ฯ‡ฮต</translation> </message> <message> <location line="-54"/> <source>Password for JSON-RPC connections</source> <translation>ฮšฯ‰ฮดฮนฮบฯŒฯ‚ ฮณฮนฮฑ ฯ„ฮนฯ‚ ฯƒฯ…ฮฝฮดฮญฯƒฮตฮนฯ‚ JSON-RPC</translation> </message> <message> <location line="-84"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=minersheavencoinrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;MinersHeavenCoin Alert&quot; [email protected] </source> <translation type="unfinished"/> </message> <message> <location line="+51"/> <source>Find peers using internet relay chat (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Sync time with other nodes. Disable if time on your system is precise e.g. 
syncing with NTP (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation>ฮ‘ฯ€ฮฟฮดฮฟฯ‡ฮฎ ฯƒฯ…ฮฝฮดฮญฯƒฮตฯ‰ฮฝ JSON-RPC ฮฑฯ€ฯŒ ฯƒฯ…ฮณฮบฮตฮบฯฮนฮผฮญฮฝฮท ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮท IP</translation> </message> <message> <location line="+1"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation>ฮ‘ฯ€ฮฟฯƒฯ„ฮฟฮปฮฎ ฮตฮฝฯ„ฮฟฮปฯŽฮฝ ฯƒฯ„ฮฟฮฝ ฮบฯŒฮผฮฒฮฟ &lt;ip&gt; (ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ: 127.0.0.1)</translation> </message> <message> <location line="+1"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation>ฮ•ฮบฯ„ฮญฮปฮตฯƒฮต ฯ„ฮทฮฝ ฮตฮฝฯ„ฮฟฮปฮฎ ฯŒฯ„ฮฑฮฝ ฯ„ฮฟ ฮบฮฑฮปฯฯ„ฮตฯฮฟ ฮผฯ€ฮปฮฟฮบ ฮฑฮปฮปฮฌฮพฮตฮน(%s ฯƒฯ„ฮทฮฝ ฮตฮฝฯ„ฮฟฮปฮฎ ฮฑฮฝฯ„ฮนฮบฮฑฮธฮฏฯƒฯ„ฮฑฯ„ฮฑฮน ฮฑฯ€ฯŒ ฯ„ฮฟ hash ฯ„ฮฟฯ… ฮผฯ€ฮปฮฟฮบ)</translation> </message> <message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation>ฮ•ฮบฯ„ฮญฮปฮตฯƒฮต ฯ„ฮทฮฝ ฮตฮฝฯ„ฮฟฮปฮฎ ฯŒฯ„ฮฑฮฝ ฯ„ฮฟ ฮบฮฑฮปฯฯ„ฮตฯฮฟ ฮผฯ€ฮปฮฟฮบ ฮฑฮปฮปฮฌฮพฮตฮน(%s ฯƒฯ„ฮทฮฝ ฮตฮฝฯ„ฮฟฮปฮฎ ฮฑฮฝฯ„ฮนฮบฮฑฮธฮฏฯƒฯ„ฮฑฯ„ฮฑฮน ฮฑฯ€ฯŒ ฯ„ฮฟ hash ฯ„ฮฟฯ… ฮผฯ€ฮปฮฟฮบ)</translation> </message> <message> <location line="+3"/> <source>Require a confirmations for change (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Upgrade wallet to latest format</source> <translation>ฮ‘ฮฝฮฑฮฒฮฌฮธฮผฮนฯƒฮต ฯ„ฮฟ ฯ€ฮฟฯฯ„ฮฟฯ†ฯŒฮปฮน ฯƒฯ„ฮทฮฝ ฯ„ฮตฮปฮตฯ…ฯ„ฮฑฮฏฮฑ ฮญฮบฮดฮฟฯƒฮท</translation> </message> <message> <location line="+1"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation>ฮŒฯฮนฮฟ ฯ€ฮปฮฎฮธฮฟฯ…ฯ‚ ฮบฮปฮตฮนฮดฮนฯŽฮฝ pool &lt;n&gt; (ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ: 100)</translation> </message> <message> <location line="+1"/> <source>Rescan the block chain for missing wallet transactions</source> <translation>ฮ•ฯ€ฮฑฮฝฮญฮปฮตฮณฯ‡ฮฟฯ‚ ฯ„ฮทฯ‚ ฮฑฮปฯ…ฯƒฮฏฮดฮฑฯ‚ ฮผฯ€ฮปฮฟฮบ ฮณฮนฮฑ ฮฑฯ€ฮฟฯฯƒฮตฯ‚ ฯƒฯ…ฮฝฮฑฮปฮปฮฑฮณฮญฯ‚</translation> </message> <message> <location line="+2"/> <source>How many blocks to check at startup (default: 2500, 0 = all)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>How thorough the block verification is (0-6, default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Imports blocks from external blk000?.dat file</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation>ฮงฯฮฎฯƒฮท ฯ„ฮฟฯ… OpenSSL (https) ฮณฮนฮฑ ฯƒฯ…ฮฝฮดฮญฯƒฮตฮนฯ‚ JSON-RPC</translation> </message> <message> <location line="+1"/> <source>Server certificate file (default: server.cert)</source> <translation>ฮ‘ฯฯ‡ฮตฮฏฮฟ ฯ€ฮนฯƒฯ„ฮฟฯ€ฮฟฮนฮทฯ„ฮนฮบฮฟฯ ฯ„ฮฟฯ… ฮดฮนฮฑฮบฮฟฮผฮนฯƒฯ„ฮฎ (ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ: server.cert)</translation> </message> <message> <location line="+1"/> <source>Server private key 
(default: server.pem)</source> <translation>ฮ ฯฮฟฯƒฯ‰ฯ€ฮนฮบฯŒ ฮบฮปฮตฮนฮดฮฏ ฯ„ฮฟฯ… ฮดฮนฮฑฮบฮฟฮผฮนฯƒฯ„ฮฎ (ฯ€ฯฮฟฮตฯ€ฮนฮปฮฟฮณฮฎ: server.pem)</translation> </message> <message> <location line="+1"/> <source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source> <translation type="unfinished"/> </message> <message> <location line="+53"/> <source>Error: Wallet unlocked for staking only, unable to create transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source> <translation type="unfinished"/> </message> <message> <location line="-158"/> <source>This help message</source> <translation>ฮ‘ฯ…ฯ„ฯŒ ฯ„ฮฟ ฮบฮตฮฏฮผฮตฮฝฮฟ ฮฒฮฟฮฎฮธฮตฮนฮฑฯ‚</translation> </message> <message> <location line="+95"/> <source>Wallet %s resides outside data directory %s.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot obtain a lock on data directory %s. MinersHeavenCoin is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="-98"/> <source>MinersHeavenCoin</source> <translation type="unfinished"/> </message> <message> <location line="+140"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation>ฮ‘ฮดฯฮฝฮฑฯ„ฮท ฮท ฯƒฯฮฝฮดฮตฯƒฮท ฮผฮต ฯ„ฮท ฮธฯฯฮฑ %s ฮฑฯ…ฯ„ฮฟฯ ฯ„ฮฟฯ… ฯ…ฯ€ฮฟฮปฮฟฮณฮนฯƒฯ„ฮฎ (bind returned error %d, %s) </translation> </message> <message> <location line="-130"/> <source>Connect through socks proxy</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation>ฮฮฑ ฮตฯ€ฮนฯ„ฯฮญฯ€ฮฟฮฝฯ„ฮฑฮน ฮฟฮน ฮญฮปฮตฮณฯ‡ฮฟฮน DNS ฮณฮนฮฑ ฯ€ฯฮฟฯƒฮธฮฎฮบฮท ฮบฮฑฮน ฯƒฯฮฝฮดฮตฯƒฮท ฮบฯŒฮผฮฒฯ‰ฮฝ</translation> </message> <message> <location line="+122"/> <source>Loading addresses...</source> <translation>ฮฆฯŒฯฯ„ฯ‰ฯƒฮท ฮดฮนฮตฯ…ฮธฯฮฝฯƒฮตฯ‰ฮฝ...</translation> </message> <message> <location line="-15"/> <source>Error loading blkindex.dat</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation>ฮฃฯ†ฮฌฮปฮผฮฑ ฯ†ฯŒฯฯ„ฯ‰ฯƒฮทฯ‚ wallet.dat: ฮšฮฑฯ„ฮตฯƒฯ„ฯฮฑฮผฮผฮญฮฝฮฟ ฮ ฮฟฯฯ„ฮฟฯ†ฯŒฮปฮน</translation> </message> <message> <location line="+4"/> <source>Error loading wallet.dat: Wallet requires newer version of MinersHeavenCoin</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Wallet needed to be rewritten: restart MinersHeavenCoin to complete</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Error loading wallet.dat</source> <translation>ฮฃฯ†ฮฌฮปฮผฮฑ ฯ†ฯŒฯฯ„ฯ‰ฯƒฮทฯ‚ ฮฑฯฯ‡ฮตฮฏฮฟฯ… wallet.dat</translation> </message> <message> <location line="-16"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation>ฮ”ฮตฮฝ ฮตฮฏฮฝฮฑฮน ฮญฮณฮบฯ…ฯฮท ฮท ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮท ฮดฮนฮฑฮผฮตฯƒฮฟฮปฮฑฮฒฮทฯ„ฮฎ: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation>ฮ†ฮณฮฝฯ‰ฯƒฯ„o ฮดฮฏฮบฯ„ฯ…ฮฟ ฮฟฯฮฏฮถฮตฯ„ฮฑฮน ฯƒฮต onlynet: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown -socks proxy version requested: %i</source> <translation>ฮ†ฮณฮฝฯ‰ฯƒฯ„o ฮดฮฏฮบฯ„ฯ…ฮฟ ฮฟฯฮฏฮถฮตฯ„ฮฑฮน: 
%i</translation> </message> <message> <location line="+4"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation>ฮ”ฮตฮฝ ฮผฯ€ฮฟฯฯŽ ฮฝฮฑ ฮณฯฮฌฯˆฯ‰ ฯ„ฮทฮฝ ฯ€ฯฮฟฮตฯ€ฮนฮปฮตฮณฮผฮญฮฝฮท ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮท: &apos;%s&apos;</translation> </message> <message> <location line="+2"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation>ฮ”ฮตฮฝ ฮผฯ€ฮฟฯฯŽ ฮฝฮฑ ฮณฯฮฌฯˆฯ‰ ฯ„ฮทฮฝ ฯ€ฯฮฟฮตฯ€ฮนฮปฮตฮณฮผฮญฮฝฮท ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮท: &apos;%s&apos;</translation> </message> <message> <location line="-24"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>ฮœฮท ฮญฮณฮบฯ…ฯฮฟ ฯ€ฮฟฯƒฯŒ ฮณฮนฮฑ ฯ„ฮทฮฝ ฯ€ฮฑฯฮฌฮผฮตฯ„ฯฮฟ -paytxfee=&lt;amount&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+44"/> <source>Error: could not start node</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Sending...</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Invalid amount</source> <translation>ฮ›ฮฌฮธฮฟฯ‚ ฯ€ฮฟฯƒฯŒฯ„ฮทฯ„ฮฑ</translation> </message> <message> <location line="+1"/> <source>Insufficient funds</source> <translation>ฮ‘ฮฝฮตฯ€ฮฑฯฮบฮญฯ‚ ฮบฮตฯ†ฮฌฮปฮฑฮนฮฟ</translation> </message> <message> <location line="-34"/> <source>Loading block index...</source> <translation>ฮฆฯŒฯฯ„ฯ‰ฯƒฮท ฮตฯ…ฯฮตฯ„ฮทฯฮฏฮฟฯ… ฮผฯ€ฮปฮฟฮบ...</translation> </message> <message> <location line="-103"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation>ฮ ฯฮฟฯƒฮญฮธฮตฯƒฮต ฮญฮฝฮฑ ฮบฯŒฮผฮฒฮฟ ฮณฮนฮฑ ฯƒฯฮฝฮดฮตฯƒฮท ฮบฮฑฮน ฯ€ฯฮฟฯƒฯ€ฮฌฮธฮทฯƒฮต ฮฝฮฑ ฮบฯฮฑฯ„ฮฎฯƒฮตฮนฯ‚ ฯ„ฮทฮฝ ฯƒฯฮฝฮดฮตฯƒฮท ฮฑฮฝฮฟฮนฯ‡ฯ„ฮฎ</translation> </message> <message> <location line="+122"/> <source>Unable to bind to %s on this computer. 
MinersHeavenCoin is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="-97"/> <source>Fee per KB to add to transactions you send</source> <translation type="unfinished"/> </message> <message> <location line="+55"/> <source>Invalid amount for -mininput=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Loading wallet...</source> <translation>ฮฆฯŒฯฯ„ฯ‰ฯƒฮท ฯ€ฮฟฯฯ„ฮฟฯ†ฮฟฮปฮนฮฟฯ...</translation> </message> <message> <location line="+8"/> <source>Cannot downgrade wallet</source> <translation>ฮ”ฮตฮฝ ฮผฯ€ฮฟฯฯŽ ฮฝฮฑ ฯ…ฯ€ฮฟฮฒฮฑฮธฮผฮฏฯƒฯ‰ ฯ„ฮฟ ฯ€ฮฟฯฯ„ฮฟฯ†ฯŒฮปฮน</translation> </message> <message> <location line="+1"/> <source>Cannot initialize keypool</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot write default address</source> <translation>ฮ”ฮตฮฝ ฮผฯ€ฮฟฯฯŽ ฮฝฮฑ ฮณฯฮฌฯˆฯ‰ ฯ„ฮทฮฝ ฯ€ฯฮฟฮตฯ€ฮนฮปฮตฮณฮผฮญฮฝฮท ฮดฮนฮตฯฮธฯ…ฮฝฯƒฮท</translation> </message> <message> <location line="+1"/> <source>Rescanning...</source> <translation>ฮ‘ฮฝฮฏฯ‡ฮฝฮตฯ…ฯƒฮท...</translation> </message> <message> <location line="+5"/> <source>Done loading</source> <translation>ฮ— ฯ†ฯŒฯฯ„ฯ‰ฯƒฮท ฮฟฮปฮฟฮบฮปฮทฯฯŽฮธฮทฮบฮต</translation> </message> <message> <location line="-167"/> <source>To use the %s option</source> <translation>ฮงฯฮฎฯƒฮท ฯ„ฮทฯ‚ %s ฮตฯ€ฮนฮปฮฟฮณฮฎฯ‚</translation> </message> <message> <location line="+14"/> <source>Error</source> <translation>ฮฃฯ†ฮฌฮปฮผฮฑ</translation> </message> <message> <location line="+6"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation>ฮ ฯฮญฯ€ฮตฮน ฮฝฮฑ ฮฒฮฌฮปฮตฮนฯ‚ ฮญฮฝฮฑ ฮบฯ‰ฮดฮนฮบฯŒ ฯƒฯ„ฮฟ ฮฑฯฯ‡ฮตฮฏฮฟ ฯ€ฮฑฯฮฑฮผฮญฯ„ฯฯ‰ฮฝ: %s ฮ•ฮฌฮฝ ฯ„ฮฟ ฮฑฯฯ‡ฮตฮฏฮฟ ฮดฮตฮฝ ฯ…ฯ€ฮฌฯฯ‡ฮตฮน, ฮดฮทฮผฮนฮฟฯฯฮณฮทฯƒฮต ฯ„ฮฟ ฮผฮต ฮดฮนฮบฮฑฮนฯŽฮผฮฑฯ„ฮฑ ฮผฯŒฮฝฮฟ ฮณฮนฮฑ ฮฑฮฝฮฌฮณฮฝฯ‰ฯƒฮท ฮฑฯ€ฯŒ ฯ„ฮฟฮฝ ฮดฮทฮผฮนฮฟฯ…ฯฮณฯŒ</translation> </message> </context> </TS>
<location line="+56"/>
server.rs
//! Conveniences for creating a TiKV server use super::setup::*; use super::signal_handler; use engine::rocks; use engine::rocks::util::metrics_flusher::{MetricsFlusher, DEFAULT_FLUSHER_INTERVAL}; use engine::rocks::util::security::encrypted_env_from_cipher_file; use engine::Engines; use fs2::FileExt; use kvproto::backup::create_backup; use kvproto::deadlock::create_deadlock; use kvproto::debugpb::create_debug; use kvproto::import_sstpb::create_import_sst; use pd_client::{PdClient, RpcClient}; use std::fs::File; use std::path::Path; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; use std::time::Duration; use tikv::config::TiKvConfig; use tikv::coprocessor; use tikv::import::{ImportSSTService, SSTImporter}; use tikv::raftstore::coprocessor::{CoprocessorHost, RegionInfoAccessor}; use tikv::raftstore::store::fsm::store::{StoreMeta, PENDING_VOTES_CAP}; use tikv::raftstore::store::{fsm, LocalReader}; use tikv::raftstore::store::{new_compaction_listener, SnapManagerBuilder}; use tikv::server::lock_manager::LockManager; use tikv::server::resolve; use tikv::server::service::DebugService; use tikv::server::status_server::StatusServer; use tikv::server::transport::ServerRaftStoreRouter; use tikv::server::DEFAULT_CLUSTER_ID; use tikv::server::{create_raft_storage, Node, RaftKv, Server}; use tikv::storage::{self, AutoGCConfig, DEFAULT_ROCKSDB_SUB_DIR}; use tikv_util::check_environment_variables; use tikv_util::security::SecurityManager; use tikv_util::time::Monitor; use tikv_util::worker::FutureWorker; const RESERVED_OPEN_FDS: u64 = 1000; pub fn run_tikv(mut config: TiKvConfig) { // Sets the global logger ASAP. // It is okay to use the config w/o `validate()`, // because `initial_logger()` handles various conditions. initial_logger(&config); tikv_util::set_panic_hook(false, &config.storage.data_dir); // Print version information. tikv::log_tikv_info(); info!( "using config"; "config" => serde_json::to_string(&config).unwrap(), ); config.write_into_metrics(); // Do some preparatory work before starting. pre_start(&config); let security_mgr = Arc::new( SecurityManager::new(&config.security) .unwrap_or_else(|e| fatal!("failed to create security manager: {}", e.description())), ); let pd_client = RpcClient::new(&config.pd, Arc::clone(&security_mgr)) .unwrap_or_else(|e| fatal!("failed to create rpc client: {}", e)); let cluster_id = pd_client .get_cluster_id() .unwrap_or_else(|e| fatal!("failed to get cluster id: {}", e)); if cluster_id == DEFAULT_CLUSTER_ID { fatal!("cluster id can't be {}", DEFAULT_CLUSTER_ID); } config.server.cluster_id = cluster_id; info!( "connect to PD cluster"; "cluster_id" => cluster_id ); let _m = Monitor::default(); run_raft_server(pd_client, &config, security_mgr); } fn
(pd_client: RpcClient, cfg: &TiKvConfig, security_mgr: Arc<SecurityManager>) { let store_path = Path::new(&cfg.storage.data_dir); let lock_path = store_path.join(Path::new("LOCK")); let db_path = store_path.join(Path::new(DEFAULT_ROCKSDB_SUB_DIR)); let snap_path = store_path.join(Path::new("snap")); let raft_db_path = Path::new(&cfg.raft_store.raftdb_path); let import_path = store_path.join("import"); let f = File::create(lock_path.as_path()) .unwrap_or_else(|e| fatal!("failed to create lock at {}: {}", lock_path.display(), e)); if f.try_lock_exclusive().is_err() { fatal!( "lock {} failed, maybe another instance is using this directory.", store_path.display() ); } if tikv_util::panic_mark_file_exists(&cfg.storage.data_dir) { fatal!( "panic_mark_file {} exists, there must be something wrong with the db.", tikv_util::panic_mark_file_path(&cfg.storage.data_dir).display() ); } // Initialize raftstore channels. let (router, system) = fsm::create_raft_batch_system(&cfg.raft_store); let compaction_listener = new_compaction_listener(router.clone()); // Create pd client and pd worker let pd_client = Arc::new(pd_client); let pd_worker = FutureWorker::new("pd-worker"); let (mut worker, resolver) = resolve::new_resolver(Arc::clone(&pd_client)) .unwrap_or_else(|e| fatal!("failed to start address resolver: {}", e)); let pd_sender = pd_worker.scheduler(); // Create encrypted env from cipher file let encrypted_env = if !cfg.security.cipher_file.is_empty() { match encrypted_env_from_cipher_file(&cfg.security.cipher_file, None) { Err(e) => fatal!( "failed to create encrypted env from cipher file, err {:?}", e ), Ok(env) => Some(env), } } else { None }; // Create block cache. let cache = cfg.storage.block_cache.build_shared_cache(); // Create raft engine. let mut raft_db_opts = cfg.raftdb.build_opt(); if let Some(ref ec) = encrypted_env { raft_db_opts.set_env(ec.clone()); } let raft_db_cf_opts = cfg.raftdb.build_cf_opts(&cache); let raft_engine = rocks::util::new_engine_opt( raft_db_path.to_str().unwrap(), raft_db_opts, raft_db_cf_opts, ) .unwrap_or_else(|s| fatal!("failed to create raft engine: {}", s)); // Create kv engine, storage. let mut kv_db_opts = cfg.rocksdb.build_opt(); kv_db_opts.add_event_listener(compaction_listener); if let Some(ec) = encrypted_env { kv_db_opts.set_env(ec); } // Before create kv engine we need to check whether it needs to upgrade from v2.x to v3.x. // if let Err(e) = tikv::raftstore::store::maybe_upgrade_from_2_to_3( // &raft_engine, // db_path.to_str().unwrap(), // kv_db_opts.clone(), // &cfg.rocksdb, // &cache, // ) { // fatal!("failed to upgrade from v2.x to v3.x: {:?}", e); // }; // Create kv engine, storage. 
let kv_cfs_opts = cfg.rocksdb.build_cf_opts(&cache); let kv_engine = rocks::util::new_engine_opt(db_path.to_str().unwrap(), kv_db_opts, kv_cfs_opts) .unwrap_or_else(|s| fatal!("failed to create kv engine: {}", s)); let engines = Engines::new(Arc::new(kv_engine), Arc::new(raft_engine), cache.is_some()); let store_meta = Arc::new(Mutex::new(StoreMeta::new(PENDING_VOTES_CAP))); let local_reader = LocalReader::new(engines.kv.clone(), store_meta.clone(), router.clone()); let raft_router = ServerRaftStoreRouter::new(router.clone(), local_reader); let engine = RaftKv::new(raft_router.clone()); let storage_read_pool = storage::readpool_impl::build_read_pool( &cfg.readpool.storage, pd_sender.clone(), engine.clone(), ); let mut lock_mgr = if cfg.pessimistic_txn.enabled { Some(LockManager::new()) } else { None }; let storage = create_raft_storage( engine.clone(), &cfg.storage, storage_read_pool, Some(engines.kv.clone()), Some(raft_router.clone()), lock_mgr.clone(), ) .unwrap_or_else(|e| fatal!("failed to create raft storage: {}", e)); // Create snapshot manager, server. let snap_mgr = SnapManagerBuilder::default() .max_write_bytes_per_sec(cfg.server.snap_max_write_bytes_per_sec.0) .max_total_size(cfg.server.snap_max_total_size.0) .build( snap_path.as_path().to_str().unwrap().to_owned(), Some(router.clone()), ); let server_cfg = Arc::new(cfg.server.clone()); // Create coprocessor endpoint. let cop_read_pool = coprocessor::readpool_impl::build_read_pool( &cfg.readpool.coprocessor, pd_sender.clone(), engine.clone(), ); let cop = coprocessor::Endpoint::new(&server_cfg, cop_read_pool); let importer = Arc::new(SSTImporter::new(import_path).unwrap()); let import_service = ImportSSTService::new( cfg.import.clone(), raft_router.clone(), engines.kv.clone(), Arc::clone(&importer), ); // Create Debug service. let debug_service = DebugService::new(engines.clone(), raft_router.clone()); // Create Backup service. let mut backup_worker = tikv_util::worker::Worker::new("backup-endpoint"); let backup_scheduler = backup_worker.scheduler(); let backup_service = backup::Service::new(backup_scheduler); // Create server let mut server = Server::new( &server_cfg, &security_mgr, storage.clone(), cop, raft_router, resolver.clone(), snap_mgr.clone(), ) .unwrap_or_else(|e| fatal!("failed to create server: {}", e)); // Register services. if server .register_service(create_import_sst(import_service)) .is_some() { fatal!("failed to register import service"); } if server .register_service(create_debug(debug_service)) .is_some() { fatal!("failed to register debug service"); } if let Some(lm) = lock_mgr.as_ref() { if server .register_service(create_deadlock(lm.deadlock_service())) .is_some() { fatal!("failed to register deadlock service"); } } if server .register_service(create_backup(backup_service)) .is_some() { fatal!("failed to register backup service"); } let trans = server.transport(); // Create node. let mut node = Node::new(system, &server_cfg, &cfg.raft_store, pd_client.clone()); // Create CoprocessorHost. let mut coprocessor_host = CoprocessorHost::new(cfg.coprocessor.clone(), router); // Create region collection. let region_info_accessor = RegionInfoAccessor::new(&mut coprocessor_host); region_info_accessor.start(); // Register the role change observer of the lock manager. 
if let Some(lm) = lock_mgr.as_ref() { lm.register_detector_role_change_observer(&mut coprocessor_host); } node.start( engines.clone(), trans, snap_mgr, pd_worker, store_meta, coprocessor_host, importer, ) .unwrap_or_else(|e| fatal!("failed to start node: {}", e)); initial_metric(&cfg.metric, Some(node.id())); // Start backup endpoint. let backup_endpoint = backup::Endpoint::new( node.id(), engine.clone(), region_info_accessor.clone(), engines.kv.clone(), ); backup_worker .start(backup_endpoint) .unwrap_or_else(|e| fatal!("failed to start backup endpoint: {}", e)); // Start auto gc let auto_gc_cfg = AutoGCConfig::new( Arc::clone(&pd_client), region_info_accessor.clone(), node.id(), ); if let Err(e) = storage.start_auto_gc(auto_gc_cfg) { fatal!("failed to start auto_gc on storage, error: {}", e); } let mut metrics_flusher = MetricsFlusher::new( engines.clone(), Duration::from_millis(DEFAULT_FLUSHER_INTERVAL), ); // Start metrics flusher if let Err(e) = metrics_flusher.start() { error!( "failed to start metrics flusher"; "err" => %e ); } if let Some(lock_mgr) = lock_mgr.as_mut() { lock_mgr .start( node.id(), pd_client, resolver, Arc::clone(&security_mgr), &cfg.pessimistic_txn, ) .unwrap_or_else(|e| fatal!("failed to start lock manager: {}", e)); } // Run server. server .build_and_bind() .unwrap_or_else(|e| fatal!("failed to build server: {}", e)); server .start(server_cfg, security_mgr) .unwrap_or_else(|e| fatal!("failed to start server: {}", e)); let server_cfg = cfg.server.clone(); let mut status_enabled = cfg.metric.address.is_empty() && !server_cfg.status_addr.is_empty(); // Create a status server. // TODO: How to keep cfg updated? let mut status_server = StatusServer::new(server_cfg.status_thread_pool_size, cfg.clone()); if status_enabled { // Start the status server. if let Err(e) = status_server.start(server_cfg.status_addr) { error!( "failed to bind addr for status service"; "err" => %e ); status_enabled = false; } } signal_handler::handle_signal(Some(engines)); // Stop backup worker. if let Some(j) = backup_worker.stop() { j.join() .unwrap_or_else(|e| fatal!("failed to stop backup: {:?}", e)) } // Stop server. server .stop() .unwrap_or_else(|e| fatal!("failed to stop server: {}", e)); if status_enabled { // Stop the status server. status_server.stop() } metrics_flusher.stop(); node.stop(); region_info_accessor.stop(); if let Some(lm) = lock_mgr.as_mut() { lm.stop(); } if let Some(Err(e)) = worker.stop().map(JoinHandle::join) { info!( "ignore failure when stopping resolver"; "err" => ?e ); } } /// Various sanity-checks and logging before running a server. /// /// Warnings are logged and fatal errors exit. /// /// # Logs /// /// The presence of these environment variables that affect the database /// behavior is logged. /// /// - `GRPC_POLL_STRATEGY` /// - `http_proxy` and `https_proxy` /// /// # Warnings /// /// - if `net.core.somaxconn` < 32768 /// - if `net.ipv4.tcp_syncookies` is not 0 /// - if `vm.swappiness` is not 0 /// - if data directories are not on SSDs /// - if the "TZ" environment variable is not set on unix /// /// # Fatal errors /// /// If the max open file descriptor limit is not high enough to support /// the main database and the raft database. fn pre_start(cfg: &TiKvConfig) { // Before any startup, check system configuration and environment variables. 
check_system_config(&cfg); check_environment_variables(); if cfg.panic_when_unexpected_key_or_data { info!("panic-when-unexpected-key-or-data is on"); tikv_util::set_panic_when_unexpected_key_or_data(true); } } fn check_system_config(config: &TiKvConfig) { info!("beginning system configuration check"); let mut rocksdb_max_open_files = config.rocksdb.max_open_files; if config.rocksdb.titan.enabled { // Titan engine maintains yet another pool of blob files and uses the same max // number of open files setup as rocksdb does. So we double the max required // open files here rocksdb_max_open_files *= 2; } if let Err(e) = tikv_util::config::check_max_open_fds( RESERVED_OPEN_FDS + (rocksdb_max_open_files + config.raftdb.max_open_files) as u64, ) { fatal!("{}", e); } for e in tikv_util::config::check_kernel() { warn!( "check: kernel"; "err" => %e ); } // Check RocksDB data dir if let Err(e) = tikv_util::config::check_data_dir(&config.storage.data_dir) { warn!( "check: rocksdb-data-dir"; "path" => &config.storage.data_dir, "err" => %e ); } // Check raft data dir if let Err(e) = tikv_util::config::check_data_dir(&config.raft_store.raftdb_path) { warn!( "check: raftdb-path"; "path" => &config.raft_store.raftdb_path, "err" => %e ); } }
run_raft_server
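The open-file-descriptor budget computed in check_system_config above is easy to misread, so here is a minimal, self-contained Rust sketch of the same arithmetic: reserve fixed headroom, double the kv engine's share when Titan is enabled, and compare against the process limit. The concrete numbers (the limit and the max-open-files values) are hypothetical examples, not TiKV defaults.

// Sketch of the fd-budget check performed in check_system_config above.
const RESERVED_OPEN_FDS: u64 = 1000;

fn required_fds(rocksdb_max_open_files: i32, raftdb_max_open_files: i32, titan_enabled: bool) -> u64 {
    // Titan maintains its own pool of blob files alongside RocksDB's table
    // files, so the kv engine's allowance is doubled when it is enabled.
    let kv_fds = if titan_enabled { rocksdb_max_open_files * 2 } else { rocksdb_max_open_files };
    RESERVED_OPEN_FDS + (kv_fds + raftdb_max_open_files) as u64
}

fn main() {
    let ulimit: u64 = 65536; // e.g. what `ulimit -n` reports; hypothetical value
    let need = required_fds(20480, 20480, true);
    assert!(need <= ulimit, "need {} fds but only {} available", need, ulimit);
    println!("fd budget ok: need {}, have {}", need, ulimit);
}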
wallaby.js
module.exports = function(wallaby) { return { maxConsoleMessagesPerTest: 100000, files: ['src/**/*.js'],
tests: ['test/**/*.spec.js'], setup: function() { var chai = require('chai'); global.expect = chai.expect; }, env: { type: 'node', runner: 'node' }, compilers: { '**/*.js': wallaby.compilers.babel() } }; };
code_block.rs
pub mod code_block { use crate::util::string::string::escape_code_string; use once_cell::sync::Lazy; use regex::Regex; static CODE_BLOCK_PAREN_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new(r"^```[^`|.]*$").unwrap()); pub fn is_code_block_start(input: &String) -> bool { CODE_BLOCK_PAREN_PATTERN.is_match(input) } pub fn parse_code_block(input: Vec<String>) -> Vec<String> { input.iter().map(|s| escape_code_string(s)).collect() } #[cfg(test)] mod test_code_block { use super::*; #[test] fn test_is_code_block_start() { #[derive(Debug)] struct TestCase { it: String, input: String, expected: bool, } let test_cases = [ TestCase { it: String::from("should return true when input is ```"), input: String::from("```"), expected: true, }, TestCase { it: String::from("should return true when input is ```<langname>"), input: String::from("```html"), expected: true, }, TestCase { it: String::from("should return false when input is ``"), input: String::from("``"), expected: false, }, TestCase { it: String::from("should return false when input is `"), input: String::from("`"), expected: false, }, ]; for test_case in test_cases.iter() { let output = is_code_block_start(&test_case.input); assert_eq!(output, test_case.expected, "Failed: {}\n", test_case.it); } } #[test] fn test_parse_code_block()
} }
{ let input = [ r#"<script src="/a/b.js">alert('aaa')</script>"#, r#"'aaa'"#, r#""aaa""#, ] .iter() .map(|&s| s.into()) .collect(); let expected: Vec<String> = [ "&lt;script src=&quot;/a/b.js&quot;&gt;alert(&#39;aaa&#39;)&lt;/script&gt;", "&#39;aaa&#39;", "&quot;aaa&quot;", ] .iter() .map(|&s| s.into()) .collect(); let output = parse_code_block(input); assert_eq!(output, expected); }
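The fence-detection pattern in code_block.rs is terse, so a standalone sketch of which lines it accepts may help. This assumes the same `regex` crate as the module above; the sample lines are illustrative only.

// Demonstrates the behavior of r"^```[^`|.]*$": a bare fence or a fence with
// a simple language tag matches, but tags containing '`', '|' or '.' do not.
use regex::Regex;

fn main() {
    let pat = Regex::new(r"^```[^`|.]*$").unwrap();
    let cases = [
        ("```", true),     // bare opening fence
        ("```rust", true), // fence with a simple language tag
        ("```a.b", false), // '.' is rejected by the character class
        ("````", false),   // a fourth backtick is rejected
        ("``", false),     // only two backticks: not a fence at all
    ];
    for &(line, expect) in cases.iter() {
        assert_eq!(pat.is_match(line), expect, "case {:?}", line);
    }
    println!("all fence cases behave as expected");
}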
ray.go
package ray import ( "github.com/nicholasblaskey/raytracer/matrix" "github.com/nicholasblaskey/raytracer/tuple" ) type Ray struct { Origin tuple.Tuple Direction tuple.Tuple } func New(origin, dir tuple.Tuple) Ray
func (r Ray) PositionAt(t float64) tuple.Tuple { return r.Origin.Add(r.Direction.Mul(t)) } func (r Ray) Transform(m matrix.Mat4) Ray { return Ray{ m.Mul4x1(r.Origin), m.Mul4x1(r.Direction), } }
{ return Ray{origin, dir} }
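For contrast with the Go ray above, here is a minimal Rust sketch of the same position-at computation, P(t) = O + t*D. A bare f64 triple stands in for the tuple package; all names and values are illustrative.

// Sketch: a point on a ray is the origin displaced by t units of direction.
#[derive(Debug, PartialEq, Clone, Copy)]
struct Tuple { x: f64, y: f64, z: f64 }

impl Tuple {
    fn add(self, o: Tuple) -> Tuple { Tuple { x: self.x + o.x, y: self.y + o.y, z: self.z + o.z } }
    fn mul(self, t: f64) -> Tuple { Tuple { x: self.x * t, y: self.y * t, z: self.z * t } }
}

struct Ray { origin: Tuple, direction: Tuple }

impl Ray {
    fn position_at(&self, t: f64) -> Tuple { self.origin.add(self.direction.mul(t)) }
}

fn main() {
    let r = Ray { origin: Tuple { x: 2.0, y: 3.0, z: 4.0 }, direction: Tuple { x: 1.0, y: 0.0, z: 0.0 } };
    // Walking t along the ray moves the point t units along x.
    assert_eq!(r.position_at(0.0), Tuple { x: 2.0, y: 3.0, z: 4.0 });
    assert_eq!(r.position_at(2.5), Tuple { x: 4.5, y: 3.0, z: 4.0 });
    println!("position_at matches P(t) = O + t*D");
}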
test.py
import pytest from helpers.cluster import ClickHouseCluster import random import string import os import time from multiprocessing.dummy import Pool cluster = ClickHouseCluster(__file__) node = cluster.add_instance('node', main_configs=['configs/enable_test_keeper.xml', 'configs/logs_conf.xml'], with_zookeeper=True) from kazoo.client import KazooClient _genuine_zk_instance = None _fake_zk_instance = None def get_genuine_zk(): global _genuine_zk_instance if not _genuine_zk_instance: print("Zoo1", cluster.get_instance_ip("zoo1")) _genuine_zk_instance = cluster.get_kazoo_client('zoo1') return _genuine_zk_instance def get_fake_zk(): global _fake_zk_instance if not _fake_zk_instance: print("node", cluster.get_instance_ip("node")) _fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181") _fake_zk_instance.start() return _fake_zk_instance def random_string(length): return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length)) def create_random_path(prefix="", depth=1): if depth == 0: return prefix return create_random_path(os.path.join(prefix, random_string(3)), depth - 1) @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() yield cluster finally: cluster.shutdown() if _genuine_zk_instance: _genuine_zk_instance.stop() _genuine_zk_instance.close() if _fake_zk_instance: _fake_zk_instance.stop() _fake_zk_instance.close() def test_simple_commands(started_cluster): genuine_zk = get_genuine_zk() fake_zk = get_fake_zk() for zk in [genuine_zk, fake_zk]: zk.create("/test_simple_commands", b"") zk.create("/test_simple_commands/somenode1", b"hello") zk.set("/test_simple_commands/somenode1", b"world") for zk in [genuine_zk, fake_zk]: assert zk.exists("/test_simple_commands") assert zk.exists("/test_simple_commands/somenode1") print(zk.get("/test_simple_commands/somenode1")) assert zk.get("/test_simple_commands/somenode1")[0] == b"world" def test_sequential_nodes(started_cluster): genuine_zk = get_genuine_zk() fake_zk = get_fake_zk() genuine_zk.create("/test_sequential_nodes") fake_zk.create("/test_sequential_nodes") for i in range(1, 11): genuine_zk.create("/test_sequential_nodes/" + ("a" * i) + "-", sequence=True) genuine_zk.create("/test_sequential_nodes/" + ("b" * i)) fake_zk.create("/test_sequential_nodes/" + ("a" * i) + "-", sequence=True) fake_zk.create("/test_sequential_nodes/" + ("b" * i)) genuine_children = list(sorted(genuine_zk.get_children("/test_sequential_nodes"))) fake_children = list(sorted(fake_zk.get_children("/test_sequential_nodes"))) assert genuine_children == fake_children def assert_eq_stats(stat1, stat2): assert stat1.version == stat2.version assert stat1.cversion == stat2.cversion assert stat1.aversion == stat2.aversion assert stat1.dataLength == stat2.dataLength assert stat1.numChildren == stat2.numChildren def test_stats(started_cluster): genuine_zk = get_genuine_zk() fake_zk = get_fake_zk() genuine_zk.create("/test_stats_nodes") fake_zk.create("/test_stats_nodes") genuine_stats = genuine_zk.exists("/test_stats_nodes") fake_stats = fake_zk.exists("/test_stats_nodes") assert_eq_stats(genuine_stats, fake_stats) for i in range(1, 11): genuine_zk.create("/test_stats_nodes/" + ("a" * i) + "-", sequence=True) genuine_zk.create("/test_stats_nodes/" + ("b" * i)) fake_zk.create("/test_stats_nodes/" + ("a" * i) + "-", sequence=True) fake_zk.create("/test_stats_nodes/" + ("b" * i)) genuine_stats = genuine_zk.exists("/test_stats_nodes") fake_stats = fake_zk.exists("/test_stats_nodes")
assert_eq_stats(genuine_stats, fake_stats) for i in range(1, 11): print("/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2)) genuine_zk.delete("/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2)) genuine_zk.delete("/test_stats_nodes/" + ("b" * i)) fake_zk.delete("/test_stats_nodes/" + ("a" * i) + "-" + "{:010d}".format((i - 1) * 2)) fake_zk.delete("/test_stats_nodes/" + ("b" * i)) genuine_stats = genuine_zk.exists("/test_stats_nodes") fake_stats = fake_zk.exists("/test_stats_nodes") print(genuine_stats) print(fake_stats) assert_eq_stats(genuine_stats, fake_stats) for i in range(100): genuine_zk.set("/test_stats_nodes", ("q" * i).encode()) fake_zk.set("/test_stats_nodes", ("q" * i).encode()) genuine_stats = genuine_zk.exists("/test_stats_nodes") fake_stats = fake_zk.exists("/test_stats_nodes") print(genuine_stats) print(fake_stats) assert_eq_stats(genuine_stats, fake_stats) def test_watchers(started_cluster): genuine_zk = get_genuine_zk() fake_zk = get_fake_zk() genuine_zk.create("/test_data_watches") fake_zk.create("/test_data_watches") genuine_data_watch_data = None def genuine_callback(event): print("Genuine data watch called") nonlocal genuine_data_watch_data genuine_data_watch_data = event fake_data_watch_data = None def fake_callback(event): print("Fake data watch called") nonlocal fake_data_watch_data fake_data_watch_data = event genuine_zk.get("/test_data_watches", watch=genuine_callback) fake_zk.get("/test_data_watches", watch=fake_callback) print("Calling set genuine") genuine_zk.set("/test_data_watches", b"a") print("Calling set fake") fake_zk.set("/test_data_watches", b"a") time.sleep(3) print("Genuine data", genuine_data_watch_data) print("Fake data", fake_data_watch_data) assert genuine_data_watch_data == fake_data_watch_data genuine_children = None def genuine_child_callback(event): print("Genuine child watch called") nonlocal genuine_children genuine_children = event fake_children = None def fake_child_callback(event): print("Fake child watch called") nonlocal fake_children fake_children = event genuine_zk.get_children("/test_data_watches", watch=genuine_child_callback) fake_zk.get_children("/test_data_watches", watch=fake_child_callback) print("Calling genuine child") genuine_zk.create("/test_data_watches/child", b"b") print("Calling fake child") fake_zk.create("/test_data_watches/child", b"b") time.sleep(3) print("Genuine children", genuine_children) print("Fake children", fake_children) assert genuine_children == fake_children def test_multitransactions(started_cluster): genuine_zk = get_genuine_zk() fake_zk = get_fake_zk() for zk in [genuine_zk, fake_zk]: zk.create('/test_multitransactions') t = zk.transaction() t.create('/test_multitransactions/freddy') t.create('/test_multitransactions/fred', ephemeral=True) t.create('/test_multitransactions/smith', sequence=True) results = t.commit() assert len(results) == 3 assert results[0] == '/test_multitransactions/freddy' assert results[2].startswith('/test_multitransactions/smith0') is True from kazoo.exceptions import RolledBackError, NoNodeError for i, zk in enumerate([genuine_zk, fake_zk]): print("Processing ZK", i) t = zk.transaction() t.create('/test_multitransactions/q') t.delete('/test_multitransactions/a') t.create('/test_multitransactions/x') results = t.commit() print("Results", results) assert results[0].__class__ == RolledBackError assert results[1].__class__ == NoNodeError assert zk.exists('/test_multitransactions/q') is None assert zk.exists('/test_multitransactions/a') is 
None assert zk.exists('/test_multitransactions/x') is None def exists(zk, path):
def get(zk, path): result = zk.get(path) return result[0] def get_children(zk, path): return [elem for elem in list(sorted(zk.get_children(path))) if elem not in ('clickhouse', 'zookeeper')] READ_REQUESTS = [ ("exists", exists), ("get", get), ("get_children", get_children), ] def create(zk, path, data): zk.create(path, data.encode()) def set_data(zk, path, data): zk.set(path, data.encode()) WRITE_REQUESTS = [ ("create", create), ("set_data", set_data), ] def delete(zk, path): zk.delete(path) DELETE_REQUESTS = [ ("delete", delete) ] class Request(object): def __init__(self, name, arguments, callback, is_return): self.name = name self.arguments = arguments self.callback = callback self.is_return = is_return def __str__(self): arg_str = ', '.join([str(k) + "=" + str(v) for k, v in self.arguments.items()]) return "ZKRequest name {} with arguments {}".format(self.name, arg_str) def generate_requests(iters=1): requests = [] existing_paths = [] for i in range(iters): for _ in range(100): rand_length = random.randint(0, 10) path = "/" for j in range(1, rand_length): path = create_random_path(path, 1) existing_paths.append(path) value = random_string(1000) request = Request("create", {"path" : path, "value": value[0:10]}, lambda zk, path=path, value=value: create(zk, path, value), False) requests.append(request) for _ in range(100): path = random.choice(existing_paths) value = random_string(100) request = Request("set", {"path": path, "value": value[0:10]}, lambda zk, path=path, value=value: set_data(zk, path, value), False) requests.append(request) for _ in range(100): path = random.choice(existing_paths) callback = random.choice(READ_REQUESTS) def read_func1(zk, path=path, callback=callback): return callback[1](zk, path) request = Request(callback[0], {"path": path}, read_func1, True) requests.append(request) for _ in range(30): path = random.choice(existing_paths) request = Request("delete", {"path": path}, lambda zk, path=path: delete(zk, path), False) for _ in range(100): path = random.choice(existing_paths) callback = random.choice(READ_REQUESTS) def read_func2(zk, path=path, callback=callback): return callback[1](zk, path) request = Request(callback[0], {"path": path}, read_func2, True) requests.append(request) return requests def test_random_requests(started_cluster): requests = generate_requests(10) genuine_zk = get_genuine_zk() fake_zk = get_fake_zk() for i, request in enumerate(requests): genuine_throw = False fake_throw = False fake_result = None genuine_result = None try: genuine_result = request.callback(genuine_zk) except Exception as ex: genuine_throw = True try: fake_result = request.callback(fake_zk) except Exception as ex: fake_throw = True assert fake_throw == genuine_throw, "Fake throw genuine not or vise versa" assert fake_result == genuine_result, "Zookeeper results differ" root_children_genuine = [elem for elem in list(sorted(genuine_zk.get_children("/"))) if elem not in ('clickhouse', 'zookeeper')] root_children_fake = [elem for elem in list(sorted(fake_zk.get_children("/"))) if elem not in ('clickhouse', 'zookeeper')] assert root_children_fake == root_children_genuine def test_end_of_session(started_cluster): fake_zk1 = None fake_zk2 = None genuine_zk1 = None genuine_zk2 = None try: fake_zk1 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181") fake_zk1.start() fake_zk2 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181") fake_zk2.start() genuine_zk1 = cluster.get_kazoo_client('zoo1') genuine_zk1.start() genuine_zk2 = cluster.get_kazoo_client('zoo1') 
genuine_zk2.start() fake_zk1.create("/test_end_of_session") genuine_zk1.create("/test_end_of_session") fake_ephemeral_event = None def fake_ephemeral_callback(event): print("Fake watch triggered") nonlocal fake_ephemeral_event fake_ephemeral_event = event genuine_ephemeral_event = None def genuine_ephemeral_callback(event): print("Genuine watch triggered") nonlocal genuine_ephemeral_event genuine_ephemeral_event = event assert fake_zk2.exists("/test_end_of_session") is not None assert genuine_zk2.exists("/test_end_of_session") is not None fake_zk1.create("/test_end_of_session/ephemeral_node", ephemeral=True) genuine_zk1.create("/test_end_of_session/ephemeral_node", ephemeral=True) assert fake_zk2.exists("/test_end_of_session/ephemeral_node", watch=fake_ephemeral_callback) is not None assert genuine_zk2.exists("/test_end_of_session/ephemeral_node", watch=genuine_ephemeral_callback) is not None print("Stopping genuine zk") genuine_zk1.stop() print("Closing genuine zk") genuine_zk1.close() print("Stopping fake zk") fake_zk1.stop() print("Closing fake zk") fake_zk1.close() assert fake_zk2.exists("/test_end_of_session/ephemeral_node") is None assert genuine_zk2.exists("/test_end_of_session/ephemeral_node") is None assert fake_ephemeral_event == genuine_ephemeral_event finally: try: for zk in [fake_zk1, fake_zk2, genuine_zk1, genuine_zk2]: if zk: zk.stop() zk.close() except: pass def test_end_of_watches_session(started_cluster): fake_zk1 = None fake_zk2 = None try: fake_zk1 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181") fake_zk1.start() fake_zk2 = KazooClient(hosts=cluster.get_instance_ip("node") + ":9181") fake_zk2.start() fake_zk1.create("/test_end_of_watches_session") dummy_set = 0 def dummy_callback(event): nonlocal dummy_set dummy_set += 1 print(event) for child_node in range(100): fake_zk1.create("/test_end_of_watches_session/" + str(child_node)) fake_zk1.get_children("/test_end_of_watches_session/" + str(child_node), watch=dummy_callback) fake_zk2.get_children("/test_end_of_watches_session/" + str(0), watch=dummy_callback) fake_zk2.get_children("/test_end_of_watches_session/" + str(1), watch=dummy_callback) fake_zk1.stop() fake_zk1.close() for child_node in range(100): fake_zk2.create("/test_end_of_watches_session/" + str(child_node) + "/" + str(child_node), b"somebytes") assert dummy_set == 2 finally: try: for zk in [fake_zk1, fake_zk2]: if zk: zk.stop() zk.close() except: pass def test_concurrent_watches(started_cluster): fake_zk = get_fake_zk() fake_zk.restart() global_path = "/test_concurrent_watches_0" fake_zk.create(global_path) dumb_watch_triggered_counter = 0 all_paths_triggered = [] existing_path = [] all_paths_created = [] watches_created = 0 def create_path_and_watch(i): nonlocal watches_created nonlocal all_paths_created fake_zk.ensure_path(global_path + "/" + str(i)) # new function each time def dumb_watch(event): nonlocal dumb_watch_triggered_counter dumb_watch_triggered_counter += 1 nonlocal all_paths_triggered all_paths_triggered.append(event.path) fake_zk.get(global_path + "/" + str(i), watch=dumb_watch) all_paths_created.append(global_path + "/" + str(i)) watches_created += 1 existing_path.append(i) trigger_called = 0 def trigger_watch(i): nonlocal trigger_called trigger_called += 1 fake_zk.set(global_path + "/" + str(i), b"somevalue") try: existing_path.remove(i) except: pass def call(total): for i in range(total): create_path_and_watch(random.randint(0, 1000)) time.sleep(random.random() % 0.5) try: rand_num = random.choice(existing_path) 
trigger_watch(rand_num) except: pass while existing_path: try: rand_num = random.choice(existing_path) trigger_watch(rand_num) except: pass p = Pool(10) arguments = [100] * 10 watches_must_be_created = sum(arguments) watches_trigger_must_be_called = sum(arguments) watches_must_be_triggered = sum(arguments) p.map(call, arguments) p.close() # waiting for late watches for i in range(50): if dumb_watch_triggered_counter == watches_must_be_triggered: break time.sleep(0.1) assert watches_created == watches_must_be_created assert trigger_called >= watches_trigger_must_be_called assert len(existing_path) == 0 if dumb_watch_triggered_counter != watches_must_be_triggered: print("All created paths", all_paths_created) print("All triggerred paths", all_paths_triggered) print("All paths len", len(all_paths_created)) print("All triggered len", len(all_paths_triggered)) print("Diff", list(set(all_paths_created) - set(all_paths_triggered))) assert dumb_watch_triggered_counter == watches_must_be_triggered
result = zk.exists(path) return result is not None
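# --- Added illustration (not part of the original test file) ---
# Every test above follows the same pattern: run one operation against the
# genuine ZooKeeper ("zoo1") and against ClickHouse's test keeper ("node",
# port 9181, both taken from the fixture above) and require identical
# outcomes, including identical failures. A minimal sketch of that pattern,
# assuming kazoo is installed; the host strings in the usage comment are
# illustrative placeholders:

from kazoo.client import KazooClient


def compare_on_both(genuine_hosts, fake_hosts, op):
    """Apply `op` to both servers; pass only if results (or errors) match."""
    outcomes = []
    for hosts in (genuine_hosts, fake_hosts):
        zk = KazooClient(hosts=hosts)
        zk.start()
        try:
            outcomes.append(("ok", op(zk)))
        except Exception as ex:
            # both sides must fail in the same way
            outcomes.append(("err", type(ex).__name__))
        finally:
            zk.stop()
            zk.close()
    assert outcomes[0] == outcomes[1], outcomes
    return outcomes[0]


# Usage (hosts are placeholders for the cluster IPs resolved in the fixture):
# compare_on_both("zoo1:2181", "node:9181",
#                 lambda zk: zk.get("/test_simple_commands/somenode1")[0])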
preprocess.py
import argparse
import sys
from collections import Counter

from tqdm import tqdm
from transformers import AutoTokenizer


def read_and_preprocess(file: str):
    subword_len_counter = 0
    with open(file, "rt") as f_p:
        for line in f_p:
            line = line.rstrip()
            if not line:
                yield line
                subword_len_counter = 0
                continue
            token = line.split()[0]
            current_subwords_len = len(tokenizer.tokenize(token))
            # Token contains strange control characters like \x96 or \x95
            # Just filter out the complete line
            if current_subwords_len == 0:
                continue
            if (subword_len_counter + current_subwords_len) > max_len:
                yield ""
                yield line
                subword_len_counter = current_subwords_len
                continue
            subword_len_counter += current_subwords_len
            yield line


def build_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    # parser.add_argument(
    #     "--data_dir",
    #     default=None,
    #     type=str,
    #     required=True,
    #     help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    # )
    args = parser.parse_args()
    return args


def get_label(s: str):
    x = s.split(' ')
    if len(x) == 2:
        label = x[1]
    else:
        label = None
    return label


if __name__ == '__main__':
    args = build_args()
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
    max_len = args.max_seq_length
    max_len -= tokenizer.num_special_tokens_to_add()
    label_counter = Counter()

    def count_and_return(l: str):
        label = get_label(l)
        if label is not None:
            label_counter.update({label: 1})
        return l

    for split_name in ['train', 'dev', 'test']:
        dataset = "%s.txt.tmp" % split_name
        with open("%s.txt" % split_name, 'w') as f:
            f.writelines("%s\n" % count_and_return(l) for l in tqdm(read_and_preprocess(dataset)))
    with open('labels.txt', 'w') as f:
        f.writelines("%s\n" % l for l in label_counter.keys())
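# --- Added illustration (not part of the original script) ---
# read_and_preprocess() keeps a running subword budget and emits a blank line
# (a CoNLL-style sentence boundary) whenever the next token would push the
# sequence past max_len. A self-contained sketch of that budget logic with a
# stub tokenizer, so the splitting behaviour can be checked without
# downloading a model (the 4-characters-per-subword rule is an invented
# assumption, not how real tokenizers behave):


class StubTokenizer:
    def tokenize(self, token):
        # pretend every 4 characters become one subword
        return [token[i:i + 4] for i in range(0, len(token), 4)]


def split_on_budget(tokens, tokenizer, max_len):
    budget = 0
    for token in tokens:
        n = len(tokenizer.tokenize(token))
        if n == 0:
            continue  # drop tokens that tokenize to nothing (control chars)
        if budget + n > max_len:
            yield ""  # sentence boundary
            budget = 0
        budget += n
        yield token


print(list(split_on_budget(["internationalization", "of", "text"], StubTokenizer(), 5)))
# -> ['internationalization', '', 'of', 'text']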
dcim_devices_list_responses.go
// Code generated by go-swagger; DO NOT EDIT.

// Copyright 2018 The go-netbox Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package dcim

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command

import (
	"fmt"
	"io"
	"strconv"

	"github.com/go-openapi/errors"
	"github.com/go-openapi/runtime"
	"github.com/go-openapi/swag"
	"github.com/go-openapi/validate"

	strfmt "github.com/go-openapi/strfmt"

	models "github.com/h0x91b-wix/go-netbox/netbox/models"
)

// DcimDevicesListReader is a Reader for the DcimDevicesList structure.
type DcimDevicesListReader struct {
	formats strfmt.Registry
}

// ReadResponse reads a server response into the received o.
func (o *DcimDevicesListReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
	switch response.Code() {
	case 200:
		result := NewDcimDevicesListOK()
		if err := result.readResponse(response, consumer, o.formats); err != nil {
			return nil, err
		}
		return result, nil

	default:
		return nil, runtime.NewAPIError("unknown error", response, response.Code())
	}
}

// NewDcimDevicesListOK creates a DcimDevicesListOK with default headers values
func NewDcimDevicesListOK() *DcimDevicesListOK {
	return &DcimDevicesListOK{}
}

/*DcimDevicesListOK handles this case with default header values.

DcimDevicesListOK dcim devices list o k
*/
type DcimDevicesListOK struct {
	Payload *DcimDevicesListOKBody
}

func (o *DcimDevicesListOK) Error() string {
	return fmt.Sprintf("[GET /dcim/devices/][%d] dcimDevicesListOK %+v", 200, o.Payload)
}

func (o *DcimDevicesListOK) GetPayload() *DcimDevicesListOKBody {
	return o.Payload
}

func (o *DcimDevicesListOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
	o.Payload = new(DcimDevicesListOKBody)

	// response payload
	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
		return err
	}

	return nil
}

/*DcimDevicesListOKBody dcim devices list o k body
swagger:model DcimDevicesListOKBody
*/
type DcimDevicesListOKBody struct {

	// count
	// Required: true
	Count *int64 `json:"count"`

	// next
	// Format: uri
	Next *strfmt.URI `json:"next,omitempty"`

	// previous
	// Format: uri
	Previous *strfmt.URI `json:"previous,omitempty"`

	// results
	// Required: true
	Results []*models.DeviceWithConfigContext `json:"results"`
}

// Validate validates this dcim devices list o k body
func (o *DcimDevicesListOKBody) Validate(formats strfmt.Registry) error {
	var res []error

	if err := o.validateCount(formats); err != nil {
		res = append(res, err)
	}

	if err := o.validateNext(formats); err != nil {
		res = append(res, err)
	}

	if err := o.validatePrevious(formats); err != nil {
		res = append(res, err)
	}

	if err := o.validateResults(formats); err != nil {
		res = append(res, err)
	}

	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}

func (o *DcimDevicesListOKBody) validateCount(formats strfmt.Registry) error {

	if err := validate.Required("dcimDevicesListOK"+"."+"count", "body", o.Count); err != nil {
		return err
	}

	return nil
}

func (o *DcimDevicesListOKBody) validateNext(formats strfmt.Registry) error {

	if swag.IsZero(o.Next) { // not required
		return nil
	}

	if err := validate.FormatOf("dcimDevicesListOK"+"."+"next", "body", "uri", o.Next.String(), formats); err != nil {
		return err
	}

	return nil
}

func (o *DcimDevicesListOKBody) validatePrevious(formats strfmt.Registry) error {

	if swag.IsZero(o.Previous) { // not required
		return nil
	}

	if err := validate.FormatOf("dcimDevicesListOK"+"."+"previous", "body", "uri", o.Previous.String(), formats); err != nil {
		return err
	}

	return nil
}

func (o *DcimDevicesListOKBody) validateResults(formats strfmt.Registry) error {

	if err := validate.Required("dcimDevicesListOK"+"."+"results", "body", o.Results); err != nil {
		return err
	}

	for i := 0; i < len(o.Results); i++ {
		if swag.IsZero(o.Results[i]) { // not required
			continue
		}

		if o.Results[i] != nil {
			if err := o.Results[i].Validate(formats); err != nil {
				if ve, ok := err.(*errors.Validation); ok {
					return ve.ValidateName("dcimDevicesListOK" + "." + "results" + "." + strconv.Itoa(i))
				}
				return err
			}
		}
	}

	return nil
}

// MarshalBinary interface implementation
func (o *DcimDevicesListOKBody) MarshalBinary() ([]byte, error) {
	if o == nil {
		return nil, nil
	}
	return swag.WriteJSON(o)
}

// UnmarshalBinary interface implementation
func (o *DcimDevicesListOKBody) UnmarshalBinary(b []byte) error {
	var res DcimDevicesListOKBody
	if err := swag.ReadJSON(b, &res); err != nil {
		return err
	}
	*o = res
	return nil
}
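# --- Added illustration (not part of the generated Go file above) ---
# DcimDevicesListOKBody mirrors NetBox's standard paginated envelope:
# count, next, previous, results. A hedged sketch of consuming that contract
# directly over HTTP (Python/requests for brevity; the base URL and token
# below are placeholders, and error handling is omitted):

import requests


def iter_devices(base_url, token):
    """Walk the paginated /dcim/devices/ listing, following `next` links."""
    url = base_url.rstrip("/") + "/api/dcim/devices/"
    headers = {"Authorization": "Token " + token}
    while url:
        body = requests.get(url, headers=headers, timeout=30).json()
        # body carries the same fields as DcimDevicesListOKBody
        for device in body["results"]:
            yield device
        url = body.get("next")  # None on the last page


# for d in iter_devices("https://netbox.example.com", "PLACEHOLDER_TOKEN"):
#     print(d["name"])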
cardapio.module.ts
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { FormsModule } from '@angular/forms';

import { IonicModule } from '@ionic/angular';

import { CardapioPageRoutingModule } from './cardapio-routing.module';

import { CardapioPage } from './cardapio.page';
import { ComponentsModule } from '../components/components.module';
import { DetalhesItemPage } from '../detalhes-item/detalhes-item.page';
import { EnderecosPage } from '../enderecos/enderecos.page';

@NgModule({
  entryComponents: [DetalhesItemPage],
  imports: [
    ComponentsModule,
    CommonModule,
    FormsModule,
    IonicModule,
    CardapioPageRoutingModule
  ],
  declarations: [CardapioPage, DetalhesItemPage]
})
export class CardapioPageModule {}
pool.rs
use std::convert::{TryFrom, TryInto};
use std::marker::PhantomData;

use serde::{Deserialize, Serialize};
use serde_json::{from_value, Value};

use crate::backends::convnets::{AvgPool2D, DataFormat, MaxPool2D, Padding, Pool2, Stride2};
use crate::backends::Backend;
use crate::common::traits::Name;
use crate::common::types::{HError, HResult};
use crate::layers::traits::{Apply, FromJson};
use crate::model::binary_format::WeightsMap;

#[derive(Serialize, Deserialize)]
struct Pool2DLayerSpec {
    name: String,
    pool_window: Pool2,
    strides: Stride2,
    data_format: String,
    padding: String,
}

pub(crate) struct AvgPool2DLayer<B: Backend> {
    name: String,
    pool_window: Pool2,
    strides: Stride2,
    data_format: DataFormat,
    padding: Padding,
    _marker: PhantomData<B>,
}

impl<B: Backend> AvgPool2DLayer<B> {
    fn new(
        name: String,
        pool_window: Pool2,
        strides: Stride2,
        data_format: DataFormat,
        padding: Padding,
    ) -> AvgPool2DLayer<B> {
        AvgPool2DLayer {
            name,
            pool_window,
            strides,
            data_format,
            padding,
            _marker: PhantomData::<B>,
        }
    }
}

impl<B: Backend> Name for AvgPool2DLayer<B> {
    fn name(&self) -> &String {
        &self.name
    }
}

impl<B: Backend> Apply<B> for AvgPool2DLayer<B> {
    fn apply(&self, input: B::CommonRepr) -> HResult<B::CommonRepr> {
        let input: B::Tensor4D = <B::CommonRepr as TryInto<B::Tensor4D>>::try_into(input)?;
        let output: B::Tensor4D = input.avg_pool2d(
            self.pool_window.clone(),
            self.strides.clone(),
            self.padding,
            self.data_format,
        )?;
        Ok(output.try_into()?)
    }
}

impl<B: Backend> FromJson for AvgPool2DLayer<B> {
    const TYPE: &'static str = "AvgPool2D";

    type Error = HError;

    fn from_json(json: &Value, _weights: &mut WeightsMap) -> HResult<Self> {
        let spec: Pool2DLayerSpec = from_value(json.clone())?;
        let padding = Padding::try_from(spec.padding.as_str())?;
        let data_format = DataFormat::try_from(spec.data_format.as_str())?;
        Ok(AvgPool2DLayer::new(
            spec.name,
            spec.pool_window,
            spec.strides,
            data_format,
            padding,
        ))
    }
}

pub(crate) struct MaxPool2DLayer<B: Backend> {
    name: String,
    pool_window: Pool2,
    strides: Stride2,
    data_format: DataFormat,
    padding: Padding,
    _marker: PhantomData<B>,
}

impl<B: Backend> MaxPool2DLayer<B> {
    fn new(
        name: String,
        pool_window: Pool2,
        strides: Stride2,
        data_format: DataFormat,
        padding: Padding,
    ) -> MaxPool2DLayer<B> {
        MaxPool2DLayer {
            name,
            pool_window,
            strides,
            data_format,
            padding,
            _marker: PhantomData::<B>,
        }
    }
}

impl<B: Backend> Name for MaxPool2DLayer<B> {
    fn name(&self) -> &String {
        &self.name
    }
}

impl<B: Backend> Apply<B> for MaxPool2DLayer<B> {
    fn apply(&self, input: B::CommonRepr) -> HResult<B::CommonRepr> {
        let input: B::Tensor4D = <B::CommonRepr as TryInto<B::Tensor4D>>::try_into(input)?;
        let output: B::Tensor4D = input.max_pool2d(
            self.pool_window.clone(),
            self.strides.clone(),
            self.padding,
            self.data_format,
        )?;
        Ok(output.try_into()?)
    }
}

impl<B: Backend> FromJson for MaxPool2DLayer<B> {
    const TYPE: &'static str = "MaxPool2D";

    type Error = HError;

    fn from_json(json: &Value, _weights: &mut WeightsMap) -> HResult<Self> {
        let spec: Pool2DLayerSpec = from_value(json.clone())?;
        let padding = Padding::try_from(spec.padding.as_str())?;
        let data_format = DataFormat::try_from(spec.data_format.as_str())?;
        Ok(MaxPool2DLayer::new(
            spec.name,
            spec.pool_window,
            spec.strides,
            data_format,
            padding,
        ))
    }
}
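# --- Added illustration (not part of the original Rust source above) ---
# For reference, the operation that AvgPool2DLayer forwards to the backend:
# slide a window over the spatial dimensions and average each patch. A
# minimal NumPy sketch, assuming channels-last (NHWC) layout and valid
# padding; the real layers also support other data formats and padding
# modes via DataFormat and Padding:

import numpy as np


def avg_pool2d(x, window, strides):
    """Average pooling with valid padding on an NHWC tensor."""
    n, h, w, c = x.shape
    wh, ww = window
    sh, sw = strides
    oh = (h - wh) // sh + 1
    ow = (w - ww) // sw + 1
    out = np.empty((n, oh, ow, c), dtype=x.dtype)
    for i in range(oh):
        for j in range(ow):
            patch = x[:, i * sh:i * sh + wh, j * sw:j * sw + ww, :]
            out[:, i, j, :] = patch.mean(axis=(1, 2))
    return out


x = np.arange(16, dtype=float).reshape(1, 4, 4, 1)
print(avg_pool2d(x, (2, 2), (2, 2))[0, :, :, 0])
# -> [[ 2.5  4.5]
#     [10.5 12.5]]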
mechon_mamre_org.py
#!/usr/bin/env python
# coding: utf-8
"""Download and parse Tanakh from <http://mechon-mamre.org/>.

The text is based on the [Aleppo Codex][1].

[1]: https://en.wikipedia.org/wiki/Aleppo_Codex

Each book is in a separate HTML file (e.g., `c01.htm`) and contains
navigation and textual data. The relevant structure is:

```html
<BODY>
<H1>...</H1>
<P>
<B>...,...</B> ...
</P>
</BODY>
```

Notes:
- verses are newline-delimited
- `<H1>` Hebrew book name
- `<B>` comma-separated Hebrew numbering of chapter and verse;
  for multipart volumes (e.g., Samuel, Kings) also contains the part number
- `<BIG>`, `<SMALL>`, `<SUP>` around specific letter (we keep)
- `<A...>...</A>` links to notes (we ignore)
- `<BR>` within the text indicates a line break (we replace with a space)
- `{...}<BR>` indicates `pe` break (we ignore)
- `{...}` indicates `samekh` break (we ignore)
- `(...)` indicates the qere (we keep);
  the unvowelized previous word is the ketiv (we ignore)
"""

# native
from functools import partial
from multiprocessing import Queue
from pathlib import Path
from typing import List
import os
import re

# lib
from tqdm import tqdm

# pkg
from . import parse_args, download_unzip, Msg, queuer, spawn_processes, save_database
from .. import tokens as T, grammar

BOOK_NAMES = {
    "בראשית": "Genesis",
    "שמות": "Exodus",
    "ויקרא": "Leviticus",
    "במדבר": "Numbers",
    "דברים": "Deuteronomy",
    #
    "יהושוע": "Joshua",
    "שופטים": "Judges",
    "שמואל א": "I Samuel",
    "שמואל ב": "II Samuel",
    "מלכים א": "I Kings",
    "מלכים ב": "II Kings",
    "ישעיהו": "Isaiah",
    "ירמיהו": "Jeremiah",
    "יחזקאל": "Ezekiel",
    "הושע": "Hosea",
    "יואל": "Joel",
    "עמוס": "Amos",
    "עובדיה": "Obadiah",
    "יונה": "Jonah",
    "מיכה": "Micah",
    "נחום": "Nahum",
    "חבקוק": "Habakkuk",
    "צפניה": "Zephaniah",
    "חגיי": "Haggai",
    "זכריה": "Zechariah",
    "מלאכי": "Malachi",
    #
    "תהילים": "Psalms",
    "משלי": "Proverbs",
    "איוב": "Job",
    "שיר השירים": "Song of Songs",
    "רות": "Ruth",
    "איכה": "Lamentations",
    "קוהלת": "Ecclesiastes",
    "אסתר": "Esther",
    "דנייאל": "Daniel",
    "עזרא / נחמיה ע": "Ezra",
    "עזרא / נחמיה נ": "Nehemiah",
    "דברי הימים א": "I Chronicles",
    "דברי הימים ב": "II Chronicles",
}


def count_words(lock, pos: int, read_q: Queue, write_q: Queue):
    """Count words in a book."""
    # pylint: disable=too-many-locals
    tqdm.set_lock(lock)
    re_remove = re.compile(
        r"</?P>|</?BIG>|</?SMALL>|</?SUP>|<A[^>]+>(.*)</A>|\{.\}|\(|\)"
    )
    re_name = re.compile(r"<H1>(.*)</H1>")
    re_ref = re.compile(r"<B>(.*)</B>")
    for msg in queuer(read_q):
        result = {"books": [], "words": {}}
        book = Path(msg.data)
        text = book.read_text()
        # book_num = int(book.stem[1:], 10)
        book_name = re_name.search(text)[1]
        book_num = 0
        en_name = ""
        # result["books"].append(
        #     dict(id=book_num, name=book_name, corpus="mechon-mamre.org")
        # )
        save_ref = ""
        desc = f"{os.getpid()} COUNT {book_name:<15}"
        for line in tqdm(text.split("\n"), desc=desc, position=pos):
            line = re_remove.sub("", line).replace("<BR>", " ").strip()
            if save_ref:
                ref, save_ref = save_ref, ""
            else:
                if not line or not line.startswith("<B>"):
                    continue
                ref = re_ref.search(line)[1].replace(" ׆", "")
                if "-" in ref:
                    ref, save_ref = ref.split("-")
                    save_ref = f'{ref.split(",")[0]},{save_ref}'
                ref = f"{book_name} {ref}"
            he_name, ref = ref.rsplit(" ", 1)
            tmp_name = BOOK_NAMES[he_name]
            if tmp_name != en_name:
                en_name = tmp_name
                book_num = list(BOOK_NAMES).index(he_name) + 1
                result["books"].append(
                    dict(id=book_num, name=en_name, corpus="mechon-mamre.org")
                )
            chapter, verse = ref.split(",")
            chapter, verse = grammar.gematria(chapter), grammar.gematria(verse)
            line = re_ref.sub("", line)  # reference removed
            line = line.replace(T.PUNCTUATION_MAQAF, T.PUNCTUATION_MAQAF + " ")
            for raw in line.split():
                clean = T.strip(raw)
                if not clean:
                    continue
                if clean in result["words"]:
                    result["words"][clean]["freq"] += 1
                else:
                    ref = f"{en_name} {chapter}:{verse}"
                    result["words"][clean] = dict(
                        book_id=book_num, freq=1, ref=ref, raw=raw
                    )
        write_q.put(Msg("SAVE", result))


def list_books(read_q: Queue, folder: Path):
    """Enqueue paths of books to parse."""
    for path in sorted(folder.iterdir()):
        read_q.put(Msg("COUNT", path))


def main(argv: List[str] = None):
    """Parse texts from <http://mechon-mamre.org>.

    Usage: mechon_mamre_org.py [download <folder> | -i <PATH>] [-n COUNT]

    Options:
      download <folder>     download HTML files to <folder>
      --index, -i PATH      HTML folder [default: text/mechon-mamre.org]
      --cpus, -n NUM        number of CPUs to use; at least 2 [default: all]
    """
    args = parse_args(main.__doc__ or "", argv)
    num_readers = args["num_readers"]
    num_writers = args["num_writers"]
    if args["download"]:
        url = "http://mechon-mamre.org/htmlzips/ct005.zip"
        folder = Path(args["<folder>"]).resolve()
        pattern = re.compile(r"c/ct/c[0-9]{2}.htm")
        folder = download_unzip(url, folder, pattern)
    else:
        folder = Path(args["--index"]).resolve()
    init_fn = partial(list_books, folder=folder)
    spawn_processes(init_fn, count_words, save_database, num_readers, num_writers)


if __name__ == "__main__":  # pragma: no cover
    main()
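# --- Added illustration (not part of the original script above) ---
# count_words() converts Hebrew chapter/verse labels to integers via
# grammar.gematria. A hedged sketch of what such a conversion does: each
# Hebrew letter carries a numeric value and a label sums its letters. The
# real grammar.gematria in this package may handle more cases; the table
# below covers base letters plus final forms mapped to their base values:

GEMATRIA = {
    "א": 1, "ב": 2, "ג": 3, "ד": 4, "ה": 5, "ו": 6, "ז": 7, "ח": 8, "ט": 9,
    "י": 10, "כ": 20, "ל": 30, "מ": 40, "נ": 50, "ס": 60, "ע": 70, "פ": 80,
    "צ": 90, "ק": 100, "ר": 200, "ש": 300, "ת": 400,
    # final forms counted at base value
    "ך": 20, "ם": 40, "ן": 50, "ף": 80, "ץ": 90,
}


def gematria(label):
    return sum(GEMATRIA.get(ch, 0) for ch in label)


assert gematria("טו") == 15   # 9 + 6, the conventional writing of 15
assert gematria("קג") == 103  # 100 + 3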
endpoint.go
package workflows_updateStep

const ApiMethod string = "workflows.updateStep"