file_name
stringlengths
3
137
prefix
stringlengths
0
918k
suffix
stringlengths
0
962k
middle
stringlengths
0
812k
fuzz.py
from unicorn.arm_const import UC_ARM_REG_R0 from .. import native def get_fuzz(uc, size): """ Gets at most 'size' bytes from the fuzz pool. If we run out of fuzz, something will happen (e.g., exit) :param size: :return: """ return native.get_fuzz(uc, size) def fuzz_remaining(): return native.fuzz_remaining() def load_fuzz(file_path): native.load_fuzz(file_path) def return_fuzz_byte(uc):
global fuzz c = get_fuzz(uc, 1) uc.reg_write(UC_ARM_REG_R0, ord(c))
helpers.rs
pub fn fix_denormal(f: f32) -> f32 { f + 1e-20 } pub fn param_to_freq(param: f32) -> f32 { 20.0 + (20000.0 - 20.0) * param * param } pub fn freq_to_param(freq: f32) -> f32 { ((freq - 20.0) / (20000.0 - 20.0)).sqrt() } pub fn param_to_resonance(param: f32) -> f32
pub fn resonance_to_param(resonance: f32) -> f32 { (resonance - 0.01) / 0.99 }
{ param * 0.99 + 0.01 }
file_hash.py
"""Hash your files for easy identification.""" import hashlib import logging import os from builtins import * # noqa pylint: disable=unused-import, redefined-builtin from typing import Dict from flexget import plugin from flexget.event import event from flexget.logger import FlexGetLogger from .cunit import IECUnit PLUGIN_ID = 'file_hash' log: FlexGetLogger = logging.getLogger(PLUGIN_ID) class FileHashPlugin(object): """ Task class that does the hashing. By default file_hash will: - Use blake2b if it is available, otherwise it will use MD5 - Start at 50MiB into the file - If the file is less than 50MiB, it starts at the beginning - Hashes 25MiB of the file after the starting point - If the file does not have 25MiB after the starting point, it will hash from the starting point to the end - Choose MAX two 'size', 'start', 'stop' Examples: # Use file_hash with the default settings. file_hash: yes # Use sha1 with the rest of the default settings file_hash: sha1 # Hash 1MiB, 25MiB into the file with algorithm SHA256 file_hash: algorithm: sha256 size: 1 start: 25 # Hash from 25MiB in to 35MiB in file_hash: start: 25 stop: 35 """ @staticmethod def __default_algo(): return 'blake2b' if 'blake2b' in hashlib.algorithms_available else 'md5' hash_size_default = 25 hash_start_default = 50 schema = { 'oneOf': [ {'type': 'boolean'}, {'type': 'string', 'enum': list(hashlib.algorithms_available)}, {'type': 'object', 'properties': { 'algorithm': { 'type': 'string', 'enum': list(hashlib.algorithms_available)}, 'size': {'type': 'integer', 'default': hash_size_default}, 'start': {'type': 'integer', 'default': hash_start_default}, 'stop': {'type': 'integer'}, 'time': {'type': 'boolean', 'default': 'boolean'}}}, ], } plugin_fields = {'file_hash_type', 'file_hash_hash', 'file_hash_modified', 'file_hash_bytes'} @staticmethod def __strict_boolean(check): if isinstance(check, bool) and check: return True return False def __get_algo(self, config): return self.__default_algo() def compare_entry(self, entry, config): if 'file_hash' in entry: file_hash = entry['file_hash'] match_algo = file_hash.algorithm == self.__get_algo(config) match_file_size = file_hash.file_size == os.path.getsize(entry['location']) match_modified = file_hash.modified == os.path.getmtime(entry['location']) match_start = file_hash.start == config.get('start') match_stop = file_hash.stop == config.get('stop') match_chunk_size = file_hash.chunk_size == config.get('size') match_strict = match_file_size and match_start and match_stop and match_chunk_size if match_algo and match_strict: return True return False def on_task_metainfo(self, task, config): """Call the plugin.""" log.info('Starting file_hash') # todo: add conditions to adapt to users' configuration if self.__strict_boolean(config): config = {True} hash_portion = { 'algorithm': self.__get_algo(config), 'size': IECUnit.MiB * (config['size'] if 'size' in config else self.hash_size_default), 'start': IECUnit.MiB * (config['start'] if 'start' in config else self.hash_start_default), 'stop': IECUnit.MiB * (config['stop'] if 'stop' in config else -1), } hasher = hashlib.new(hash_portion['algorithm']) log.verbose('Hasing with algorithm: %s', hash_portion['algorithm']) log.debug('Hashing %s MiB of each file.', hash_portion['size']) log.debug('Hashing starting %s MiB into file.', hash_portion['start']) log.debug('Hashing ending at %s MiB.', hash_portion['stop']) len_entries = len(task.entries) idx = 0 for entry in task.entries: idx += 1 file_size = os.path.getsize(entry['location']) if self.compare_entry(entry, config): log.verbose('This file seems to be unmodified, skipping') continue log.verbose('%s/%s: Hasing %s', idx, len_entries, entry['location']) current_hasher = hasher.copy() tmp_hash_portion_start = -1 if file_size < hash_portion['start']: log.debug('The file is only %s MiB, adjusting start location.', float(file_size / IECUnit.MiB)) if file_size < hash_portion['size']: log.debug('The file is less than the set size to to hash, setting start position to 0') tmp_hash_portion_start = 0 else: tmp_hash_portion_start = file_size - hash_portion['size'] log.debug('The size of the file is greater than the set size to hash, \ setting start position to %s MiB', tmp_hash_portion_start) with open(entry['location'], 'rb') as to_hash: to_hash.seek(tmp_hash_portion_start if tmp_hash_portion_start > -1 else hash_portion['start']) piece = to_hash.read(hash_portion['size']) current_hasher.update(piece) file_digest = current_hasher.hexdigest() file_modified = os.path.getmtime(entry['location']) filehash = FileHash(hash_portion, file_digest, file_modified, file_size) entry['file_hash'] = filehash log.debug(filehash) to_hash.close() class FileHash(object):
@event('plugin.register') def register_plugin(): plugin.register(FileHashPlugin, PLUGIN_ID, api_ver=2, interfaces=['task', 'series_metainfo', 'movie_metainfo'])
"""Store the information from the hashing.""" algorithm = None file_hash = None modified = None start = None stop = None chunk_size = None size = None def __init__(self, config_settings: Dict, file_hash, modified, size): """ Initialize a FileHash object. config_settings -- ends up being the config for the plugin file_hash -- the hash of the file modified -- last time the file was modified size -- size of the file in bytes """ self.algorithm = config_settings['algorithm'] self.start = config_settings['start'] self.stop = config_settings['stop'] self.chunk_size = config_settings['size'] self.file_hash = file_hash self.modified = modified self.size = size def __eq__(self, other): return isinstance(other, FileHash) and\ self.algorithm == other.algorithm and\ self.file_hash == other.file_hash def __repr__(self): """Represent a FileHash.""" return """<FileHash: \ algorithm={0}, \ start={1}, stop={2}, \ chunk_size={3}, \ file_hash={4}, \ modified={5}, \ size={6}""".format( self.algorithm, self.start, self.stop, self.chunk_size, self.file_hash, self.modified, self.size)
publisher.go
/* Copyright 2021 Dynatrace LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package hostvolumes import ( "context" "fmt" "os" "time" csivolumes "github.com/Dynatrace/dynatrace-operator/src/controllers/csi/driver/volumes" "github.com/Dynatrace/dynatrace-operator/src/controllers/csi/metadata" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/spf13/afero" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "k8s.io/utils/mount" "sigs.k8s.io/controller-runtime/pkg/client" ) func NewHostVolumePublisher(client client.Client, fs afero.Afero, mounter mount.Interface, db metadata.Access, path metadata.PathResolver) csivolumes.Publisher { return &HostVolumePublisher{ client: client, fs: fs, mounter: mounter, db: db, path: path, }
type HostVolumePublisher struct { client client.Client fs afero.Afero mounter mount.Interface db metadata.Access path metadata.PathResolver } func (publisher *HostVolumePublisher) PublishVolume(ctx context.Context, volumeCfg *csivolumes.VolumeConfig) (*csi.NodePublishVolumeResponse, error) { bindCfg, err := csivolumes.NewBindConfig(ctx, publisher.db, volumeCfg) if err != nil { return nil, err } if err := publisher.mountOneAgent(bindCfg.TenantUUID, volumeCfg); err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("failed to mount osagent volume: %s", err.Error())) } volume, err := publisher.db.GetOsAgentVolumeViaTenantUUID(bindCfg.TenantUUID) if err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("failed to get osagent volume info from database: %s", err.Error())) } timestamp := time.Now() if volume == nil { storage := metadata.OsAgentVolume{ VolumeID: volumeCfg.VolumeID, TenantUUID: bindCfg.TenantUUID, Mounted: true, LastModified: &timestamp, } if err := publisher.db.InsertOsAgentVolume(&storage); err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("failed to insert osagent volume info to database. info: %v err: %s", storage, err.Error())) } } else { volume.VolumeID = volumeCfg.VolumeID volume.Mounted = true volume.LastModified = &timestamp if err := publisher.db.UpdateOsAgentVolume(volume); err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("failed to update osagent volume info to database. info: %v err: %s", volume, err.Error())) } } return &csi.NodePublishVolumeResponse{}, nil } func (publisher *HostVolumePublisher) UnpublishVolume(_ context.Context, volumeInfo *csivolumes.VolumeInfo) (*csi.NodeUnpublishVolumeResponse, error) { volume, err := publisher.db.GetOsAgentVolumeViaVolumeID(volumeInfo.VolumeID) if err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("failed to get osagent volume info from database: %s", err.Error())) } if volume == nil { return nil, nil } if err := publisher.umountOneAgent(volumeInfo.TargetPath); err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("failed to unmount osagent volume: %s", err.Error())) } timestamp := time.Now() volume.Mounted = false volume.LastModified = &timestamp if err := publisher.db.UpdateOsAgentVolume(volume); err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("failed to update osagent volume info to database. info: %v err: %s", volume, err.Error())) } log.Info("osagent volume has been unpublished", "targetPath", volumeInfo.TargetPath) return &csi.NodeUnpublishVolumeResponse{}, nil } func (publisher *HostVolumePublisher) CanUnpublishVolume(volumeInfo *csivolumes.VolumeInfo) (bool, error) { volume, err := publisher.db.GetOsAgentVolumeViaVolumeID(volumeInfo.VolumeID) if err != nil { return false, status.Error(codes.Internal, fmt.Sprintf("failed to get osagent volume info from database: %s", err.Error())) } return volume != nil, nil } func (publisher *HostVolumePublisher) mountOneAgent(tenantUUID string, volumeCfg *csivolumes.VolumeConfig) error { hostDir := publisher.path.OsAgentDir(tenantUUID) _ = publisher.fs.MkdirAll(hostDir, os.ModePerm) if err := publisher.fs.MkdirAll(volumeCfg.TargetPath, os.ModePerm); err != nil { return err } if err := publisher.mounter.Mount(hostDir, volumeCfg.TargetPath, "", []string{"bind"}); err != nil { _ = publisher.mounter.Unmount(hostDir) return err } return nil } func (publisher *HostVolumePublisher) umountOneAgent(targetPath string) error { if err := publisher.mounter.Unmount(targetPath); err != nil { log.Error(err, "Unmount failed", "path", targetPath) } return nil }
}
network_monitor.rs
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use crate::AsyncResult; use crate::Cancellable; use crate::NetworkConnectivity; use crate::SocketConnectable; use glib::object::Cast; use glib::object::IsA; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; use std::boxed::Box as Box_; use std::fmt; use std::mem::transmute; use std::pin::Pin; use std::ptr; glib::wrapper! { pub struct NetworkMonitor(Interface<ffi::GNetworkMonitor, ffi::GNetworkMonitorInterface>); match fn { type_ => || ffi::g_network_monitor_get_type(), } } impl NetworkMonitor { #[doc(alias = "g_network_monitor_get_default")] #[doc(alias = "get_default")] pub fn default() -> NetworkMonitor { unsafe { from_glib_none(ffi::g_network_monitor_get_default()) } } } pub const NONE_NETWORK_MONITOR: Option<&NetworkMonitor> = None; pub trait NetworkMonitorExt: 'static { #[doc(alias = "g_network_monitor_can_reach")] fn can_reach<P: IsA<SocketConnectable>, Q: IsA<Cancellable>>( &self, connectable: &P, cancellable: Option<&Q>, ) -> Result<(), glib::Error>; #[doc(alias = "g_network_monitor_can_reach_async")] fn can_reach_async< P: IsA<SocketConnectable>, Q: IsA<Cancellable>, R: FnOnce(Result<(), glib::Error>) + Send + 'static, >( &self, connectable: &P, cancellable: Option<&Q>, callback: R, ); fn can_reach_async_future<P: IsA<SocketConnectable> + Clone + 'static>( &self, connectable: &P, ) -> Pin<Box_<dyn std::future::Future<Output = Result<(), glib::Error>> + 'static>>; #[doc(alias = "g_network_monitor_get_connectivity")] #[doc(alias = "get_connectivity")] fn connectivity(&self) -> NetworkConnectivity; #[doc(alias = "g_network_monitor_get_network_available")] #[doc(alias = "get_network_available")] fn is_network_available(&self) -> bool; #[doc(alias = "g_network_monitor_get_network_metered")] #[doc(alias = "get_network_metered")] fn is_network_metered(&self) -> bool; #[doc(alias = "network-changed")] fn connect_network_changed<F: Fn(&Self, bool) + 'static>(&self, f: F) -> SignalHandlerId; #[doc(alias = "connectivity")] fn connect_connectivity_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; #[doc(alias = "network-available")] fn connect_network_available_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; #[doc(alias = "network-metered")] fn connect_network_metered_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; } impl<O: IsA<NetworkMonitor>> NetworkMonitorExt for O { fn can_reach<P: IsA<SocketConnectable>, Q: IsA<Cancellable>>( &self, connectable: &P, cancellable: Option<&Q>, ) -> Result<(), glib::Error> { unsafe { let mut error = ptr::null_mut(); let _ = ffi::g_network_monitor_can_reach( self.as_ref().to_glib_none().0, connectable.as_ref().to_glib_none().0, cancellable.map(|p| p.as_ref()).to_glib_none().0, &mut error, ); if error.is_null() { Ok(()) } else { Err(from_glib_full(error)) } } } fn can_reach_async< P: IsA<SocketConnectable>, Q: IsA<Cancellable>, R: FnOnce(Result<(), glib::Error>) + Send + 'static, >( &self, connectable: &P, cancellable: Option<&Q>, callback: R, ) { let user_data: Box_<R> = Box_::new(callback); unsafe extern "C" fn can_reach_async_trampoline< R: FnOnce(Result<(), glib::Error>) + Send + 'static, >( _source_object: *mut glib::gobject_ffi::GObject, res: *mut crate::ffi::GAsyncResult, user_data: glib::ffi::gpointer, ) { let mut error = ptr::null_mut(); let _ = ffi::g_network_monitor_can_reach_finish(_source_object as *mut _, res, &mut error); let result = if error.is_null()
else { Err(from_glib_full(error)) }; let callback: Box_<R> = Box_::from_raw(user_data as *mut _); callback(result); } let callback = can_reach_async_trampoline::<R>; unsafe { ffi::g_network_monitor_can_reach_async( self.as_ref().to_glib_none().0, connectable.as_ref().to_glib_none().0, cancellable.map(|p| p.as_ref()).to_glib_none().0, Some(callback), Box_::into_raw(user_data) as *mut _, ); } } fn can_reach_async_future<P: IsA<SocketConnectable> + Clone + 'static>( &self, connectable: &P, ) -> Pin<Box_<dyn std::future::Future<Output = Result<(), glib::Error>> + 'static>> { let connectable = connectable.clone(); Box_::pin(crate::GioFuture::new(self, move |obj, send| { let cancellable = Cancellable::new(); obj.can_reach_async(&connectable, Some(&cancellable), move |res| { send.resolve(res); }); cancellable })) } fn connectivity(&self) -> NetworkConnectivity { unsafe { from_glib(ffi::g_network_monitor_get_connectivity( self.as_ref().to_glib_none().0, )) } } fn is_network_available(&self) -> bool { unsafe { from_glib(ffi::g_network_monitor_get_network_available( self.as_ref().to_glib_none().0, )) } } fn is_network_metered(&self) -> bool { unsafe { from_glib(ffi::g_network_monitor_get_network_metered( self.as_ref().to_glib_none().0, )) } } #[doc(alias = "network-changed")] fn connect_network_changed<F: Fn(&Self, bool) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn network_changed_trampoline<P, F: Fn(&P, bool) + 'static>( this: *mut ffi::GNetworkMonitor, network_available: glib::ffi::gboolean, f: glib::ffi::gpointer, ) where P: IsA<NetworkMonitor>, { let f: &F = &*(f as *const F); f( &NetworkMonitor::from_glib_borrow(this).unsafe_cast_ref(), from_glib(network_available), ) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"network-changed\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( network_changed_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } #[doc(alias = "connectivity")] fn connect_connectivity_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_connectivity_trampoline<P, F: Fn(&P) + 'static>( this: *mut ffi::GNetworkMonitor, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) where P: IsA<NetworkMonitor>, { let f: &F = &*(f as *const F); f(&NetworkMonitor::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::connectivity\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_connectivity_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } #[doc(alias = "network-available")] fn connect_network_available_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_network_available_trampoline<P, F: Fn(&P) + 'static>( this: *mut ffi::GNetworkMonitor, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) where P: IsA<NetworkMonitor>, { let f: &F = &*(f as *const F); f(&NetworkMonitor::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::network-available\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_network_available_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } #[doc(alias = "network-metered")] fn connect_network_metered_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_network_metered_trampoline<P, F: Fn(&P) + 'static>( this: *mut ffi::GNetworkMonitor, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) where P: IsA<NetworkMonitor>, { let f: &F = &*(f as *const F); f(&NetworkMonitor::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::network-metered\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_network_metered_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } } impl fmt::Display for NetworkMonitor { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("NetworkMonitor") } }
{ Ok(()) }
mod.rs
#![allow(dead_code)] use crate::vec3::Vec3; use std::sync::atomic::{AtomicU64, Ordering}; static RAY_COUNT: AtomicU64 = AtomicU64::new(0); #[derive(Debug, Copy, Clone)] pub struct Ray { orig: Vec3, dir: Vec3, } impl Ray { pub fn new(orig: Vec3, dir: Vec3) -> Self { Ray{orig: orig, dir: dir} } // Accessors pub fn origin(&self) -> Vec3 { self.orig } pub fn direction(&self) -> Vec3 { self.dir } // Operations pub fn at(&self, t: f64) -> Vec3 { RAY_COUNT.store(RAY_COUNT.load(Ordering::Relaxed)+1, Ordering::Relaxed); self.orig + (self.dir*t) } pub fn get_count() -> u64 { RAY_COUNT.load(Ordering::Relaxed) } pub fn reset_count() { RAY_COUNT.store(0, Ordering::Relaxed); } }
alter_index.go
// Copyright 2017 The Cockroach Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. package sql import ( "context" "fmt" "github.com/gogo/protobuf/proto" "github.com/znbasedb/znbase/pkg/security/audit/server" "github.com/znbasedb/znbase/pkg/security/privilege" "github.com/znbasedb/znbase/pkg/sql/pgwire/pgcode" "github.com/znbasedb/znbase/pkg/sql/pgwire/pgerror" "github.com/znbasedb/znbase/pkg/sql/sem/tree" "github.com/znbasedb/znbase/pkg/sql/sqlbase" "github.com/znbasedb/znbase/pkg/util/timeutil" ) type alterIndexNode struct { n *tree.AlterIndex tableDesc *sqlbase.MutableTableDescriptor indexDesc *sqlbase.IndexDescriptor } // AlterIndex applies a schema change on an index. // Privileges: CREATE on table. func (p *planner) AlterIndex(ctx context.Context, n *tree.AlterIndex) (planNode, error) { tableDesc, indexDesc, err := p.getTableAndIndex(ctx, nil, n.Index, privilege.REFERENCES) if err != nil { return nil, err } // As an artifact of finding the index by name, we get a pointer to a // different copy than the one in the tableDesc. To make it easier for the // code below, get a pointer to the index descriptor that's actually in // tableDesc. indexDesc, err = tableDesc.FindIndexByID(indexDesc.ID) if err != nil { return nil, err } return &alterIndexNode{n: n, tableDesc: tableDesc, indexDesc: indexDesc}, nil } func (n *alterIndexNode) startExec(params runParams) error { // Commands can either change the descriptor directly (for // alterations that don't require a backfill) or add a mutation to // the list. descriptorChanged := false useLocateIn := false var oldLocationNums, newLocationNums int32 origNumMutations := len(n.tableDesc.Mutations) for _, cmd := range n.n.Cmds { switch t := cmd.(type) { case *tree.AlterIndexPartitionBy: if t.PartitionBy != nil && t.IsHash { return pgerror.NewError(pgcode.IndexHashPartition, "Can not append hash partition to index.") } if n.indexDesc.IsLocal { // 本地分区索引不能被显式重新分区 return pgerror.NewError(pgcode.LocalIndexAlter, "Submitted alter index partition operation is not valid for local partitioned index") } partitioning, err := CreatePartitioning( params.ctx, params.extendedEvalCtx.Settings, params.EvalContext(), n.tableDesc, n.indexDesc, t.PartitionBy, params.StatusServer()) if err != nil { return err } descriptorChanged = !proto.Equal( &n.indexDesc.Partitioning, &partitioning, ) oldLocationNums = n.indexDesc.LocationNums newLocationNums = n.indexDesc.LocationNums - n.indexDesc.Partitioning.LocationNums + partitioning.LocationNums n.indexDesc.Partitioning = partitioning if oldLocationNums > 0 || newLocationNums > 0 { useLocateIn = true } case *tree.AlterIndexLocateIn: spaceName := t.LocateSpaceName.ToValue() if err := CheckLocateSpaceNameExistICL(params.ctx, spaceName, *params.StatusServer()); err != nil { return err } var changeValue int32 descChange := false oldLocationNums = n.indexDesc.LocationNums if descChange, changeValue = changeLocationNums(spaceName, n.indexDesc.LocateSpaceName); descChange { newLocationNums = n.indexDesc.LocationNums + changeValue + 1 if spaceName != nil { n.indexDesc.LocateSpaceName = spaceName } } if descChange { descriptorChanged = desc
newLocationNums > 0 { useLocateIn = true } default: return fmt.Errorf("unsupported alter command: %T", cmd) } } if descriptorChanged { if err := updateIndexLocationNums(n.tableDesc.TableDesc(), n.indexDesc, newLocationNums); err != nil { return err } if useLocateIn { if err := params.p.LocationMapChange(params.ctx, n.tableDesc, params.extendedEvalCtx.Tables.databaseCache.systemConfig); err != nil { return err } } } if err := n.tableDesc.AllocateIDs(); err != nil { return err } addedMutations := len(n.tableDesc.Mutations) > origNumMutations mutationID := sqlbase.InvalidMutationID var err error if addedMutations { mutationID, err = params.p.createOrUpdateSchemaChangeJob(params.ctx, n.tableDesc, tree.AsStringWithFlags(n.n, tree.FmtAlwaysQualifyTableNames|tree.FmtVisableType)) } else if !descriptorChanged { // Nothing to be done return nil } if err != nil { return err } result := "OK" if err := params.p.writeSchemaChange(params.ctx, n.tableDesc, mutationID); err != nil { result = "ERROR" } // Record this index alteration in the event log. This is an auditable log // event and is recorded in the same transaction as the table descriptor // update. return params.extendedEvalCtx.ExecCfg.AuditServer.LogAudit( params.ctx, false, &server.AuditInfo{ EventTime: timeutil.Now(), EventType: string(EventLogAlterIndex), //TargetID: int32(n.tableDesc.ID), //Opt: n.n.String(), //OptTime: timeutil.SubTimes(timeutil.Now(), start), AffectedSize: 1, Result: result, Info: struct { TableName string IndexName string Statement string User string MutationID uint32 }{ n.n.Index.Table.FQString(), n.indexDesc.Name, n.n.String(), params.SessionData().User, uint32(mutationID), }, }, ) } func (n *alterIndexNode) Next(runParams) (bool, error) { return false, nil } func (n *alterIndexNode) Values() tree.Datums { return tree.Datums{} } func (n *alterIndexNode) Close(context.Context) {}
Change } if oldLocationNums > 0 ||
repeat.js
(function () { angular.module('c8y.sdk').directive('c8yRepeat', [ '$injector', '$compile', '$rootScope', c8yRepeat ]); function c8yRepeat( $injector, $compile, $rootScope ) { function createLink(clonedElement) { return function (scope, _elem, attrs) { var elem = clonedElement; var serviceName; var ngRepeatLink; var parentScope; replaceWithNgRepeat(); init(); function replaceWithNgRepeat() { var regex = /^\s*([^\s]+)\s*in\s*([^\s]+)\s*/; var matches = regex.exec(attrs.c8yRepeat); var varName = matches[1]; serviceName = matches[2]; elem.removeAttr('c8y-repeat'); elem.removeAttr('data-c8y-repeat'); elem.attr( 'ng-repeat', varName + ' in __c8y_serviceResult track by ' + varName + '.id' ); ngRepeatLink = $compile(elem); $(_elem).replaceWith(elem); } function assignRefreshFunction() { scope.refresh = fetchResults; } function fetchResults() { if ($rootScope.c8y && $rootScope.c8y.user) { callService().then(function (result) { parentScope.__c8y_serviceResult = result; }); } } function
() { var filter = scope.filter || {}; return $injector.get('c8y' + capitalize(serviceName)).list(filter); } function capitalize(s) { return s[0].toUpperCase() + s.slice(1); } function init() { parentScope = scope.$parent; ngRepeatLink(parentScope); assignRefreshFunction(); $rootScope.$on('c8y.api.login', function () { fetchResults(); }); scope.$watch('filter', fetchResults, true); } }; } return { restrict: 'A', compile: function (element) { return createLink($(element).clone()); }, transclude: false, scope: { filter: '=?', refresh: '=?' } }; } })();
callService
test_renault_vehicle.py
"""Test cases for the Renault client API keys.""" from datetime import datetime from typing import List import aiohttp import pytest from aioresponses import aioresponses from tests import get_file_content from tests.const import TEST_ACCOUNT_ID from tests.const import TEST_COUNTRY from tests.const import TEST_KAMEREON_URL from tests.const import TEST_LOCALE_DETAILS from tests.const import TEST_VIN from tests.test_credential_store import get_logged_in_credential_store from tests.test_renault_session import get_logged_in_session from renault_api.kamereon.enums import ChargeMode from renault_api.kamereon.models import ChargeSchedule from renault_api.renault_vehicle import RenaultVehicle TEST_KAMEREON_BASE_URL = f"{TEST_KAMEREON_URL}/commerce/v1" TEST_KAMEREON_ACCOUNT_URL = f"{TEST_KAMEREON_BASE_URL}/accounts/{TEST_ACCOUNT_ID}" TEST_KAMEREON_VEHICLE_URL1 = ( f"{TEST_KAMEREON_ACCOUNT_URL}/kamereon/kca/car-adapter/v1/cars/{TEST_VIN}" ) TEST_KAMEREON_VEHICLE_URL2 = ( f"{TEST_KAMEREON_ACCOUNT_URL}/kamereon/kca/car-adapter/v2/cars/{TEST_VIN}" ) FIXTURE_PATH = "tests/fixtures/kamereon/" QUERY_STRING = f"country={TEST_COUNTRY}" @pytest.fixture def vehicle(websession: aiohttp.ClientSession) -> RenaultVehicle: """Fixture for testing RenaultVehicle.""" return RenaultVehicle( account_id=TEST_ACCOUNT_ID, vin=TEST_VIN, session=get_logged_in_session(websession), ) def tests_init(websession: aiohttp.ClientSession) -> None: """Test RenaultVehicle initialisation.""" assert RenaultVehicle( account_id=TEST_ACCOUNT_ID, vin=TEST_VIN, session=get_logged_in_session(websession), ) assert RenaultVehicle( account_id=TEST_ACCOUNT_ID, vin=TEST_VIN, websession=websession, country=TEST_COUNTRY, locale_details=TEST_LOCALE_DETAILS, credential_store=get_logged_in_credential_store(), ) @pytest.mark.asyncio async def test_get_battery_status(vehicle: RenaultVehicle) -> None: """Test get_battery_status.""" with aioresponses() as mocked_responses: mocked_responses.get( f"{TEST_KAMEREON_VEHICLE_URL2}/battery-status?{QUERY_STRING}", status=200, body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/battery-status.1.json"), ) assert await vehicle.get_battery_status() @pytest.mark.asyncio async def test_get_location(vehicle: RenaultVehicle) -> None: """Test get_location.""" with aioresponses() as mocked_responses: mocked_responses.get( f"{TEST_KAMEREON_VEHICLE_URL1}/location?{QUERY_STRING}", status=200, body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/location.json"), ) assert await vehicle.get_location() @pytest.mark.asyncio async def test_get_hvac_status(vehicle: RenaultVehicle) -> None: """Test get_hvac_status.""" with aioresponses() as mocked_responses: mocked_responses.get( f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-status?{QUERY_STRING}", status=200, body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-status.json"), ) assert await vehicle.get_hvac_status() @pytest.mark.asyncio async def test_get_charge_mode(vehicle: RenaultVehicle) -> None: """Test get_charge_mode.""" with aioresponses() as mocked_responses: mocked_responses.get( f"{TEST_KAMEREON_VEHICLE_URL1}/charge-mode?{QUERY_STRING}", status=200, body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charge-mode.json"), ) assert await vehicle.get_charge_mode() @pytest.mark.asyncio async def test_get_cockpit(vehicle: RenaultVehicle) -> None: """Test get_cockpit.""" with aioresponses() as mocked_responses: mocked_responses.get( f"{TEST_KAMEREON_VEHICLE_URL2}/cockpit?{QUERY_STRING}", status=200, body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/cockpit.zoe.json"), ) assert await vehicle.get_cockpit() @pytest.mark.asyncio async def test_get_lock_status(vehicle: RenaultVehicle) -> None: """Test get_lock_status.""" with aioresponses() as mocked_responses: mocked_responses.get( f"{TEST_KAMEREON_VEHICLE_URL1}/lock-status?{QUERY_STRING}", status=200, body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/lock-status.json"), ) assert await vehicle.get_lock_status() @pytest.mark.asyncio async def test_get_charging_settings(vehicle: RenaultVehicle) -> None: """Test get_charging_settings.""" with aioresponses() as mocked_responses: mocked_responses.get( f"{TEST_KAMEREON_VEHICLE_URL1}/charging-settings?{QUERY_STRING}", status=200, body=get_file_content( f"{FIXTURE_PATH}/vehicle_data/charging-settings.json" ), ) assert await vehicle.get_charging_settings() @pytest.mark.asyncio async def test_get_notification_settings(vehicle: RenaultVehicle) -> None: """Test get_notification_settings.""" with aioresponses() as mocked_responses: mocked_responses.get( f"{TEST_KAMEREON_VEHICLE_URL1}/notification-settings?{QUERY_STRING}", status=200, body=get_file_content( f"{FIXTURE_PATH}/vehicle_data/notification-settings.json" ), ) assert await vehicle.get_notification_settings() @pytest.mark.asyncio async def test_get_charge_history(vehicle: RenaultVehicle) -> None: """Test get_charge_history.""" query_string = f"{QUERY_STRING}&end=202011&start=202010&type=month" with aioresponses() as mocked_responses: mocked_responses.get( f"{TEST_KAMEREON_VEHICLE_URL1}/charge-history?{query_string}", status=200, body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charge-history.json"), ) assert await vehicle.get_charge_history( start=datetime(2020, 10, 1), end=datetime(2020, 11, 15), ) @pytest.mark.asyncio async def
(vehicle: RenaultVehicle) -> None: """Test get_charges.""" query_string = f"{QUERY_STRING}&end=20201115&start=20201001" with aioresponses() as mocked_responses: mocked_responses.get( f"{TEST_KAMEREON_VEHICLE_URL1}/charges?{query_string}", status=200, body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/charges.json"), ) assert await vehicle.get_charges( start=datetime(2020, 10, 1), end=datetime(2020, 11, 15), ) @pytest.mark.asyncio async def test_get_hvac_history(vehicle: RenaultVehicle) -> None: """Test get_hvac_history.""" query_string = f"{QUERY_STRING}&end=202011&start=202010&type=month" with aioresponses() as mocked_responses: mocked_responses.get( f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-history?{query_string}", status=200, body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-history.json"), ) assert await vehicle.get_hvac_history( start=datetime(2020, 10, 1), end=datetime(2020, 11, 15), ) @pytest.mark.asyncio async def test_get_hvac_sessions(vehicle: RenaultVehicle) -> None: """Test get_hvac_sessions.""" query_string = f"{QUERY_STRING}&end=20201115&start=20201001" with aioresponses() as mocked_responses: mocked_responses.get( f"{TEST_KAMEREON_VEHICLE_URL1}/hvac-sessions?{query_string}", status=200, body=get_file_content(f"{FIXTURE_PATH}/vehicle_data/hvac-sessions.json"), ) assert await vehicle.get_hvac_sessions( start=datetime(2020, 10, 1), end=datetime(2020, 11, 15), ) @pytest.mark.asyncio async def test_set_ac_start(vehicle: RenaultVehicle) -> None: """Test set_ac_start.""" with aioresponses() as mocked_responses: mocked_responses.post( f"{TEST_KAMEREON_VEHICLE_URL1}/actions/hvac-start?{QUERY_STRING}", status=200, body=get_file_content( f"{FIXTURE_PATH}/vehicle_action/hvac-start.start.json" ), ) assert await vehicle.set_ac_start(21, datetime(2020, 11, 24)) @pytest.mark.asyncio async def test_set_ac_stop(vehicle: RenaultVehicle) -> None: """Test set_ac_stop.""" with aioresponses() as mocked_responses: mocked_responses.post( f"{TEST_KAMEREON_VEHICLE_URL1}/actions/hvac-start?{QUERY_STRING}", status=200, body=get_file_content( f"{FIXTURE_PATH}/vehicle_action/hvac-start.cancel.json" ), ) assert await vehicle.set_ac_stop() @pytest.mark.asyncio async def test_set_charge_mode(vehicle: RenaultVehicle) -> None: """Test set_charge_mode.""" with aioresponses() as mocked_responses: mocked_responses.post( f"{TEST_KAMEREON_VEHICLE_URL1}/actions/charge-mode?{QUERY_STRING}", status=200, body=get_file_content( f"{FIXTURE_PATH}/vehicle_action/charge-mode.schedule_mode.json" ), ) assert await vehicle.set_charge_mode(ChargeMode.SCHEDULE_MODE) @pytest.mark.asyncio async def test_set_charge_schedules(vehicle: RenaultVehicle) -> None: """Test set_charge_schedules.""" schedules: List[ChargeSchedule] = [] with aioresponses() as mocked_responses: mocked_responses.post( f"{TEST_KAMEREON_VEHICLE_URL2}/actions/charge-schedule?{QUERY_STRING}", status=200, body=get_file_content( f"{FIXTURE_PATH}/vehicle_action/charge-schedule.schedules.json" ), ) assert await vehicle.set_charge_schedules(schedules) @pytest.mark.asyncio async def test_set_charge_start(vehicle: RenaultVehicle) -> None: """Test set_charge_start.""" with aioresponses() as mocked_responses: mocked_responses.post( f"{TEST_KAMEREON_VEHICLE_URL1}/actions/charging-start?{QUERY_STRING}", status=200, body=get_file_content( f"{FIXTURE_PATH}/vehicle_action/charging-start.start.json" ), ) assert await vehicle.set_charge_start()
test_get_charges
environment.ts
export const environment = { production: false, hmr: false, apiUrl: 'http://localhost:4000', firebaseConfig: {
apiKey: 'AIzaSyAA9x9CnEpyea8p_eb66QQPP_wlPUz8ee0', authDomain: 'chordbomb.firebaseapp.com', databaseURL: 'https://chordbomb-default-rtdb.firebaseio.com', projectId: 'chordbomb', storageBucket: 'chordbomb.appspot.com', messagingSenderId: '1037238641360', appId: '1:1037238641360:web:ad28959c41df8a021934d5', measurementId: 'G-QVNMJDZE46' }, firestoreAdminId: 'FOkwTfH6SAZwDYOwSHvjYoVqPpz2', algoliaConfig: { apiKey: '1dfb51e5d328e2efcf769a74182ebf1e', appId: 'EGCKVSN4PS' } };
printing.py
#!/usr/bin/python import sys import subprocess printers = [] def
(): global printers if not sys.platform == "linux2": return ['default'] if len(printers) > 0: return printers try: process = subprocess.Popen(["lpstat", "-a"], stdout=subprocess.PIPE) result = process.communicate()[0].strip() # KONICA_bizhub_192.168.12.10 accepting requests since Sun 16 Dec 2012 07:43:59 PM GMT print(result) printers = [x.split(' ')[0] for x in result.split('\n')] print('[print] printers=%s' % repr(printers)) except OSError as e: print('[print] %s' % repr(e)) return printers def printFile(file, printer): cmd = ["lpr","-P", printer, file] print("[print] printer=%s file=%s cmd=%s" %(printer, file, repr(cmd) )) process = subprocess.Popen(cmd, stdout=subprocess.PIPE) results = process.communicate() results = (None,None) print("[print] printer=%s file=%s cmd=%s result=%s" %(printer, file, repr(cmd), repr(results))) if __name__=="__main__": print ('Installed printers: %s' % repr(getPrinters()))
getPrinters
astdiff.py
"""Utilities for comparing two versions of a module symbol table. The goal is to find which AST nodes have externally visible changes, so that we can fire triggers and re-process other parts of the program that are stale because of the changes. Only look at detail at definitions at the current module -- don't recurse into other modules. A summary of the module contents: * snapshot_symbol_table(...) creates an opaque snapshot description of a module/class symbol table (recursing into nested class symbol tables). * compare_symbol_table_snapshots(...) compares two snapshots for the same module id and returns fully qualified names of differences (which act as triggers). To compare two versions of a module symbol table, take snapshots of both versions and compare the snapshots. The use of snapshots makes it easy to compare two versions of the *same* symbol table that is being mutated. Summary of how this works for certain kinds of differences: * If a symbol table node is deleted or added (only present in old/new version of the symbol table), it is considered different, of course. * If a symbol table node refers to a different sort of thing in the new version, it is considered different (for example, if a class is replaced with a function). * If the signature of a function has changed, it is considered different. * If the type of a variable changes, it is considered different. * If the MRO of a class changes, or a non-generic class is turned into a generic class, the class is considered different (there are other such "big" differences that cause a class to be considered changed). However, just changes to attributes or methods don't generally constitute a difference at the class level -- these are handled at attribute level (say, 'mod.Cls.method' is different rather than 'mod.Cls' being different). * If an imported name targets a different name (say, 'from x import y' is replaced with 'from z import y'), the name in the module is considered different. If the target of an import continues to have the same name, but it's specifics change, this doesn't mean that the imported name is treated as changed. Say, there is 'from x import y' in 'm', and the type of 'x.y' has changed. This doesn't mean that that 'm.y' is considered changed. Instead, processing the difference in 'm' will be handled through fine-grained dependencies. """ from typing import Set, Dict, Tuple, Optional, Sequence, Union from mypy.nodes import ( SymbolTable, TypeInfo, Var, SymbolNode, Decorator, TypeVarExpr, TypeAlias, FuncBase, OverloadedFuncDef, FuncItem, MypyFile, UNBOUND_IMPORTED ) from mypy.types import ( Type, TypeGuardType, TypeVisitor, UnboundType, AnyType, NoneType, UninhabitedType, ErasedType, DeletedType, Instance, TypeVarType, CallableType, TupleType, TypedDictType, UnionType, Overloaded, PartialType, TypeType, LiteralType, TypeAliasType ) from mypy.util import get_prefix # Snapshot representation of a symbol table node or type. The representation is # opaque -- the only supported operations are comparing for equality and # hashing (latter for type snapshots only). Snapshots can contain primitive # objects, nested tuples, lists and dictionaries and primitive objects (type # snapshots are immutable). # # For example, the snapshot of the 'int' type is ('Instance', 'builtins.int', ()). SnapshotItem = Tuple[object, ...] def compare_symbol_table_snapshots( name_prefix: str, snapshot1: Dict[str, SnapshotItem], snapshot2: Dict[str, SnapshotItem]) -> Set[str]: """Return names that are different in two snapshots of a symbol table. Only shallow (intra-module) differences are considered. References to things defined outside the module are compared based on the name of the target only. Recurse into class symbol tables (if the class is defined in the target module). Return a set of fully-qualified names (e.g., 'mod.func' or 'mod.Class.method'). """ # Find names only defined only in one version. names1 = {'%s.%s' % (name_prefix, name) for name in snapshot1} names2 = {'%s.%s' % (name_prefix, name) for name in snapshot2} triggers = names1 ^ names2 # Look for names defined in both versions that are different. for name in set(snapshot1.keys()) & set(snapshot2.keys()): item1 = snapshot1[name] item2 = snapshot2[name] kind1 = item1[0] kind2 = item2[0] item_name = '%s.%s' % (name_prefix, name) if kind1 != kind2: # Different kind of node in two snapshots -> trivially different. triggers.add(item_name) elif kind1 == 'TypeInfo': if item1[:-1] != item2[:-1]: # Record major difference (outside class symbol tables). triggers.add(item_name) # Look for differences in nested class symbol table entries. assert isinstance(item1[-1], dict) assert isinstance(item2[-1], dict) triggers |= compare_symbol_table_snapshots(item_name, item1[-1], item2[-1]) else: # Shallow node (no interesting internal structure). Just use equality. if snapshot1[name] != snapshot2[name]: triggers.add(item_name) return triggers def snapshot_symbol_table(name_prefix: str, table: SymbolTable) -> Dict[str, SnapshotItem]: """Create a snapshot description that represents the state of a symbol table. The snapshot has a representation based on nested tuples and dicts that makes it easy and fast to find differences. Only "shallow" state is included in the snapshot -- references to things defined in other modules are represented just by the names of the targets. """ result = {} # type: Dict[str, SnapshotItem] for name, symbol in table.items(): node = symbol.node # TODO: cross_ref? fullname = node.fullname if node else None common = (fullname, symbol.kind, symbol.module_public) if isinstance(node, MypyFile): # This is a cross-reference to another module. # If the reference is busted because the other module is missing, # the node will be a "stale_info" TypeInfo produced by fixup, # but that doesn't really matter to us here. result[name] = ('Moduleref', common) elif isinstance(node, TypeVarExpr): result[name] = ('TypeVar', node.variance, [snapshot_type(value) for value in node.values], snapshot_type(node.upper_bound)) elif isinstance(node, TypeAlias): result[name] = ('TypeAlias', node.alias_tvars, node.normalized, node.no_args, snapshot_optional_type(node.target)) else: assert symbol.kind != UNBOUND_IMPORTED if node and get_prefix(node.fullname) != name_prefix: # This is a cross-reference to a node defined in another module. result[name] = ('CrossRef', common) else: result[name] = snapshot_definition(node, common) return result def snapshot_definition(node: Optional[SymbolNode], common: Tuple[object, ...]) -> Tuple[object, ...]: """Create a snapshot description of a symbol table node. The representation is nested tuples and dicts. Only externally visible attributes are included. """ if isinstance(node, FuncBase): # TODO: info if node.type: signature = snapshot_type(node.type) else: signature = snapshot_untyped_signature(node) return ('Func', common, node.is_property, node.is_final, node.is_class, node.is_static, signature) elif isinstance(node, Var):
elif isinstance(node, Decorator): # Note that decorated methods are represented by Decorator instances in # a symbol table since we need to preserve information about the # decorated function (whether it's a class function, for # example). Top-level decorated functions, however, are represented by # the corresponding Var node, since that happens to provide enough # context. return ('Decorator', node.is_overload, snapshot_optional_type(node.var.type), snapshot_definition(node.func, common)) elif isinstance(node, TypeInfo): attrs = (node.is_abstract, node.is_enum, node.is_protocol, node.fallback_to_any, node.is_named_tuple, node.is_newtype, # We need this to e.g. trigger metaclass calculation in subclasses. snapshot_optional_type(node.metaclass_type), snapshot_optional_type(node.tuple_type), snapshot_optional_type(node.typeddict_type), [base.fullname for base in node.mro], # Note that the structure of type variables is a part of the external interface, # since creating instances might fail, for example: # T = TypeVar('T', bound=int) # class C(Generic[T]): # ... # x: C[str] <- this is invalid, and needs to be re-checked if `T` changes. # An alternative would be to create both deps: <...> -> C, and <...> -> <C>, # but this currently seems a bit ad hoc. tuple(snapshot_type(TypeVarType(tdef)) for tdef in node.defn.type_vars), [snapshot_type(base) for base in node.bases], snapshot_optional_type(node._promote)) prefix = node.fullname symbol_table = snapshot_symbol_table(prefix, node.names) # Special dependency for abstract attribute handling. symbol_table['(abstract)'] = ('Abstract', tuple(sorted(node.abstract_attributes))) return ('TypeInfo', common, attrs, symbol_table) else: # Other node types are handled elsewhere. assert False, type(node) def snapshot_type(typ: Type) -> SnapshotItem: """Create a snapshot representation of a type using nested tuples.""" return typ.accept(SnapshotTypeVisitor()) def snapshot_optional_type(typ: Optional[Type]) -> Optional[SnapshotItem]: if typ: return snapshot_type(typ) else: return None def snapshot_types(types: Sequence[Type]) -> SnapshotItem: return tuple(snapshot_type(item) for item in types) def snapshot_simple_type(typ: Type) -> SnapshotItem: return (type(typ).__name__,) def encode_optional_str(s: Optional[str]) -> str: if s is None: return '<None>' else: return s class SnapshotTypeVisitor(TypeVisitor[SnapshotItem]): """Creates a read-only, self-contained snapshot of a type object. Properties of a snapshot: - Contains (nested) tuples and other immutable primitive objects only. - References to AST nodes are replaced with full names of targets. - Has no references to mutable or non-primitive objects. - Two snapshots represent the same object if and only if they are equal. - Results must be sortable. It's important that tuples have consistent types and can't arbitrarily mix str and None values, for example, since they can't be compared. """ def visit_unbound_type(self, typ: UnboundType) -> SnapshotItem: return ('UnboundType', typ.name, typ.optional, typ.empty_tuple_index, snapshot_types(typ.args)) def visit_any(self, typ: AnyType) -> SnapshotItem: return snapshot_simple_type(typ) def visit_none_type(self, typ: NoneType) -> SnapshotItem: return snapshot_simple_type(typ) def visit_uninhabited_type(self, typ: UninhabitedType) -> SnapshotItem: return snapshot_simple_type(typ) def visit_erased_type(self, typ: ErasedType) -> SnapshotItem: return snapshot_simple_type(typ) def visit_deleted_type(self, typ: DeletedType) -> SnapshotItem: return snapshot_simple_type(typ) def visit_instance(self, typ: Instance) -> SnapshotItem: return ('Instance', encode_optional_str(typ.type.fullname), snapshot_types(typ.args), ('None',) if typ.last_known_value is None else snapshot_type(typ.last_known_value)) def visit_type_var(self, typ: TypeVarType) -> SnapshotItem: return ('TypeVar', typ.name, typ.fullname, typ.id.raw_id, typ.id.meta_level, snapshot_types(typ.values), snapshot_type(typ.upper_bound), typ.variance) def visit_callable_type(self, typ: CallableType) -> SnapshotItem: # FIX generics return ('CallableType', snapshot_types(typ.arg_types), snapshot_type(typ.ret_type), tuple([encode_optional_str(name) for name in typ.arg_names]), tuple(typ.arg_kinds), typ.is_type_obj(), typ.is_ellipsis_args) def visit_tuple_type(self, typ: TupleType) -> SnapshotItem: return ('TupleType', snapshot_types(typ.items)) def visit_typeddict_type(self, typ: TypedDictType) -> SnapshotItem: items = tuple((key, snapshot_type(item_type)) for key, item_type in typ.items.items()) required = tuple(sorted(typ.required_keys)) return ('TypedDictType', items, required) def visit_literal_type(self, typ: LiteralType) -> SnapshotItem: return ('LiteralType', snapshot_type(typ.fallback), typ.value) def visit_union_type(self, typ: UnionType) -> SnapshotItem: # Sort and remove duplicates so that we can use equality to test for # equivalent union type snapshots. items = {snapshot_type(item) for item in typ.items} normalized = tuple(sorted(items)) return ('UnionType', normalized) def visit_type_guard_type(self, typ: TypeGuardType) -> SnapshotItem: return ('TypeGuardType', snapshot_type(typ.type_guard)) def visit_overloaded(self, typ: Overloaded) -> SnapshotItem: return ('Overloaded', snapshot_types(typ.items())) def visit_partial_type(self, typ: PartialType) -> SnapshotItem: # A partial type is not fully defined, so the result is indeterminate. We shouldn't # get here. raise RuntimeError def visit_type_type(self, typ: TypeType) -> SnapshotItem: return ('TypeType', snapshot_type(typ.item)) def visit_type_alias_type(self, typ: TypeAliasType) -> SnapshotItem: assert typ.alias is not None return ('TypeAliasType', typ.alias.fullname, snapshot_types(typ.args)) def snapshot_untyped_signature(func: Union[OverloadedFuncDef, FuncItem]) -> Tuple[object, ...]: """Create a snapshot of the signature of a function that has no explicit signature. If the arguments to a function without signature change, it must be considered as different. We have this special casing since we don't store the implicit signature anywhere, and we'd rather not construct new Callable objects in this module (the idea is to only read properties of the AST here). """ if isinstance(func, FuncItem): return (tuple(func.arg_names), tuple(func.arg_kinds)) else: result = [] for item in func.items: if isinstance(item, Decorator): if item.var.type: result.append(snapshot_type(item.var.type)) else: result.append(('DecoratorWithoutType',)) else: result.append(snapshot_untyped_signature(item)) return tuple(result)
return ('Var', common, snapshot_optional_type(node.type), node.is_final)
browser.d.ts
/// <reference path="browser/ambient/jquery/index.d.ts" /> /// <reference path="browser/ambient/mithril/index.d.ts" /> /// <reference path="browser/ambient/moment-range/index.d.ts" /> /// <reference path="browser/ambient/mousetrap/index.d.ts" /> /// <reference path="browser/ambient/semantic-ui/index.d.ts" /> /// <reference path="browser/definitions/d3/index.d.ts" /> /// <reference path="browser/definitions/moment/index.d.ts" />
whitelist.go
package comet import ( "log" "os" "github.com/jank1369/goim/internal/comet/conf" ) var whitelist *Whitelist // Whitelist . type Whitelist struct { log *log.Logger list map[int64]struct{} // whitelist for debug } // InitWhitelist a whitelist struct. func InitWhitelist(c *conf.Whitelist) (err error) { var ( mid int64 f *os.File ) if f, err = os.OpenFile(c.WhiteLog, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0644); err == nil
return } // Contains whitelist contains a mid or not. func (w *Whitelist) Contains(mid int64) (ok bool) { if mid > 0 { _, ok = w.list[mid] } return } // Printf calls l.Output to print to the logger. func (w *Whitelist) Printf(format string, v ...interface{}) { w.log.Printf(format, v...) }
{ whitelist = new(Whitelist) whitelist.log = log.New(f, "", log.LstdFlags) whitelist.list = make(map[int64]struct{}) for _, mid = range c.Whitelist { whitelist.list[mid] = struct{}{} } }
my-name.d.ts
export declare class
{ render(): any; log(event: any): void; }
MyName
gen.go
/************************************************************************* * Copyright 2018 Gravwell, Inc. All rights reserved. * Contact: <[email protected]> * * This software may be modified and distributed under the terms of the * BSD 2-clause license. See the LICENSE file for details. **************************************************************************/ package main import ( "fmt" "log" "math/rand" "time" rd "github.com/Pallinder/go-randomdata" "github.com/google/uuid" "github.com/gravwell/gravwell/v3/generators/ipgen" "github.com/gravwell/gravwell/v3/ingest" "github.com/gravwell/gravwell/v3/ingest/entry" ) const ( streamBlock = 10 ) var ( v4gen *ipgen.V4Gen v6gen *ipgen.V6Gen ) func init() {
var err error v4gen, err = ipgen.RandomWeightedV4Generator(3) if err != nil { log.Fatal("Failed to instantiate v4 generator: %v", err) } v6gen, err = ipgen.RandomWeightedV6Generator(30) if err != nil { log.Fatal("Failed to instantiate v6 generator: %v", err) } } func throw(igst *ingest.IngestMuxer, tag entry.EntryTag, cnt uint64, dur time.Duration) (err error) { sp := dur / time.Duration(cnt) ts := time.Now().Add(-1 * dur) for i := uint64(0); i < cnt; i++ { dt := genData(ts) if err = igst.WriteEntry(&entry.Entry{ TS: entry.FromStandard(ts), Tag: tag, SRC: src, Data: dt, }); err != nil { return } ts = ts.Add(sp) totalBytes += uint64(len(dt)) totalCount++ } return } func stream(igst *ingest.IngestMuxer, tag entry.EntryTag, cnt uint64, stop *bool) (err error) { sp := time.Second / time.Duration(cnt) var ent *entry.Entry loop: for !*stop { ts := time.Now() start := ts for i := uint64(0); i < cnt; i++ { dt := genData(ts) ent = &entry.Entry{ TS: entry.FromStandard(ts), Tag: tag, SRC: src, Data: dt, } if err = igst.WriteEntry(ent); err != nil { break loop } totalBytes += uint64(len(dt)) totalCount++ ts = ts.Add(sp) } time.Sleep(time.Second - time.Since(start)) } return } func genData(ts time.Time) []byte { ipa, ipb := ips() return []byte(fmt.Sprintf("%s,%s,%d,%s,"+ "%s,%d,%s,%d,"+ "\"%s\n%s\", \"%s\",%s,%x", ts.Format(tsFormat), getApp(), rand.Intn(0xffff), uuid.New(), ipa, 2048+rand.Intn(0xffff-2048), ipb, 1+rand.Intn(2047), rd.Paragraph(), rd.FirstName(rd.RandomGender), rd.Country(rd.TwoCharCountry), rd.City(), []byte(v6gen.IP()))) } func ips() (string, string) { if (rand.Int() & 3) == 0 { //more IPv4 than 6 return v6gen.IP().String(), v6gen.IP().String() } return v4gen.IP().String(), v4gen.IP().String() }
TokenizerTestParallel.rs
tokenizertestParallel.TokenizerTestParallel
PrivateRoute.tsx
import React from 'react' import { Redirect, Route } from 'react-router-dom' import { getToken } from '../../lib/token' import PATH_CODES from '../pathnames' import StandardLayoutRoute from '../StandardLayoutRoute/StandardLayoutRoute' type Props = { id: string | undefined history: any | undefined location: any | undefined component: any path: string | undefined exact?: any } function PrivateRoute(props: Props) { // eslint-disable-next-line @typescript-eslint/no-unused-vars const { component: Component, id, ...rest } = props // Todo: Add real token validation const token = getToken() return ( <Route id={id} {...rest} render={() => token ? ( <StandardLayoutRoute {...props} />
/> ) } export default PrivateRoute
) : ( <Redirect to={{ pathname: PATH_CODES.LOGIN, state: { referer: props.location } }} /> ) }
converters.ts
import { Where, WhereField } from '@via-profit-services/core'; import { ConvertOrderByToKnex, ConvertJsonToKnex, ConvertBetweenToKnex, ConvertWhereToKnex, ConvertSearchToKnex, ApplyAliases, } from '@via-profit-services/knex'; import { DEFAULT_TIMEZONE } from '../constants'; export const applyAliases: ApplyAliases = (whereClause, aliases) => { const aliasesMap = new Map<string, string>(); Object.entries(aliases).forEach(([tableName, field]) => { const fieldsArray = Array.isArray(field) ? field : [field]; fieldsArray.forEach(fieldName => { aliasesMap.set(fieldName, tableName); }); }); const newWhere = whereClause.map(data => { const [field, action, value] = data; const alias = aliasesMap.get(field) || aliasesMap.get('*'); const whereField: WhereField = [ alias && alias !== 'none' ? `${alias}.${field}` : field, action, value, ]; return whereField; }); return newWhere; }; /** * Convert GraphQL OrderBy array to Knex OrderBy array format */ export const convertOrderByToKnex: ConvertOrderByToKnex = (orderBy, aliases) => { const orderByArray = [...(orderBy || [])]; const aliasesMap = new Map<string, string>(); Object.entries(aliases || {}).forEach(([tableName, field]) => { const fieldsArray = Array.isArray(field) ? field : [field]; fieldsArray.forEach(fieldName => { aliasesMap.set(fieldName, tableName); }); }); return orderByArray.map(({ field, direction }) => { const alias = aliasesMap.get(field) || aliasesMap.get('*'); return { column: alias && alias !== 'none' ? `${alias}.${field}` : field, order: direction, }; }); }; export const convertJsonToKnex: ConvertJsonToKnex = (knex, data) => { try { const str = typeof data === 'string' ? data : JSON.stringify(data); return knex.raw('?::jsonb', [str]); } catch (err) { throw new Error('Json field convertation failure. Check the «convertJsonToKnex» passed params'); } }; export const convertBetweenToKnex: ConvertBetweenToKnex = (builder, between, options) => { const { aliases } = options || { aliases: {}, timezone: DEFAULT_TIMEZONE, }; if (typeof between === 'undefined') { return builder; } const aliasesMap = new Map<string, string>(); Object.entries(aliases || {}).forEach(([tableName, field]) => { const fieldsArray = Array.isArray(field) ? field : [field]; fieldsArray.forEach(fieldName => { aliasesMap.set(fieldName, tableName); }); }); Object.entries(between).forEach(([field, betweenData]) => { const alias = aliasesMap.get(field) || aliasesMap.get('*'); builder.whereBetween(alias && alias !== 'none' ? `${alias}.${field}` : field, [ betweenData.start instanceof Date ? new Date(betweenData.start).toUTCString() : betweenData.start, betweenData.end instanceof Date ? new Date(betweenData.end).toUTCString() : betweenData.end, ]); }); return builder; }; export const convertWhereToKnex: ConvertWhereToKnex = (builder, whereClause, aliases) => { if (typeof whereClause === 'undefined') { return builder; } const whereArray: Where = []; // if is an array if (Array.isArray(whereClause)) { whereClause.forEach(([field, action, value]) => { whereArray.push([field, action, value]); }); } if (!Array.isArray(whereClause)) { Object.entries(whereClause).forEach(([field, value]) => { whereArray.push([field, '=', value]);
}); } [...(aliases ? applyAliases(whereArray, aliases) : whereArray)].forEach( ([field, action, value]) => { switch (true) { case action === 'in': builder.whereIn( field, Array.isArray(value) ? value : ([value] as Array<string | number>), ); break; case action === 'notIn': builder.whereNotIn( field, Array.isArray(value) ? value : ([value] as Array<string | number>), ); break; case action === 'is null': builder.whereNull(field); break; case action === 'is not null': builder.whereNotNull(field); break; default: builder.where(field, action, value as string | number | boolean | null); break; } }, ); return builder; }; export const convertSearchToKnex: ConvertSearchToKnex = (builder, search, aliases, options) => { if (!search || !search.length) { return builder; } const splitWords = typeof options?.splitWords === 'undefined' ? false : Boolean(options?.splitWords); const strategy = typeof options?.strategy === 'undefined' ? 'from-start' : options.strategy; try { const searchFields: Record<string, string[]> = {}; const aliasesMap = new Map<string, string>(); // fill aliasesMap Object.entries(aliases || {}).forEach(([tableName, field]) => { const fieldsArray = Array.isArray(field) ? field : [field]; fieldsArray.forEach(fieldName => { aliasesMap.set(fieldName, tableName); }); }); // Group search queries by field name search.forEach(({ field, query }) => { const data = searchFields[field] || []; const queries = splitWords ? query.trim().split(' ') : [query.trim()]; searchFields[field] = data.concat(queries); }); Object.entries(searchFields).forEach(([field, queries]) => { builder.andWhere(andWhereBuilder => { queries.forEach(query => { const alias = aliasesMap.get(field) || aliasesMap.get('*'); const column = alias && alias !== 'none' ? `${alias}.${field}` : field; switch (strategy) { case 'to-end': andWhereBuilder.orWhereRaw('??::text ilike ?', [column, `%${query}`]); break; case 'explicit': andWhereBuilder.orWhereRaw('??::text ilike ?', [column, query]); break; case 'blurry': andWhereBuilder.orWhereRaw('??::text ilike ?', [column, `%${query}%`]); break; case 'from-start': default: andWhereBuilder.orWhereRaw('??::text ilike ?', [column, `${query}%`]); break; } }); return andWhereBuilder; }); }); return builder; } catch (err) { throw new Error( 'Search field convertation failure. Check the «convertSearchToKnex» passed params', ); } };
b_aoc10.py
#!/usr/bin/env python data_in = '../inputs/set01/aoc10.in' def load(file): with open(file) as x: output = x.read() return output.replace('\n', '') def look_say(start):
def solve(data): start = data for x in xrange(50): start = look_say(start) if x == 39: part1 = len(start) print "Pt1:{}\nPt2:{}".format(part1, len(start)) solve(load(data_in))
(current, counter) = ('', 0) new = '' for digit in start: if current != digit: if current: new = '{}{}{}'.format(new, counter, current) current = digit counter = 1 else: counter += 1 return '{}{}{}'.format(new, counter, current)
mast.py
# -*- coding: utf-8 -*- """ Adapters for the field names/types returned by the MAST API. """ from __future__ import (division, print_function, absolute_import, unicode_literals) __all__ = ["koi_adapter", "planet_adapter", "star_adapter", "dataset_adapter", "epic_adapter"] import logging import six try: unicode except NameError: unicode = str class Adapter(object): """ An :class:`Adapter` is a callable that maps a dictionary to another dictionary with different keys and specified data types. Missing/invalid values will be mapped to ``None``. :param parameters: A dictionary of mappers. The keys should be the keys that will be in the input dictionary and the values should be 2-tuples with the output key and the callable type converter. """ def __init__(self, parameters): self._parameters = parameters # Add some general purpose parameters. self._parameters["Ang Sep (')"] = ("angular_separation", float) def __call__(self, row): row = dict(row) final = {} for longname, (shortname, conv) in self._parameters.items(): try: final[shortname] = conv(row.pop(longname, None)) except (ValueError, TypeError): final[shortname] = None for k in row: logging.warn("Unrecognized parameter: '{0}'".format(k)) return final koi_adapter = Adapter({ "Kepler ID": ("kepid", int), "KOI Name": ("kepoi_name", six.text_type), "KOI Number": ("kepoi", six.text_type), "Kepler Disposition": ("koi_pdisposition", six.text_type), "NExScI Disposition": ("koi_disposition", six.text_type), "RA (J2000)": ("degree_ra", float), "Dec (J2000)": ("degree_dec", float), "Time of Transit Epoch": ("koi_time0bk", float), "Time err1": ("koi_time0bk_err1", float), "Time_err2": ("koi_time0bk_err2", float), "Period": ("koi_period", float), "Period err1": ("koi_period_err1", float), "Period err2": ("koi_period_err2", float), "Transit Depth": ("koi_depth", float), "Depth err1": ("koi_depth_err1", float), "Depth err2": ("koi_depth_err2", float), "Duration": ("koi_duration", float), "Duration err1": ("koi_duration_err1", float), "Duration err2": ("koi_duration_err2", float), "Ingress Duration": ("koi_ingress", float), "Ingress err1": ("koi_ingress_err1", float), "Ingress err2": ("koi_ingress_err2", float), "Impact Parameter": ("koi_impact", float), "Impact Parameter err1": ("koi_impact_err1", float), "Impact Parameter err2": ("koi_impact_err2", float), "Inclination": ("koi_incl", float), "Inclination err1": ("koi_incl_err1", float), "Inclination err2": ("koi_incl_err2", float), "Semi-major Axis": ("koi_sma", float), "Semi-major Axus err1": ("koi_sma_err1", float), "Semi-major Axis err2": ("koi_sma_err2", float), "Eccentricity": ("koi_eccen", float), "Eccentricity err1": ("koi_eccen_err1", float), "Eccentricity err2": ("koi_eccen_err2", float), "Long of Periastron": ("koi_longp", float), "Long err1": ("koi_longp_err1", float), "Long err2": ("koi_longp_err2", float), "r/R": ("koi_ror", float), "r/R err1": ("koi_ror_err1", float), "r/R err2": ("koi_ror_err2", float), "a/R": ("koi_dor", float), "a/R err1": ("koi_dor_err1", float), "a/R err2": ("koi_dor_err2", float), "Planet Radius": ("koi_prad", float), "Planet Radius err1": ("koi_prad_err1", float), "Planet Radius err2": ("koi_prad_err2", float), "Teq": ("koi_teq", int), "Teq err1": ("koi_teq_err1", int), "Teq err2": ("koi_teq_err2", int), "Teff": ("koi_steff", int), "Teff err1": ("koi_steff_err1", int), "Teff err2": ("koi_steff_err2", int), "log(g)": ("koi_slogg", float), "log(g) err1": ("koi_slogg_err1", float), "log(g) err2": ("koi_slogg_err2", float), "Metallicity": ("koi_smet", float), "Metallicity err1": ("koi_smet_err1", float), "Metallicity err2": ("koi_smet_err2", float), "Stellar Radius": ("koi_srad", float), "Stellar Radius err1": ("koi_srad_err1", float), "Stellar Radius err2": ("koi_srad_err2", float), "Stellar Mass": ("koi_smass", float), "Stellar Mass err2": ("koi_smass_err2", float), "Stellar Mass err1": ("koi_smass_err1", float), "Age": ("koi_sage", float), "Age err1": ("koi_sage_err1", float), "Age err2": ("koi_sage_err2", float), "Provenance": ("koi_sparprov", six.text_type), "Quarters": ("koi_quarters", six.text_type), "Limb Darkening Model": ("koi_limbdark_mod", six.text_type), "Limb Darkening Coeff1": ("koi_ldm_coeff1", float), "Limb Darkening Coeff2": ("koi_ldm_coeff2", float), "Limb Darkening Coeff3": ("koi_ldm_coeff3", float), "Limb Darkening Coeff4": ("koi_ldm_coeff4", float), "Transit Number": ("koi_num_transits", int), "Max single event sigma": ("koi_max_sngle_ev", float), "Max Multievent sigma": ("koi_max_mult_ev", float), "KOI count": ("koi_count", int), "Binary Discrimination": ("koi_bin_oedp_sig", float), "False Positive Bkgnd ID": ("koi_fp_bkgid", six.text_type), "J-band diff": ("koi_fp_djmag", six.text_type), "Comments": ("koi_comment", six.text_type), "Transit Model": ("koi_trans_mod", six.text_type), "Transit Model SNR": ("koi_model_snr", float), "Transit Model DOF": ("koi_model_dof", float), "Transit Model chisq": ("koi_model_chisq", float), "FWM motion signif.": ("koi_fwm_stat_sig", float), "gmag": ("koi_gmag", float), "gmag err": ("koi_gmag_err", float), "rmag": ("koi_rmag", float), "rmag err": ("koi_rmag_err", float), "imag": ("koi_imag", float), "imag err": ("koi_imag_err", float), "zmag": ("koi_zmag", float), "zmag err": ("koi_zmag_err", float), "Jmag": ("koi_jmag", float), "Jmag err": ("koi_jmag_err", float), "Hmag": ("koi_hmag", float), "Hmag err": ("koi_hmag_err", float), "Kmag": ("koi_kmag", float), "Kmag err": ("koi_kmag_err", float), "kepmag": ("koi_kepmag", float), "kepmag err": ("koi_kepmag_err", float), "Delivery Name": ("koi_delivname", six.text_type), "FWM SRA": ("koi_fwm_sra", float), "FWM SRA err": ("koi_fwm_sra_err", float), "FWM SDec": ("koi_fwm_sdec", float), "FWM SDec err": ("koi_fwm_sdec_err", float), "FWM SRAO": ("koi_fwm_srao", float), "FWM SRAO err": ("koi_fwm_srao_err", float), "FWM SDeco": ("koi_fwm_sdeco", float), "FWM SDeco err": ("koi_fwm_sdeco_err", float), "FWM PRAO": ("koi_fwm_prao", float), "FWM PRAO err": ("koi_fwm_prao_err", float), "FWM PDeco": ("koi_fwm_pdeco", float), "FWM PDeco err": ("koi_fwm_pdeco_err", float), "Dicco MRA": ("koi_dicco_mra", float), "Dicco MRA err": ("koi_dicco_mra_err", float), "Dicco MDec": ("koi_dicco_mdec", float), "Dicco MDec err": ("koi_dicco_mdec_err", float), "Dicco MSky": ("koi_dicco_msky", float), "Dicco MSky err": ("koi_dicco_msky_err", float), "Dicco FRA": ("koi_dicco_fra", float), "Dicco FRA err": ("koi_dicco_fra_err", float), "Dicco FDec": ("koi_dicco_fdec", float), "Dicco FDec err": ("koi_dicco_fdec_err", float), "Dicco FSky": ("koi_dicco_fsky", float), "Dicco FSky err": ("koi_dicco_fsky_err", float), "Dikco MRA": ("koi_dikco_mra", float), "Dikco MRA err": ("koi_dikco_mra_err", float), "Dikco MDec": ("koi_dikco_mdec", float), "Dikco MDec err": ("koi_dikco_mdec_err", float), "Dikco MSky": ("koi_dikco_msky", float), "Dikco MSky err": ("koi_dikco_msky_err", float), "Dikco FRA": ("koi_dikco_fra", float), "Dikco FRA err": ("koi_dikco_fra_err", float), "Dikco FDec": ("koi_dikco_fdec", float), "Dikco FDec err": ("koi_dikco_fdec_err", float), "Dikco FSky": ("koi_dikco_fsky", float), "Dikco FSky err": ("koi_dikco_fsky_err", float), "Last Update": ("rowupdate", six.text_type), }) planet_adapter = Adapter({ "Planet Name": ("kepler_name", six.text_type), "Kepler ID": ("kepid", int), "KOI Name": ("kepoi_name", six.text_type), "Alt Name": ("alt_name", six.text_type), "KOI Number": ("koi_number", six.text_type), # Just `koi` in API. "RA (J2000)": ("degree_ra", float), "RA Error": ("ra_err", float), "Dec (J2000)": ("degree_dec", float), "Dec Error": ("dec_err", float), "2mass Name": ("tm_designation", six.text_type), "Planet temp": ("koi_teq", int), "Planet Radius": ("koi_prad", float), "Transit duration": ("koi_duration", float), "Period": ("koi_period", float), "Period err1": ("koi_period_err1", float), "Ingress Duration": ("koi_ingress", float), "Impact Parameter": ("koi_impact", float), "Inclination": ("koi_incl", float), "Provenance": ("koi_sparprov", six.text_type), "a/R": ("koi_dor", float), "Transit Number": ("koi_num_transits", int), "Transit Model": ("koi_trans_mod", six.text_type), "Time of transit": ("koi_time0bk", float), "Time of transit err1": ("koi_time0bk_err1", float), "Transit Depth": ("koi_depth", float), "Semi-major Axis": ("koi_sma", float), "r/R": ("koi_ror", float), "r/R err1": ("koi_ror_err1", float), "Age": ("koi_sage", float), "Metallicity": ("koi_smet", float), "Stellar Mass": ("koi_smass", float), "Stellar Radius": ("koi_srad", float), "Stellar Teff": ("koi_steff", int), "Logg": ("koi_slogg", float), "KEP Mag": ("koi_kepmag", float), "g Mag": ("koi_gmag", float), "r Mag": ("koi_rmag", float), "i Mag": ("koi_imag", float), "z Mag": ("koi_zmag", float), "J Mag": ("koi_jmag", float), "H Mag": ("koi_hmag", float), "K Mag": ("koi_kmag", float), "KOI List": ("koi_list_flag", six.text_type), "Last Update": ("koi_vet_date", six.text_type), }) star_adapter = Adapter({ "Kepler ID": ("kic_kepler_id", int), "RA (J2000)": ("kic_degree_ra", float), "Dec (J2000)": ("kic_dec", float), "RA PM (arcsec/yr)": ("kic_pmra", float), "Dec PM (arcsec/yr)": ("kic_pmdec", float), "u Mag": ("kic_umag", float), "g Mag": ("kic_gmag", float), "r Mag": ("kic_rmag", float), "i Mag": ("kic_imag", float), "z Mag": ("kic_zmag", float), "Gred Mag": ("kic_gredmag", float),
"D51 Mag": ("kic_d51mag", float), "J Mag": ("kic_jmag", float), "H Mag": ("kic_hmag", float), "K Mag": ("kic_kmag", float), "Kepler Mag": ("kic_kepmag", float), "2MASS ID": ("kic_2mass_id", six.text_type), "2MASS Designation": ("kic_tmid", int), "SCP ID": ("kic_scpid", int), "Alt ID": ("kic_altid", int), "Alt ID Source": ("kic_altsource", int), "Star/Gal ID": ("kic_galaxy", int), "Isolated/Blend ID": ("kic_blend", int), "Var. ID": ("kic_variable", int), "Teff (deg K)": ("kic_teff", int), "Log G (cm/s/s)": ("kic_logg", float), "Metallicity (solar=0.0)": ("kic_feh", float), "E(B-V)": ("kic_ebminusv", float), "A_V": ("kic_av", float), "Radius (solar=1.0)": ("kic_radius", float), "Kepmag Source": ("kic_cq", six.text_type), "Photometry Qual": ("kic_pq", int), "Astrophysics Qual": ("kic_aq", int), "Catalog key": ("kic_catkey", int), "Scp Key": ("kic_scpkey", int), "Parallax (arcsec)": ("kic_parallax", float), "Gal Lon (deg)": ("kic_glon", float), "Gal Lat (deg)": ("kic_glat", float), "Total PM (arcsec/yr)": ("kic_pmtotal", float), "g-r color": ("kic_grcolor", float), "J-K color": ("kic_jkcolor", float), "g-K color": ("kic_gkcolor", float), "RA hours (J2000)": ("kic_ra", float), "Flag": ("flag", int), }) dataset_adapter = Adapter({ "Kepler ID": ("ktc_kepler_id", int), "Investigation ID": ("ktc_investigation_id", six.text_type), "Pep ID": ("sci_pep_id", int), "Dataset Name": ("sci_data_set_name", six.text_type), "Quarter": ("sci_data_quarter", int), "Data Release": ("sci_data_rel", int), "RA (J2000)": ("sci_ra", float), "Dec (J2000)": ("sci_dec", float), "Target Type": ("ktc_target_type", six.text_type), "Archive Class": ("sci_archive_class", six.text_type), "Ref": ("refnum", int), "Actual Start Time": ("sci_start_time", six.text_type), "Actual End Time": ("sci_end_time", six.text_type), "Release Date": ("sci_release_date", six.text_type), "RA PM": ("kic_pmra", float), "Dec PM": ("kic_pmdec", float), "U Mag": ("kic_umag", float), "G Mag": ("kic_gmag", float), "R Mag": ("kic_rmag", float), "I Mag": ("kic_imag", float), "Z Mag": ("kic_zmag", float), "GRed Mag": ("kic_gredmag", float), "D51 Mag": ("kic_d51mag", float), "J Mag": ("twoMass_jmag", float), "H Mag": ("twoMass_hmag", float), "K Mag": ("twoMass_kmag", float), "KEP Mag": ("kic_kepmag", float), "2MASS ID": ("twoMass_2mass_id", six.text_type), "2MASS Designation": ("twoMass_tmid", int), "2MASS conflict flag": ("twoMass_conflictFlag", six.text_type), "SCP ID": ("kic_scpid", int), "Alt ID": ("kic_altid", int), "Alt ID Source": ("kic_altsource", int), "Star/Gal ID": ("kic_galaxy", int), "Isolated/Blend ID": ("kic_blend", int), "Var. ID": ("kic_variable", int), "Teff": ("kic_teff", int), "Log G": ("kic_logg", float), "Metallicity": ("kic_feh", float), "E(B-V)": ("kic_ebminusv", float), "A_V": ("kic_av", float), "Radius": ("kic_radius", float), "Kepmag Source": ("kic_cq", six.text_type), "Photometry Qual": ("kic_pq", int), "Astrophysics Qual": ("kic_aq", int), "Catalog key": ("kic_catkey", int), "Scp Key": ("kic_scpkey", int), "Parallax": ("kic_parallax", float), "Gal Lon": ("kic_glon", float), "Gal Lat": ("kic_glat", float), "Total PM": ("kic_pmtotal", float), "G-R color": ("kic_grcolor", float), "J-K color": ("twoMass_jkcolor", float), "G-K color": ("twoMass_gkcolor", float), "Processing Date": ("sci_generation_date", six.text_type), "crowding": ("sci_crowdsap", float), "contamination": ("sci_contamination", float), "flux fraction": ("sci_flfrcsap", float), "cdpp3": ("sci_Cdpp3_0", float), "cdpp6": ("sci_Cdpp6_0", float), "cdpp12": ("sci_Cdpp12_0", float), "Module": ("sci_module", int), "Output": ("sci_output", int), "Channel": ("sci_channel", int), "Skygroup_ID": ("sci_skygroup_id", int), "Condition flag": ("condition_flag", six.text_type), }) epic_adapter = Adapter({ "EPIC": ("id", int), "RA": ("k2_ra", float), "Dec": ("k2_dec", float), "KepMag": ("kp", float), "HIP": ("hip", int), "TYC": ("tyc", six.text_type), "UCAC": ("ucac", six.text_type), "2MASS": ("twomass", six.text_type), "SDSS": ("sdss", six.text_type), "Object type": ("objtype", six.text_type), "Kepflag": ("kepflag", six.text_type), "pmra": ("pmra", float), "e_pmra": ("e_pmra", float), "pmdec": ("pmdec", float), "e_pmdec": ("e_pmdec", float), "plx": ("plx", float), "e_plx": ("e_plx", float), "Bmag": ("bmag", float), "e_Bmag": ("e_bmag", float), "Vmag": ("vmag", float), "e_Vmag": ("e_vmag", float), "umag": ("umag", float), "e_umag": ("e_umag", float), "gmag": ("gmag", float), "e_gmag": ("e_gmag", float), "rmag": ("rmag", float), "e_rmag": ("e_rmag", float), "imag": ("imag", float), "e_imag": ("e_imag", float), "zmag": ("zmag", float), "e_zmag": ("e_zmag", float), "Jmag": ("jmag", float), "e_Jmag": ("e_jmag", float), "Hmag": ("hmag", float), "e_Hmag": ("e_hmag", float), "Kmag": ("kmag", float), "e_Kmag": ("e_kmag", float), "w1mag": ("w1mag", float), "e_w1mag": ("e_w1mag", float), "w2mag": ("w2mag", float), "e_w2mag": ("e_w2mag", float), "w3mag": ("w3mag", float), "e_w3mag": ("e_w3mag", float), "w4mag": ("w4mag", float), "e_w4mag": ("e_w4mag", float), "Teff": ("teff", float), "e_teff": ("e_teff", float), "logg": ("logg", float), "e_logg": ("e_logg", float), "[Fe/H]": ("feh", float), "e_[Fe/H]": ("e_feh", float), "Radius": ("rad", float), "e_rad": ("e_rad", float), "mass": ("mass", float), "e_mass": ("e_mass", float), "rho": ("rho", float), "e_rho": ("e_rho", float), "lum": ("lum", float), "e_lum": ("e_lum", float), "Distance": ("d", float), "e_d": ("e_d", float), "E(B-V)": ("ebv", float), "2MASS Flag": ("mflg", six.text_type), "Nearest Neighbor": ("prox", float), "Nomad ID": ("nomad", six.text_type), }) k2_dataset_adapter = Adapter({ "K2 ID": ("ktc_k2_id", int), "Dataset Name": ("sci_data_set_name", six.text_type), "Campaign": ("sci_campaign", int), "Object type": ("objtype", six.text_type), "Data Release": ("sci_data_rel", int), "RA (J2000)": ("sci_ra", float), "Dec (J2000)": ("sci_dec", float), "Target Type": ("ktc_target_type", six.text_type), "Archive Class": ("sci_archive_class", six.text_type), "Ref": ("refnum", int), "Actual Start Time": ("sci_start_time", six.text_type), "Actual End Time": ("sci_end_time", six.text_type), "Investigation ID": ("ktc_investigation_id", six.text_type), "RA PM": ("pmRA", float), "RA PM Err": ("e_pmRA", float), "Dec PM": ("pmDEC", float), "Dec PM Err": ("e_pmDEC", float), "Plx": ("plx", float), "Plx Err": ("e_plx", float), "U Mag": ("umag", float), "U Mag Err": ("e_umag", float), "B Mag": ("bmag", float), "B Mag Err": ("e_bmag", float), "V Mag": ("vmag", float), "V Mag Err": ("e_vmag", float), "G Mag": ("gmag", float), "G Mag Err": ("e_gmag", float), "R Mag": ("rmag", float), "R Mag Err": ("e_rmag", float), "I Mag": ("imag", float), "I Mag Err": ("e_imag", float), "Z Mag": ("zmag", float), "Z Mag Err": ("e_zmag", float), "J Mag": ("jmag", float), "J Mag Err": ("e_jmag", float), "H Mag": ("hmag", float), "H Mag Err": ("e_hmag", float), "K Mag": ("kmag", float), "K Mag Err": ("e_kmag", float), "KEP Mag": ("kp", float), "Kep Flag": ("kepflag", six.text_type), "Hip ID": ("hip", int), "Tyc ID": ("tyc", six.text_type), "SDSS ID": ("sdss", six.text_type), "UCAC ID": ("ucac", six.text_type), "2MASS ID": ("twoMass", six.text_type), "2MASS Flag": ("mflg", six.text_type), "Processing Date": ("sci_generation_date", six.text_type), "crowding": ("sci_crowdsap", float), "contamination": ("sci_contamination", float), "flux fraction": ("sci_flfrcsap", float), "cdpp3": ("sci_Cdpp3_0", float), "cdpp6": ("sci_Cdpp6_0", float), "cdpp12": ("sci_Cdpp12_0", float), "Module": ("sci_module", int), "Output": ("sci_output", int), "Channel": ("sci_channel", int), "Nearest Neighbor": ("prox", float), "Nomad ID": ("nomad", six.text_type), }) target_adapter = Adapter({ "masterRA": ("masterRA", float), "masterDec": ("masterDec", float), "Kepler_ID":("kic_kepler_id", int), "2MASS_ID":("twomass_2mass_id", str), "U_UBV":("U_UBV", float), "gr":("gr", float), "Parallax (arcsec)":("kic_parallax", float), "Channel_0": ("Channel_0", int), "Channel_1": ("Channel_1", int), "Channel_2": ("Channel_2", int), "Channel_3": ("Channel_3", int), "Module_0": ("Module_0", int), "Module_1": ("Module_1", int), "Module_2": ("Module_2", int), "Module_3": ("Module_3", int), "Row_0": ("Row_0", int), "Row_1": ("Row_1", int), "Row_2": ("Row_2", int), "Row_3": ("Row_3", int), "Column_0": ("Column_0", int), "Column_1": ("Column_1", int), "Column_2": ("Column_2", int), "Column_3": ("Column_3", int), })
test-tls-disable-renegotiation.js
'use strict'; const common = require('../common'); const assert = require('assert'); const fixtures = require('../common/fixtures'); // Tests that calling disableRenegotiation on a TLSSocket stops renegotiation. if (!common.hasCrypto) common.skip('missing crypto'); const tls = require('tls'); // Renegotiation as a protocol feature was dropped after TLS1.2. tls.DEFAULT_MAX_VERSION = 'TLSv1.2'; const options = { key: fixtures.readKey('agent1-key.pem'), cert: fixtures.readKey('agent1-cert.pem'), }; const server = tls.Server(options, common.mustCall((socket) => { socket.on('error', common.mustCall((err) => { common.expectsError({ name: 'Error', code: 'ERR_TLS_RENEGOTIATION_DISABLED', message: 'TLS session renegotiation disabled for this socket' })(err); socket.destroy(); server.close(); }));
socket.on('data', common.mustCall((chunk) => { socket.write(chunk); socket.disableRenegotiation(); })); socket.on('secure', common.mustCall(() => { assert(socket._handle.handshakes < 2, `Too many handshakes [${socket._handle.handshakes}]`); })); })); server.listen(0, common.mustCall(() => { const port = server.address().port; const options = { rejectUnauthorized: false, port }; const client = tls.connect(options, common.mustCall(() => { assert.throws(() => client.renegotiate(), { code: 'ERR_INVALID_ARG_TYPE', name: 'TypeError', }); assert.throws(() => client.renegotiate(common.mustNotCall()), { code: 'ERR_INVALID_ARG_TYPE', name: 'TypeError', }); assert.throws(() => client.renegotiate({}, false), { code: 'ERR_INVALID_CALLBACK', name: 'TypeError', }); assert.throws(() => client.renegotiate({}, null), { code: 'ERR_INVALID_CALLBACK', name: 'TypeError', }); // Negotiation is still permitted for this first // attempt. This should succeed. let ok = client.renegotiate(options, common.mustSucceed(() => { // Once renegotiation completes, we write some // data to the socket, which triggers the on // data event on the server. After that data // is received, disableRenegotiation is called. client.write('data', common.mustCall(() => { // This second renegotiation attempt should fail // and the callback should never be invoked. The // server will simply drop the connection after // emitting the error. ok = client.renegotiate(options, common.mustNotCall()); assert.strictEqual(ok, true); })); })); assert.strictEqual(ok, true); client.on('secureConnect', common.mustCall(() => { })); client.on('secure', common.mustCall(() => { })); })); }));
// Disable renegotiation after the first chunk of data received. // Demonstrates that renegotiation works successfully up until // disableRenegotiation is called.
command.rs
use crate::{ chain_spec, cli::{Cli, Subcommand}, service, }; use node_broker_runtime::Block; use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; impl SubstrateCli for Cli { fn impl_name() -> String { "Substrate Node".into() } fn impl_version() -> String { env!("SUBSTRATE_CLI_IMPL_VERSION").into() } fn description() -> String { env!("CARGO_PKG_DESCRIPTION").into() } fn author() -> String { env!("CARGO_PKG_AUTHORS").into() } fn support_url() -> String { "support.anonymous.an".into() } fn copyright_start_year() -> i32 { 2017 } fn load_spec(&self, id: &str) -> Result<Box<dyn sc_service::ChainSpec>, String> { Ok(match id { "dev" => Box::new(chain_spec::development_config()?), "" | "local" => Box::new(chain_spec::local_testnet_config()?), path => Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), }) } fn native_runtime_version(_: &Box<dyn ChainSpec>) -> &'static RuntimeVersion { &node_broker_runtime::VERSION } } /// Parse and run command line arguments pub fn run() -> sc_cli::Result<()> { let cli = Cli::from_args(); match &cli.subcommand { Some(Subcommand::Key(cmd)) => cmd.run(&cli), Some(Subcommand::BuildSpec(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) }, Some(Subcommand::CheckBlock(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { let PartialComponents { client, task_manager, import_queue, .. } = service::new_partial(&config)?; Ok((cmd.run(client, import_queue), task_manager)) }) }, Some(Subcommand::ExportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; Ok((cmd.run(client, config.database), task_manager)) }) }, Some(Subcommand::ExportState(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; Ok((cmd.run(client, config.chain_spec), task_manager)) }) }, Some(Subcommand::ImportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { let PartialComponents { client, task_manager, import_queue, .. } = service::new_partial(&config)?; Ok((cmd.run(client, import_queue), task_manager)) }) }, Some(Subcommand::PurgeChain(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run(config.database)) }, Some(Subcommand::Revert(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { let PartialComponents { client, task_manager, backend, .. } = service::new_partial(&config)?; Ok((cmd.run(client, backend), task_manager)) }) }, Some(Subcommand::Benchmark(cmd)) => if cfg!(feature = "runtime-benchmarks") { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run::<Block, service::ExecutorDispatch>(config)) } else { Err("Benchmarking wasn't enabled when building the node. You can enable it with \ `--features runtime-benchmarks`." .into()) }, None => {
service::new_full(config).map_err(sc_cli::Error::Service) }) }, } }
let runner = cli.create_runner(&cli.run)?; runner.run_node_until_exit(|config| async move {
txn_command.go
// Copyright 2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package command import ( "bufio" "fmt" "os" "strconv" "strings" "github.com/zhuzhengyang/etcd/Godeps/_workspace/src/github.com/spf13/cobra" "github.com/zhuzhengyang/etcd/Godeps/_workspace/src/golang.org/x/net/context" "github.com/zhuzhengyang/etcd/clientv3" ) var ( txnInteractive bool ) // NewTxnCommand returns the cobra command for "txn". func NewTxnCommand() *cobra.Command { cmd := &cobra.Command{ Use: "txn [options]", Short: "Txn processes all the requests in one transaction.", Run: txnCommandFunc, } cmd.Flags().BoolVarP(&txnInteractive, "interactive", "i", false, "input transaction in interactive mode") return cmd } // txnCommandFunc executes the "txn" command. func txnCommandFunc(cmd *cobra.Command, args []string) { if len(args) != 0 { ExitWithError(ExitBadArgs, fmt.Errorf("txn command does not accept argument.")) } reader := bufio.NewReader(os.Stdin) txn := mustClientFromCmd(cmd).Txn(context.Background()) promptInteractive("compares:") txn.If(readCompares(reader)...) promptInteractive("success requests (get, put, delete):") txn.Then(readOps(reader)...) promptInteractive("failure requests (get, put, delete):") txn.Else(readOps(reader)...) resp, err := txn.Commit() if err != nil { ExitWithError(ExitError, err)
display.Txn(*resp) } func promptInteractive(s string) { if txnInteractive { fmt.Println(s) } } func readCompares(r *bufio.Reader) (cmps []clientv3.Cmp) { for { line, err := r.ReadString('\n') if err != nil { ExitWithError(ExitInvalidInput, err) } if len(line) == 1 { break } // remove trialling \n line = line[:len(line)-1] cmp, err := parseCompare(line) if err != nil { ExitWithError(ExitInvalidInput, err) } cmps = append(cmps, *cmp) } return cmps } func readOps(r *bufio.Reader) (ops []clientv3.Op) { for { line, err := r.ReadString('\n') if err != nil { ExitWithError(ExitInvalidInput, err) } if len(line) == 1 { break } // remove trialling \n line = line[:len(line)-1] op, err := parseRequestUnion(line) if err != nil { ExitWithError(ExitInvalidInput, err) } ops = append(ops, *op) } return ops } func parseRequestUnion(line string) (*clientv3.Op, error) { args := argify(line) if len(args) < 2 { return nil, fmt.Errorf("invalid txn compare request: %s", line) } opc := make(chan clientv3.Op, 1) put := NewPutCommand() put.Run = func(cmd *cobra.Command, args []string) { key, value, opts := getPutOp(cmd, args) opc <- clientv3.OpPut(key, value, opts...) } get := NewGetCommand() get.Run = func(cmd *cobra.Command, args []string) { key, opts := getGetOp(cmd, args) opc <- clientv3.OpGet(key, opts...) } del := NewDelCommand() del.Run = func(cmd *cobra.Command, args []string) { key, opts := getDelOp(cmd, args) opc <- clientv3.OpDelete(key, opts...) } cmds := &cobra.Command{SilenceErrors: true} cmds.AddCommand(put, get, del) cmds.SetArgs(args) if err := cmds.Execute(); err != nil { return nil, fmt.Errorf("invalid txn request: %s", line) } op := <-opc return &op, nil } func parseCompare(line string) (*clientv3.Cmp, error) { var ( key string op string val string ) lparenSplit := strings.SplitN(line, "(", 2) if len(lparenSplit) != 2 { return nil, fmt.Errorf("malformed comparison: %s", line) } target := lparenSplit[0] n, serr := fmt.Sscanf(lparenSplit[1], "%q) %s %q", &key, &op, &val) if n != 3 { return nil, fmt.Errorf("malformed comparison: %s; got %s(%q) %s %q", line, target, key, op, val) } if serr != nil { return nil, fmt.Errorf("malformed comparison: %s (%v)", line, serr) } var ( v int64 err error cmp clientv3.Cmp ) switch target { case "ver", "version": if v, err = strconv.ParseInt(val, 10, 64); err == nil { cmp = clientv3.Compare(clientv3.Version(key), op, v) } case "c", "create": if v, err = strconv.ParseInt(val, 10, 64); err == nil { cmp = clientv3.Compare(clientv3.CreatedRevision(key), op, v) } case "m", "mod": if v, err = strconv.ParseInt(val, 10, 64); err == nil { cmp = clientv3.Compare(clientv3.ModifiedRevision(key), op, v) } case "val", "value": cmp = clientv3.Compare(clientv3.Value(key), op, val) default: return nil, fmt.Errorf("malformed comparison: %s (unknown target %s)", line, target) } if err != nil { return nil, fmt.Errorf("invalid txn compare request: %s", line) } return &cmp, nil }
}
instmgr_show_install_prepare.pb.go
/* Copyright 2019 Cisco Systems Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by protoc-gen-go. DO NOT EDIT. // source: instmgr_show_install_prepare.proto package cisco_ios_xr_spirit_install_instmgr_oper_software_install_prepare import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type InstmgrShowInstallPrepare_KEYS struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *InstmgrShowInstallPrepare_KEYS) Reset() { *m = InstmgrShowInstallPrepare_KEYS{} } func (m *InstmgrShowInstallPrepare_KEYS) String() string { return proto.CompactTextString(m) } func (*InstmgrShowInstallPrepare_KEYS) ProtoMessage() {} func (*InstmgrShowInstallPrepare_KEYS) Descriptor() ([]byte, []int) { return fileDescriptor_4b8a1f85e78eaced, []int{0} } func (m *InstmgrShowInstallPrepare_KEYS) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_InstmgrShowInstallPrepare_KEYS.Unmarshal(m, b) } func (m *InstmgrShowInstallPrepare_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_InstmgrShowInstallPrepare_KEYS.Marshal(b, m, deterministic) } func (m *InstmgrShowInstallPrepare_KEYS) XXX_Merge(src proto.Message) { xxx_messageInfo_InstmgrShowInstallPrepare_KEYS.Merge(m, src) } func (m *InstmgrShowInstallPrepare_KEYS) XXX_Size() int { return xxx_messageInfo_InstmgrShowInstallPrepare_KEYS.Size(m) } func (m *InstmgrShowInstallPrepare_KEYS) XXX_DiscardUnknown() { xxx_messageInfo_InstmgrShowInstallPrepare_KEYS.DiscardUnknown(m) } var xxx_messageInfo_InstmgrShowInstallPrepare_KEYS proto.InternalMessageInfo type InstStringPtr struct { Package string `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *InstStringPtr) Reset() { *m = InstStringPtr{} } func (m *InstStringPtr) String() string { return proto.CompactTextString(m) } func (*InstStringPtr) ProtoMessage() {} func (*InstStringPtr) Descriptor() ([]byte, []int) { return fileDescriptor_4b8a1f85e78eaced, []int{1} } func (m *InstStringPtr) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_InstStringPtr.Unmarshal(m, b) } func (m *InstStringPtr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_InstStringPtr.Marshal(b, m, deterministic) } func (m *InstStringPtr) XXX_Merge(src proto.Message) { xxx_messageInfo_InstStringPtr.Merge(m, src) } func (m *InstStringPtr) XXX_Size() int { return xxx_messageInfo_InstStringPtr.Size(m) } func (m *InstStringPtr) XXX_DiscardUnknown() { xxx_messageInfo_InstStringPtr.DiscardUnknown(m) } var xxx_messageInfo_InstStringPtr proto.InternalMessageInfo func (m *InstStringPtr) GetPackage() string { if m != nil { return m.Package } return "" } type InstmgrShowInstallPrepare struct { NoPrepareDone string `protobuf:"bytes,50,opt,name=no_prepare_done,json=noPrepareDone,proto3" json:"no_prepare_done,omitempty"`
PreparedBootPartition string `protobuf:"bytes,52,opt,name=prepared_boot_partition,json=preparedBootPartition,proto3" json:"prepared_boot_partition,omitempty"` RestartType string `protobuf:"bytes,53,opt,name=restart_type,json=restartType,proto3" json:"restart_type,omitempty"` Rpm []*InstStringPtr `protobuf:"bytes,54,rep,name=rpm,proto3" json:"rpm,omitempty"` Package []*InstStringPtr `protobuf:"bytes,55,rep,name=package,proto3" json:"package,omitempty"` ActivateMessage string `protobuf:"bytes,56,opt,name=activate_message,json=activateMessage,proto3" json:"activate_message,omitempty"` PrepareCleanMessage string `protobuf:"bytes,57,opt,name=prepare_clean_message,json=prepareCleanMessage,proto3" json:"prepare_clean_message,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *InstmgrShowInstallPrepare) Reset() { *m = InstmgrShowInstallPrepare{} } func (m *InstmgrShowInstallPrepare) String() string { return proto.CompactTextString(m) } func (*InstmgrShowInstallPrepare) ProtoMessage() {} func (*InstmgrShowInstallPrepare) Descriptor() ([]byte, []int) { return fileDescriptor_4b8a1f85e78eaced, []int{2} } func (m *InstmgrShowInstallPrepare) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_InstmgrShowInstallPrepare.Unmarshal(m, b) } func (m *InstmgrShowInstallPrepare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_InstmgrShowInstallPrepare.Marshal(b, m, deterministic) } func (m *InstmgrShowInstallPrepare) XXX_Merge(src proto.Message) { xxx_messageInfo_InstmgrShowInstallPrepare.Merge(m, src) } func (m *InstmgrShowInstallPrepare) XXX_Size() int { return xxx_messageInfo_InstmgrShowInstallPrepare.Size(m) } func (m *InstmgrShowInstallPrepare) XXX_DiscardUnknown() { xxx_messageInfo_InstmgrShowInstallPrepare.DiscardUnknown(m) } var xxx_messageInfo_InstmgrShowInstallPrepare proto.InternalMessageInfo func (m *InstmgrShowInstallPrepare) GetNoPrepareDone() string { if m != nil { return m.NoPrepareDone } return "" } func (m *InstmgrShowInstallPrepare) GetPreparedBootImage() string { if m != nil { return m.PreparedBootImage } return "" } func (m *InstmgrShowInstallPrepare) GetPreparedBootPartition() string { if m != nil { return m.PreparedBootPartition } return "" } func (m *InstmgrShowInstallPrepare) GetRestartType() string { if m != nil { return m.RestartType } return "" } func (m *InstmgrShowInstallPrepare) GetRpm() []*InstStringPtr { if m != nil { return m.Rpm } return nil } func (m *InstmgrShowInstallPrepare) GetPackage() []*InstStringPtr { if m != nil { return m.Package } return nil } func (m *InstmgrShowInstallPrepare) GetActivateMessage() string { if m != nil { return m.ActivateMessage } return "" } func (m *InstmgrShowInstallPrepare) GetPrepareCleanMessage() string { if m != nil { return m.PrepareCleanMessage } return "" } func init() { proto.RegisterType((*InstmgrShowInstallPrepare_KEYS)(nil), "cisco_ios_xr_spirit_install_instmgr_oper.software_install.prepare.instmgr_show_install_prepare_KEYS") proto.RegisterType((*InstStringPtr)(nil), "cisco_ios_xr_spirit_install_instmgr_oper.software_install.prepare.inst_string_ptr") proto.RegisterType((*InstmgrShowInstallPrepare)(nil), "cisco_ios_xr_spirit_install_instmgr_oper.software_install.prepare.instmgr_show_install_prepare") } func init() { proto.RegisterFile("instmgr_show_install_prepare.proto", fileDescriptor_4b8a1f85e78eaced) } var fileDescriptor_4b8a1f85e78eaced = []byte{ // 330 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x52, 0xbb, 0x4b, 0x33, 0x41, 0x10, 0x27, 0xe4, 0xe3, 0x13, 0x37, 0x4a, 0x74, 0x45, 0xbc, 0xc2, 0x22, 0x39, 0x41, 0x22, 0xc2, 0x15, 0x89, 0x46, 0x2d, 0x7d, 0x15, 0x22, 0x42, 0x88, 0x36, 0x56, 0xc3, 0xe6, 0xb2, 0xc6, 0xc5, 0xbb, 0x9d, 0x65, 0x76, 0x30, 0xa6, 0xf7, 0x0f, 0x97, 0xbb, 0xdc, 0xc6, 0x47, 0x91, 0x46, 0x2c, 0xe7, 0xf7, 0xe4, 0x7e, 0xb7, 0x22, 0x36, 0xd6, 0x73, 0x3e, 0x21, 0xf0, 0xcf, 0x38, 0x85, 0xe2, 0x50, 0x59, 0x06, 0x8e, 0xb4, 0x53, 0xa4, 0x13, 0x47, 0xc8, 0x28, 0xcf, 0x53, 0xe3, 0x53, 0x04, 0x83, 0x1e, 0xde, 0x08, 0xbc, 0x33, 0x64, 0x78, 0x21, 0x0d, 0x7e, 0x74, 0x9a, 0x12, 0x8f, 0x4f, 0x3c, 0x55, 0xa4, 0x03, 0x9b, 0x54, 0x41, 0xf1, 0x9e, 0x68, 0x2f, 0x2b, 0x82, 0xdb, 0xeb, 0xc7, 0xfb, 0xf8, 0x50, 0x34, 0x0b, 0x1c, 0x3c, 0x93, 0xb1, 0x13, 0x70, 0x4c, 0x32, 0x12, 0x2b, 0x4e, 0xa5, 0x2f, 0x6a, 0xa2, 0xa3, 0x5a, 0xab, 0xd6, 0x59, 0x1d, 0x86, 0x33, 0x7e, 0xff, 0x27, 0x76, 0x97, 0x45, 0xca, 0x7d, 0xd1, 0xb4, 0xb8, 0x28, 0x18, 0xa3, 0xd5, 0x51, 0xb7, 0x8c, 0x58, 0xb7, 0x38, 0x98, 0xa3, 0x57, 0x68, 0xb5, 0x4c, 0xc4, 0x56, 0x25, 0x1a, 0xc3, 0x08, 0x91, 0xc1, 0xe4, 0x45, 0x5d, 0xaf, 0xd4, 0x6e, 0x06, 0xea, 0x02, 0x91, 0x6f, 0x0a, 0x42, 0xf6, 0xc5, 0xce, 0x77, 0xbd, 0x53, 0xc4, 0x86, 0x0d, 0xda, 0xe8, 0xa8, 0xf4, 0x6c, 0x7f, 0xf5, 0x0c, 0x02, 0x29, 0xdb, 0x62, 0x8d, 0xb4, 0x67, 0x45, 0x0c, 0x3c, 0x73, 0x3a, 0x3a, 0x2e, 0xc5, 0x8d, 0x0a, 0x7b, 0x98, 0x39, 0x2d, 0xc7, 0xa2, 0x4e, 0x2e, 0x8f, 0xfa, 0xad, 0x7a, 0xa7, 0xd1, 0x1d, 0x26, 0xbf, 0x9e, 0x3d, 0xf9, 0x31, 0xe7, 0xb0, 0x88, 0x97, 0xd9, 0xe7, 0xa6, 0x27, 0x7f, 0xd6, 0x14, 0x2a, 0xe4, 0x81, 0xd8, 0x50, 0x29, 0x9b, 0x57, 0xc5, 0x1a, 0x72, 0xed, 0x7d, 0x51, 0x7b, 0x5a, 0x7e, 0x7a, 0x33, 0xe0, 0x77, 0x73, 0x58, 0x76, 0x45, 0x98, 0x0e, 0xd2, 0x4c, 0x2b, 0xbb, 0xd0, 0x9f, 0x95, 0xfa, 0xf0, 0x9b, 0x2e, 0x0b, 0xae, 0xf2, 0x8c, 0xfe, 0x97, 0x4f, 0xb4, 0xf7, 0x11, 0x00, 0x00, 0xff, 0xff, 0x43, 0xeb, 0xca, 0xd6, 0xc8, 0x02, 0x00, 0x00, }
PreparedBootImage string `protobuf:"bytes,51,opt,name=prepared_boot_image,json=preparedBootImage,proto3" json:"prepared_boot_image,omitempty"`
diagnose_migration_history.rs
use std::path::Path; use super::MigrationCommand; use crate::CoreResult; use migration_connector::{ ConnectorError, MigrationConnector, MigrationDirectory, MigrationRecord, PersistenceNotInitializedError, }; use serde::{Deserialize, Serialize}; /// The input to the `DiagnoseMigrationHistory` command. #[derive(Deserialize, Debug)] #[serde(rename_all = "camelCase")] pub struct DiagnoseMigrationHistoryInput { /// The location of the migrations directory. pub migrations_directory_path: String, /// Whether creating shadow/temporary databases is allowed. pub opt_in_to_shadow_database: bool, } /// The output of the `DiagnoseMigrationHistory` command. #[derive(Serialize, Debug)] #[serde(rename_all = "camelCase")] pub struct DiagnoseMigrationHistoryOutput { /// Whether drift between the expected schema and the dev database could be /// detected. `None` if the dev database has the expected schema. #[serde(skip)] pub drift: Option<DriftDiagnostic>, /// The current status of the migration history of the database relative to /// migrations directory. `None` if they are in sync and up to date. pub history: Option<HistoryDiagnostic>, /// The names of the migrations that are currently in a failed state in the /// database. pub failed_migration_names: Vec<String>, /// The names of the migrations for which the checksum of the script in the /// migration directory does not match the checksum of the applied migration /// in the database. pub edited_migration_names: Vec<String>, /// An optional error encountered when applying a migration that is not /// applied in the main database to the shadow database. We do this to /// validate that unapplied migrations are at least minimally valid. #[serde(skip)] pub error_in_unapplied_migration: Option<user_facing_errors::Error>, /// Is the migrations table initialized in the database. pub has_migrations_table: bool, } impl DiagnoseMigrationHistoryOutput { /// True if no problem was found pub fn is_empty(&self) -> bool { matches!( self, DiagnoseMigrationHistoryOutput { drift, history, has_migrations_table: _, failed_migration_names, edited_migration_names, error_in_unapplied_migration, } if drift.is_none() && history.is_none() && failed_migration_names.is_empty() && edited_migration_names.is_empty() && error_in_unapplied_migration.is_none() ) } } /// Read the contents of the migrations directory and the migrations table, and /// returns their relative statuses. At this stage, the migration engine only /// reads, it does not write to the dev database nor the migrations directory. pub struct DiagnoseMigrationHistoryCommand; #[async_trait::async_trait] impl<'a> MigrationCommand for DiagnoseMigrationHistoryCommand { type Input = DiagnoseMigrationHistoryInput; type Output = DiagnoseMigrationHistoryOutput; async fn execute<C: MigrationConnector>(input: &Self::Input, connector: &C) -> CoreResult<Self::Output> { let migration_persistence = connector.migration_persistence(); let migration_inferrer = connector.database_migration_inferrer(); tracing::debug!("Diagnosing migration history"); migration_connector::error_on_changed_provider(&input.migrations_directory_path, connector.connector_type())?; // Load the migrations. let migrations_from_filesystem = migration_connector::list_migrations(&Path::new(&input.migrations_directory_path))?; let (migrations_from_database, has_migrations_table) = match migration_persistence.list_migrations().await? { Ok(migrations) => (migrations, true), Err(PersistenceNotInitializedError {}) => (vec![], false), }; let mut diagnostics = Diagnostics::new(&migrations_from_filesystem); // Check filesystem history against database history. for (index, fs_migration) in migrations_from_filesystem.iter().enumerate() { let corresponding_db_migration = migrations_from_database .iter() .find(|db_migration| db_migration.migration_name == fs_migration.migration_name()); match corresponding_db_migration { Some(db_migration) if !fs_migration .matches_checksum(&db_migration.checksum) .map_err(ConnectorError::from)? => { diagnostics.edited_migrations.push(db_migration); } Some(_) => (), None => diagnostics.fs_migrations_not_in_db.push((index, fs_migration)), } } for (index, db_migration) in migrations_from_database.iter().enumerate() { let corresponding_fs_migration = migrations_from_filesystem .iter() .find(|fs_migration| db_migration.migration_name == fs_migration.migration_name()); if db_migration.finished_at.is_none() && db_migration.rolled_back_at.is_none() { diagnostics.failed_migrations.push(db_migration); } if corresponding_fs_migration.is_none() { diagnostics.db_migrations_not_in_fs.push((index, db_migration)) } } // Detect drift let applied_migrations: Vec<_> = migrations_from_filesystem .iter() .filter(|fs_migration| { migrations_from_database .iter() .filter(|db_migration| db_migration.finished_at.is_some() && db_migration.rolled_back_at.is_none()) .any(|db_migration| db_migration.migration_name == fs_migration.migration_name()) }) .cloned() .collect(); let (drift, error_in_unapplied_migration) = { if input.opt_in_to_shadow_database { let drift = match migration_inferrer.calculate_drift(&applied_migrations).await { Ok(Some(rollback)) => Some(DriftDiagnostic::DriftDetected { rollback }), Err(error) => Some(DriftDiagnostic::MigrationFailedToApply { error: error.to_user_facing(), }), _ => None, }; let error_in_unapplied_migration = if !matches!(drift, Some(DriftDiagnostic::MigrationFailedToApply { .. })) { migration_inferrer .validate_migrations(&migrations_from_filesystem) .await .err() .map(|connector_error| connector_error.to_user_facing()) } else { None }; (drift, error_in_unapplied_migration) } else { (None, None) } }; Ok(DiagnoseMigrationHistoryOutput { drift, history: diagnostics.history(), failed_migration_names: diagnostics.failed_migration_names(), edited_migration_names: diagnostics.edited_migration_names(), error_in_unapplied_migration, has_migrations_table, }) } } #[derive(Debug)] struct Diagnostics<'a> { fs_migrations_not_in_db: Vec<(usize, &'a MigrationDirectory)>, db_migrations_not_in_fs: Vec<(usize, &'a MigrationRecord)>, edited_migrations: Vec<&'a MigrationRecord>, failed_migrations: Vec<&'a MigrationRecord>, fs_migrations: &'a [MigrationDirectory], } impl<'a> Diagnostics<'a> { fn new(fs_migrations: &'a [MigrationDirectory]) -> Self { Diagnostics { fs_migrations_not_in_db: Vec::new(), db_migrations_not_in_fs: Vec::new(), edited_migrations: Vec::new(), failed_migrations: Vec::new(), fs_migrations, } } fn db_migration_names(&self) -> Vec<String> { self.db_migrations_not_in_fs .iter() .map(|(_, migration)| migration.migration_name.clone()) .collect() } fn edited_migration_names(&self) -> Vec<String> { self.edited_migrations .iter() .map(|migration| migration.migration_name.clone()) .collect() } fn failed_migration_names(&self) -> Vec<String> { self.failed_migrations .iter() .map(|migration| migration.migration_name.clone()) .collect() } fn
(&self) -> Vec<String> { self.fs_migrations_not_in_db .iter() .map(|(_, migration)| migration.migration_name().to_owned()) .collect() } fn history(&self) -> Option<HistoryDiagnostic> { match (self.fs_migrations_not_in_db.len(), self.db_migrations_not_in_fs.len()) { (0, 0) => None, (_, 0) => Some(HistoryDiagnostic::DatabaseIsBehind { unapplied_migration_names: self.fs_migration_names(), }), (0, _) => Some(HistoryDiagnostic::MigrationsDirectoryIsBehind { unpersisted_migration_names: self.db_migration_names(), }), (_, _) => Some(HistoryDiagnostic::HistoriesDiverge { last_common_migration_name: self.fs_migrations_not_in_db.first().and_then(|(idx, _)| { if *idx == 0 { None } else { Some(self.fs_migrations[idx - 1].migration_name().to_owned()) } }), unpersisted_migration_names: self.db_migration_names(), unapplied_migration_names: self.fs_migration_names(), }), } } } /// A diagnostic returned by `diagnoseMigrationHistory` when looking at the /// database migration history in relation to the migrations directory. #[derive(Debug, PartialEq, Serialize)] #[serde(tag = "diagnostic", rename_all = "camelCase")] pub enum HistoryDiagnostic { /// There are migrations in the migrations directory that have not been /// applied to the database yet. #[serde(rename_all = "camelCase")] DatabaseIsBehind { /// The names of the migrations. unapplied_migration_names: Vec<String>, }, /// Migrations have been applied to the database that are not in the /// migrations directory. #[serde(rename_all = "camelCase")] MigrationsDirectoryIsBehind { /// The names of the migrations. unpersisted_migration_names: Vec<String>, }, /// The migrations table history and the migrations directory history are /// not the same. This currently ignores the ordering of migrations. #[serde(rename_all = "camelCase")] HistoriesDiverge { /// The last migration that is present both in the migrations directory /// and the migrations table. last_common_migration_name: Option<String>, /// The names of the migrations that are present in the migrations table /// but not in the migrations directory. unpersisted_migration_names: Vec<String>, /// The names of the migrations that are present in the migrations /// directory but have not been applied to the database. unapplied_migration_names: Vec<String>, }, } /// A diagnostic returned by `diagnoseMigrationHistory` when trying to determine /// whether the development database has the expected schema at its stage in /// history. #[derive(Debug, PartialEq, Serialize)] #[serde(tag = "diagnostic", rename_all = "camelCase")] pub enum DriftDiagnostic { /// The database schema of the current database does not match what would be /// expected at its stage in the migration history. DriftDetected { /// A database script to correct the drift by reverting to the expected schema. rollback: String, }, /// When a migration fails to apply cleanly to a shadow database. #[serde(rename_all = "camelCase")] MigrationFailedToApply { /// The full error. error: user_facing_errors::Error, }, } impl DriftDiagnostic { /// For tests. pub fn unwrap_drift_detected(self) -> String { match self { DriftDiagnostic::DriftDetected { rollback } => rollback, other => panic!("unwrap_drift_detected on {:?}", other), } } }
fs_migration_names
protobuf.rs
/// The protocol compiler can output a FileDescriptorSet containing the .proto /// files it parses. #[derive(Clone, PartialEq, ::prost::Message)] pub struct FileDescriptorSet { #[prost(message, repeated, tag="1")] pub file: ::std::vec::Vec<FileDescriptorProto>, } /// Describes a complete .proto file. #[derive(Clone, PartialEq, ::prost::Message)] pub struct FileDescriptorProto { /// file name, relative to root of source tree #[prost(string, optional, tag="1")] pub name: ::std::option::Option<std::string::String>, /// e.g. "foo", "foo.bar", etc. #[prost(string, optional, tag="2")] pub package: ::std::option::Option<std::string::String>, /// Names of files imported by this file. #[prost(string, repeated, tag="3")] pub dependency: ::std::vec::Vec<std::string::String>, /// Indexes of the public imported files in the dependency list above. #[prost(int32, repeated, packed="false", tag="10")] pub public_dependency: ::std::vec::Vec<i32>, /// Indexes of the weak imported files in the dependency list. /// For Google-internal migration only. Do not use. #[prost(int32, repeated, packed="false", tag="11")] pub weak_dependency: ::std::vec::Vec<i32>, /// All top-level definitions in this file. #[prost(message, repeated, tag="4")] pub message_type: ::std::vec::Vec<DescriptorProto>, #[prost(message, repeated, tag="5")] pub enum_type: ::std::vec::Vec<EnumDescriptorProto>, #[prost(message, repeated, tag="6")] pub service: ::std::vec::Vec<ServiceDescriptorProto>, #[prost(message, repeated, tag="7")] pub extension: ::std::vec::Vec<FieldDescriptorProto>, #[prost(message, optional, tag="8")] pub options: ::std::option::Option<FileOptions>, /// This field contains optional information about the original source code. /// You may safely remove this entire field without harming runtime /// functionality of the descriptors -- the information is needed only by /// development tools. #[prost(message, optional, tag="9")] pub source_code_info: ::std::option::Option<SourceCodeInfo>, /// The syntax of the proto file. /// The supported values are "proto2" and "proto3". #[prost(string, optional, tag="12")] pub syntax: ::std::option::Option<std::string::String>, } /// Describes a message type. #[derive(Clone, PartialEq, ::prost::Message)] pub struct DescriptorProto { #[prost(string, optional, tag="1")] pub name: ::std::option::Option<std::string::String>, #[prost(message, repeated, tag="2")] pub field: ::std::vec::Vec<FieldDescriptorProto>, #[prost(message, repeated, tag="6")] pub extension: ::std::vec::Vec<FieldDescriptorProto>, #[prost(message, repeated, tag="3")] pub nested_type: ::std::vec::Vec<DescriptorProto>, #[prost(message, repeated, tag="4")] pub enum_type: ::std::vec::Vec<EnumDescriptorProto>, #[prost(message, repeated, tag="5")] pub extension_range: ::std::vec::Vec<descriptor_proto::ExtensionRange>, #[prost(message, repeated, tag="8")] pub oneof_decl: ::std::vec::Vec<OneofDescriptorProto>, #[prost(message, optional, tag="7")] pub options: ::std::option::Option<MessageOptions>, #[prost(message, repeated, tag="9")] pub reserved_range: ::std::vec::Vec<descriptor_proto::ReservedRange>, /// Reserved field names, which may not be used by fields in the same message. /// A given name may only be reserved once. #[prost(string, repeated, tag="10")] pub reserved_name: ::std::vec::Vec<std::string::String>, } pub mod descriptor_proto { #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExtensionRange { /// Inclusive. #[prost(int32, optional, tag="1")] pub start: ::std::option::Option<i32>, /// Exclusive. #[prost(int32, optional, tag="2")] pub end: ::std::option::Option<i32>, #[prost(message, optional, tag="3")] pub options: ::std::option::Option<super::ExtensionRangeOptions>, } /// Range of reserved tag numbers. Reserved tag numbers may not be used by /// fields or extension ranges in the same message. Reserved ranges may /// not overlap. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReservedRange { /// Inclusive. #[prost(int32, optional, tag="1")] pub start: ::std::option::Option<i32>, /// Exclusive. #[prost(int32, optional, tag="2")] pub end: ::std::option::Option<i32>, } } #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExtensionRangeOptions { /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag="999")] pub uninterpreted_option: ::std::vec::Vec<UninterpretedOption>, } /// Describes a field within a message. #[derive(Clone, PartialEq, ::prost::Message)] pub struct FieldDescriptorProto { #[prost(string, optional, tag="1")] pub name: ::std::option::Option<std::string::String>, #[prost(int32, optional, tag="3")] pub number: ::std::option::Option<i32>, #[prost(enumeration="field_descriptor_proto::Label", optional, tag="4")] pub label: ::std::option::Option<i32>, /// If type_name is set, this need not be set. If both this and type_name /// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. #[prost(enumeration="field_descriptor_proto::Type", optional, tag="5")] pub r#type: ::std::option::Option<i32>, /// For message and enum types, this is the name of the type. If the name /// starts with a '.', it is fully-qualified. Otherwise, C++-like scoping /// rules are used to find the type (i.e. first the nested types within this /// message are searched, then within the parent, on up to the root /// namespace). #[prost(string, optional, tag="6")] pub type_name: ::std::option::Option<std::string::String>, /// For extensions, this is the name of the type being extended. It is /// resolved in the same manner as type_name. #[prost(string, optional, tag="2")] pub extendee: ::std::option::Option<std::string::String>, /// For numeric types, contains the original text representation of the value. /// For booleans, "true" or "false". /// For strings, contains the default text contents (not escaped in any way). /// For bytes, contains the C escaped value. All bytes >= 128 are escaped. /// TODO(kenton): Base-64 encode? #[prost(string, optional, tag="7")] pub default_value: ::std::option::Option<std::string::String>, /// If set, gives the index of a oneof in the containing type's oneof_decl /// list. This field is a member of that oneof. #[prost(int32, optional, tag="9")] pub oneof_index: ::std::option::Option<i32>, /// JSON name of this field. The value is set by protocol compiler. If the /// user has set a "json_name" option on this field, that option's value /// will be used. Otherwise, it's deduced from the field's name by converting /// it to camelCase. #[prost(string, optional, tag="10")] pub json_name: ::std::option::Option<std::string::String>, #[prost(message, optional, tag="8")] pub options: ::std::option::Option<FieldOptions>, } pub mod field_descriptor_proto { #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Type { /// 0 is reserved for errors. /// Order is weird for historical reasons. Double = 1, Float = 2, /// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if /// negative values are likely. Int64 = 3, Uint64 = 4, /// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if /// negative values are likely. Int32 = 5, Fixed64 = 6, Fixed32 = 7, Bool = 8, String = 9, /// Tag-delimited aggregate. /// Group type is deprecated and not supported in proto3. However, Proto3 /// implementations should still be able to parse the group wire format and /// treat group fields as unknown fields. Group = 10, /// Length-delimited aggregate. Message = 11, /// New in version 2. Bytes = 12, Uint32 = 13, Enum = 14, Sfixed32 = 15, Sfixed64 = 16, /// Uses ZigZag encoding. Sint32 = 17, /// Uses ZigZag encoding. Sint64 = 18, } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Label { /// 0 is reserved for errors Optional = 1, Required = 2, Repeated = 3, } } /// Describes a oneof. #[derive(Clone, PartialEq, ::prost::Message)] pub struct OneofDescriptorProto { #[prost(string, optional, tag="1")] pub name: ::std::option::Option<std::string::String>, #[prost(message, optional, tag="2")] pub options: ::std::option::Option<OneofOptions>, } /// Describes an enum type. #[derive(Clone, PartialEq, ::prost::Message)] pub struct EnumDescriptorProto { #[prost(string, optional, tag="1")] pub name: ::std::option::Option<std::string::String>, #[prost(message, repeated, tag="2")] pub value: ::std::vec::Vec<EnumValueDescriptorProto>, #[prost(message, optional, tag="3")] pub options: ::std::option::Option<EnumOptions>, /// Range of reserved numeric values. Reserved numeric values may not be used /// by enum values in the same enum declaration. Reserved ranges may not /// overlap. #[prost(message, repeated, tag="4")] pub reserved_range: ::std::vec::Vec<enum_descriptor_proto::EnumReservedRange>, /// Reserved enum value names, which may not be reused. A given name may only /// be reserved once. #[prost(string, repeated, tag="5")] pub reserved_name: ::std::vec::Vec<std::string::String>, } pub mod enum_descriptor_proto { /// Range of reserved numeric values. Reserved values may not be used by /// entries in the same enum. Reserved ranges may not overlap.
/// Note that this is distinct from DescriptorProto.ReservedRange in that it /// is inclusive such that it can appropriately represent the entire int32 /// domain. #[derive(Clone, PartialEq, ::prost::Message)] pub struct EnumReservedRange { /// Inclusive. #[prost(int32, optional, tag="1")] pub start: ::std::option::Option<i32>, /// Inclusive. #[prost(int32, optional, tag="2")] pub end: ::std::option::Option<i32>, } } /// Describes a value within an enum. #[derive(Clone, PartialEq, ::prost::Message)] pub struct EnumValueDescriptorProto { #[prost(string, optional, tag="1")] pub name: ::std::option::Option<std::string::String>, #[prost(int32, optional, tag="2")] pub number: ::std::option::Option<i32>, #[prost(message, optional, tag="3")] pub options: ::std::option::Option<EnumValueOptions>, } /// Describes a service. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ServiceDescriptorProto { #[prost(string, optional, tag="1")] pub name: ::std::option::Option<std::string::String>, #[prost(message, repeated, tag="2")] pub method: ::std::vec::Vec<MethodDescriptorProto>, #[prost(message, optional, tag="3")] pub options: ::std::option::Option<ServiceOptions>, } /// Describes a method of a service. #[derive(Clone, PartialEq, ::prost::Message)] pub struct MethodDescriptorProto { #[prost(string, optional, tag="1")] pub name: ::std::option::Option<std::string::String>, /// Input and output type names. These are resolved in the same way as /// FieldDescriptorProto.type_name, but must refer to a message type. #[prost(string, optional, tag="2")] pub input_type: ::std::option::Option<std::string::String>, #[prost(string, optional, tag="3")] pub output_type: ::std::option::Option<std::string::String>, #[prost(message, optional, tag="4")] pub options: ::std::option::Option<MethodOptions>, /// Identifies if client streams multiple client messages #[prost(bool, optional, tag="5", default="false")] pub client_streaming: ::std::option::Option<bool>, /// Identifies if server streams multiple server messages #[prost(bool, optional, tag="6", default="false")] pub server_streaming: ::std::option::Option<bool>, } // =================================================================== // Options // Each of the definitions above may have "options" attached. These are // just annotations which may cause code to be generated slightly differently // or may contain hints for code that manipulates protocol messages. // // Clients may define custom options as extensions of the *Options messages. // These extensions may not yet be known at parsing time, so the parser cannot // store the values in them. Instead it stores them in a field in the *Options // message called uninterpreted_option. This field must have the same name // across all *Options messages. We then use this field to populate the // extensions when we build a descriptor, at which point all protos have been // parsed and so all extensions are known. // // Extension numbers for custom options may be chosen as follows: // * For options which will only be used within a single application or // organization, or for experimental options, use field numbers 50000 // through 99999. It is up to you to ensure that you do not use the // same number for multiple options. // * For options which will be published and used publicly by multiple // independent entities, e-mail [email protected] // to reserve extension numbers. Simply provide your project name (e.g. // Objective-C plugin) and your project website (if available) -- there's no // need to explain how you intend to use them. Usually you only need one // extension number. You can declare multiple options with only one extension // number by putting them in a sub-message. See the Custom Options section of // the docs for examples: // https://developers.google.com/protocol-buffers/docs/proto#options // If this turns out to be popular, a web service will be set up // to automatically assign option numbers. #[derive(Clone, PartialEq, ::prost::Message)] pub struct FileOptions { /// Sets the Java package where classes generated from this .proto will be /// placed. By default, the proto package is used, but this is often /// inappropriate because proto packages do not normally start with backwards /// domain names. #[prost(string, optional, tag="1")] pub java_package: ::std::option::Option<std::string::String>, /// If set, all the classes from the .proto file are wrapped in a single /// outer class with the given name. This applies to both Proto1 /// (equivalent to the old "--one_java_file" option) and Proto2 (where /// a .proto always translates to a single class, but you may want to /// explicitly choose the class name). #[prost(string, optional, tag="8")] pub java_outer_classname: ::std::option::Option<std::string::String>, /// If set true, then the Java code generator will generate a separate .java /// file for each top-level message, enum, and service defined in the .proto /// file. Thus, these types will *not* be nested inside the outer class /// named by java_outer_classname. However, the outer class will still be /// generated to contain the file's getDescriptor() method as well as any /// top-level extensions defined in the file. #[prost(bool, optional, tag="10", default="false")] pub java_multiple_files: ::std::option::Option<bool>, /// This option does nothing. #[prost(bool, optional, tag="20")] pub java_generate_equals_and_hash: ::std::option::Option<bool>, /// If set true, then the Java2 code generator will generate code that /// throws an exception whenever an attempt is made to assign a non-UTF-8 /// byte sequence to a string field. /// Message reflection will do the same. /// However, an extension field still accepts non-UTF-8 byte sequences. /// This option has no effect on when used with the lite runtime. #[prost(bool, optional, tag="27", default="false")] pub java_string_check_utf8: ::std::option::Option<bool>, #[prost(enumeration="file_options::OptimizeMode", optional, tag="9", default="Speed")] pub optimize_for: ::std::option::Option<i32>, /// Sets the Go package where structs generated from this .proto will be /// placed. If omitted, the Go package will be derived from the following: /// - The basename of the package import path, if provided. /// - Otherwise, the package statement in the .proto file, if present. /// - Otherwise, the basename of the .proto file, without extension. #[prost(string, optional, tag="11")] pub go_package: ::std::option::Option<std::string::String>, /// Should generic services be generated in each language? "Generic" services /// are not specific to any particular RPC system. They are generated by the /// main code generators in each language (without additional plugins). /// Generic services were the only kind of service generation supported by /// early versions of google.protobuf. /// /// Generic services are now considered deprecated in favor of using plugins /// that generate code specific to your particular RPC system. Therefore, /// these default to false. Old code which depends on generic services should /// explicitly set them to true. #[prost(bool, optional, tag="16", default="false")] pub cc_generic_services: ::std::option::Option<bool>, #[prost(bool, optional, tag="17", default="false")] pub java_generic_services: ::std::option::Option<bool>, #[prost(bool, optional, tag="18", default="false")] pub py_generic_services: ::std::option::Option<bool>, #[prost(bool, optional, tag="42", default="false")] pub php_generic_services: ::std::option::Option<bool>, /// Is this file deprecated? /// Depending on the target platform, this can emit Deprecated annotations /// for everything in the file, or it will be completely ignored; in the very /// least, this is a formalization for deprecating files. #[prost(bool, optional, tag="23", default="false")] pub deprecated: ::std::option::Option<bool>, /// Enables the use of arenas for the proto messages in this file. This applies /// only to generated classes for C++. #[prost(bool, optional, tag="31", default="false")] pub cc_enable_arenas: ::std::option::Option<bool>, /// Sets the objective c class prefix which is prepended to all objective c /// generated classes from this .proto. There is no default. #[prost(string, optional, tag="36")] pub objc_class_prefix: ::std::option::Option<std::string::String>, /// Namespace for generated classes; defaults to the package. #[prost(string, optional, tag="37")] pub csharp_namespace: ::std::option::Option<std::string::String>, /// By default Swift generators will take the proto package and CamelCase it /// replacing '.' with underscore and use that to prefix the types/symbols /// defined. When this options is provided, they will use this value instead /// to prefix the types/symbols defined. #[prost(string, optional, tag="39")] pub swift_prefix: ::std::option::Option<std::string::String>, /// Sets the php class prefix which is prepended to all php generated classes /// from this .proto. Default is empty. #[prost(string, optional, tag="40")] pub php_class_prefix: ::std::option::Option<std::string::String>, /// Use this option to change the namespace of php generated classes. Default /// is empty. When this option is empty, the package name will be used for /// determining the namespace. #[prost(string, optional, tag="41")] pub php_namespace: ::std::option::Option<std::string::String>, /// Use this option to change the namespace of php generated metadata classes. /// Default is empty. When this option is empty, the proto file name will be /// used for determining the namespace. #[prost(string, optional, tag="44")] pub php_metadata_namespace: ::std::option::Option<std::string::String>, /// Use this option to change the package of ruby generated classes. Default /// is empty. When this option is not set, the package name will be used for /// determining the ruby package. #[prost(string, optional, tag="45")] pub ruby_package: ::std::option::Option<std::string::String>, /// The parser stores options it doesn't recognize here. /// See the documentation for the "Options" section above. #[prost(message, repeated, tag="999")] pub uninterpreted_option: ::std::vec::Vec<UninterpretedOption>, } pub mod file_options { /// Generated classes can be optimized for speed or code size. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum OptimizeMode { /// Generate complete code for parsing, serialization, Speed = 1, /// etc. /// /// Use ReflectionOps to implement these methods. CodeSize = 2, /// Generate code using MessageLite and the lite runtime. LiteRuntime = 3, } } #[derive(Clone, PartialEq, ::prost::Message)] pub struct MessageOptions { /// Set true to use the old proto1 MessageSet wire format for extensions. /// This is provided for backwards-compatibility with the MessageSet wire /// format. You should not use this for any other reason: It's less /// efficient, has fewer features, and is more complicated. /// /// The message must be defined exactly as follows: /// message Foo { /// option message_set_wire_format = true; /// extensions 4 to max; /// } /// Note that the message cannot have any defined fields; MessageSets only /// have extensions. /// /// All extensions of your type must be singular messages; e.g. they cannot /// be int32s, enums, or repeated messages. /// /// Because this is an option, the above two restrictions are not enforced by /// the protocol compiler. #[prost(bool, optional, tag="1", default="false")] pub message_set_wire_format: ::std::option::Option<bool>, /// Disables the generation of the standard "descriptor()" accessor, which can /// conflict with a field of the same name. This is meant to make migration /// from proto1 easier; new code should avoid fields named "descriptor". #[prost(bool, optional, tag="2", default="false")] pub no_standard_descriptor_accessor: ::std::option::Option<bool>, /// Is this message deprecated? /// Depending on the target platform, this can emit Deprecated annotations /// for the message, or it will be completely ignored; in the very least, /// this is a formalization for deprecating messages. #[prost(bool, optional, tag="3", default="false")] pub deprecated: ::std::option::Option<bool>, /// Whether the message is an automatically generated map entry type for the /// maps field. /// /// For maps fields: /// map<KeyType, ValueType> map_field = 1; /// The parsed descriptor looks like: /// message MapFieldEntry { /// option map_entry = true; /// optional KeyType key = 1; /// optional ValueType value = 2; /// } /// repeated MapFieldEntry map_field = 1; /// /// Implementations may choose not to generate the map_entry=true message, but /// use a native map in the target language to hold the keys and values. /// The reflection APIs in such implementations still need to work as /// if the field is a repeated message field. /// /// NOTE: Do not set the option in .proto files. Always use the maps syntax /// instead. The option should only be implicitly set by the proto compiler /// parser. #[prost(bool, optional, tag="7")] pub map_entry: ::std::option::Option<bool>, /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag="999")] pub uninterpreted_option: ::std::vec::Vec<UninterpretedOption>, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct FieldOptions { /// The ctype option instructs the C++ code generator to use a different /// representation of the field than it normally would. See the specific /// options below. This option is not yet implemented in the open source /// release -- sorry, we'll try to include it in a future version! #[prost(enumeration="field_options::CType", optional, tag="1", default="String")] pub ctype: ::std::option::Option<i32>, /// The packed option can be enabled for repeated primitive fields to enable /// a more efficient representation on the wire. Rather than repeatedly /// writing the tag and type for each element, the entire array is encoded as /// a single length-delimited blob. In proto3, only explicit setting it to /// false will avoid using packed encoding. #[prost(bool, optional, tag="2")] pub packed: ::std::option::Option<bool>, /// The jstype option determines the JavaScript type used for values of the /// field. The option is permitted only for 64 bit integral and fixed types /// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING /// is represented as JavaScript string, which avoids loss of precision that /// can happen when a large value is converted to a floating point JavaScript. /// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to /// use the JavaScript "number" type. The behavior of the default option /// JS_NORMAL is implementation dependent. /// /// This option is an enum to permit additional types to be added, e.g. /// goog.math.Integer. #[prost(enumeration="field_options::JsType", optional, tag="6", default="JsNormal")] pub jstype: ::std::option::Option<i32>, /// Should this field be parsed lazily? Lazy applies only to message-type /// fields. It means that when the outer message is initially parsed, the /// inner message's contents will not be parsed but instead stored in encoded /// form. The inner message will actually be parsed when it is first accessed. /// /// This is only a hint. Implementations are free to choose whether to use /// eager or lazy parsing regardless of the value of this option. However, /// setting this option true suggests that the protocol author believes that /// using lazy parsing on this field is worth the additional bookkeeping /// overhead typically needed to implement it. /// /// This option does not affect the public interface of any generated code; /// all method signatures remain the same. Furthermore, thread-safety of the /// interface is not affected by this option; const methods remain safe to /// call from multiple threads concurrently, while non-const methods continue /// to require exclusive access. /// /// /// Note that implementations may choose not to check required fields within /// a lazy sub-message. That is, calling IsInitialized() on the outer message /// may return true even if the inner message has missing required fields. /// This is necessary because otherwise the inner message would have to be /// parsed in order to perform the check, defeating the purpose of lazy /// parsing. An implementation which chooses not to check required fields /// must be consistent about it. That is, for any particular sub-message, the /// implementation must either *always* check its required fields, or *never* /// check its required fields, regardless of whether or not the message has /// been parsed. #[prost(bool, optional, tag="5", default="false")] pub lazy: ::std::option::Option<bool>, /// Is this field deprecated? /// Depending on the target platform, this can emit Deprecated annotations /// for accessors, or it will be completely ignored; in the very least, this /// is a formalization for deprecating fields. #[prost(bool, optional, tag="3", default="false")] pub deprecated: ::std::option::Option<bool>, /// For Google-internal migration only. Do not use. #[prost(bool, optional, tag="10", default="false")] pub weak: ::std::option::Option<bool>, /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag="999")] pub uninterpreted_option: ::std::vec::Vec<UninterpretedOption>, } pub mod field_options { #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum CType { /// Default mode. String = 0, Cord = 1, StringPiece = 2, } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum JsType { /// Use the default type. JsNormal = 0, /// Use JavaScript strings. JsString = 1, /// Use JavaScript numbers. JsNumber = 2, } } #[derive(Clone, PartialEq, ::prost::Message)] pub struct OneofOptions { /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag="999")] pub uninterpreted_option: ::std::vec::Vec<UninterpretedOption>, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct EnumOptions { /// Set this option to true to allow mapping different tag names to the same /// value. #[prost(bool, optional, tag="2")] pub allow_alias: ::std::option::Option<bool>, /// Is this enum deprecated? /// Depending on the target platform, this can emit Deprecated annotations /// for the enum, or it will be completely ignored; in the very least, this /// is a formalization for deprecating enums. #[prost(bool, optional, tag="3", default="false")] pub deprecated: ::std::option::Option<bool>, /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag="999")] pub uninterpreted_option: ::std::vec::Vec<UninterpretedOption>, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct EnumValueOptions { /// Is this enum value deprecated? /// Depending on the target platform, this can emit Deprecated annotations /// for the enum value, or it will be completely ignored; in the very least, /// this is a formalization for deprecating enum values. #[prost(bool, optional, tag="1", default="false")] pub deprecated: ::std::option::Option<bool>, /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag="999")] pub uninterpreted_option: ::std::vec::Vec<UninterpretedOption>, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct ServiceOptions { // Note: Field numbers 1 through 32 are reserved for Google's internal RPC // framework. We apologize for hoarding these numbers to ourselves, but // we were already using them long before we decided to release Protocol // Buffers. /// Is this service deprecated? /// Depending on the target platform, this can emit Deprecated annotations /// for the service, or it will be completely ignored; in the very least, /// this is a formalization for deprecating services. #[prost(bool, optional, tag="33", default="false")] pub deprecated: ::std::option::Option<bool>, /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag="999")] pub uninterpreted_option: ::std::vec::Vec<UninterpretedOption>, } #[derive(Clone, PartialEq, ::prost::Message)] pub struct MethodOptions { // Note: Field numbers 1 through 32 are reserved for Google's internal RPC // framework. We apologize for hoarding these numbers to ourselves, but // we were already using them long before we decided to release Protocol // Buffers. /// Is this method deprecated? /// Depending on the target platform, this can emit Deprecated annotations /// for the method, or it will be completely ignored; in the very least, /// this is a formalization for deprecating methods. #[prost(bool, optional, tag="33", default="false")] pub deprecated: ::std::option::Option<bool>, #[prost(enumeration="method_options::IdempotencyLevel", optional, tag="34", default="IdempotencyUnknown")] pub idempotency_level: ::std::option::Option<i32>, /// The parser stores options it doesn't recognize here. See above. #[prost(message, repeated, tag="999")] pub uninterpreted_option: ::std::vec::Vec<UninterpretedOption>, } pub mod method_options { /// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, /// or neither? HTTP based RPC implementation may choose GET verb for safe /// methods, and PUT verb for idempotent methods instead of the default POST. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum IdempotencyLevel { IdempotencyUnknown = 0, /// implies idempotent NoSideEffects = 1, /// idempotent, but may have side effects Idempotent = 2, } } /// A message representing a option the parser does not recognize. This only /// appears in options protos created by the compiler::Parser class. /// DescriptorPool resolves these when building Descriptor objects. Therefore, /// options protos in descriptor objects (e.g. returned by Descriptor::options(), /// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions /// in them. #[derive(Clone, PartialEq, ::prost::Message)] pub struct UninterpretedOption { #[prost(message, repeated, tag="2")] pub name: ::std::vec::Vec<uninterpreted_option::NamePart>, /// The value of the uninterpreted option, in whatever type the tokenizer /// identified it as during parsing. Exactly one of these should be set. #[prost(string, optional, tag="3")] pub identifier_value: ::std::option::Option<std::string::String>, #[prost(uint64, optional, tag="4")] pub positive_int_value: ::std::option::Option<u64>, #[prost(int64, optional, tag="5")] pub negative_int_value: ::std::option::Option<i64>, #[prost(double, optional, tag="6")] pub double_value: ::std::option::Option<f64>, #[prost(bytes, optional, tag="7")] pub string_value: ::std::option::Option<std::vec::Vec<u8>>, #[prost(string, optional, tag="8")] pub aggregate_value: ::std::option::Option<std::string::String>, } pub mod uninterpreted_option { /// The name of the uninterpreted option. Each string represents a segment in /// a dot-separated name. is_extension is true iff a segment represents an /// extension (denoted with parentheses in options specs in .proto files). /// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents /// "foo.(bar.baz).qux". #[derive(Clone, PartialEq, ::prost::Message)] pub struct NamePart { #[prost(string, required, tag="1")] pub name_part: std::string::String, #[prost(bool, required, tag="2")] pub is_extension: bool, } } // =================================================================== // Optional source code info /// Encapsulates information about the original source file from which a /// FileDescriptorProto was generated. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SourceCodeInfo { /// A Location identifies a piece of source code in a .proto file which /// corresponds to a particular definition. This information is intended /// to be useful to IDEs, code indexers, documentation generators, and similar /// tools. /// /// For example, say we have a file like: /// message Foo { /// optional string foo = 1; /// } /// Let's look at just the field definition: /// optional string foo = 1; /// ^ ^^ ^^ ^ ^^^ /// a bc de f ghi /// We have the following locations: /// span path represents /// [a,i) [ 4, 0, 2, 0 ] The whole field definition. /// [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). /// [c,d) [ 4, 0, 2, 0, 5 ] The type (string). /// [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). /// [g,h) [ 4, 0, 2, 0, 3 ] The number (1). /// /// Notes: /// - A location may refer to a repeated field itself (i.e. not to any /// particular index within it). This is used whenever a set of elements are /// logically enclosed in a single code segment. For example, an entire /// extend block (possibly containing multiple extension definitions) will /// have an outer location whose path refers to the "extensions" repeated /// field without an index. /// - Multiple locations may have the same path. This happens when a single /// logical declaration is spread out across multiple places. The most /// obvious example is the "extend" block again -- there may be multiple /// extend blocks in the same scope, each of which will have the same path. /// - A location's span is not always a subset of its parent's span. For /// example, the "extendee" of an extension declaration appears at the /// beginning of the "extend" block and is shared by all extensions within /// the block. /// - Just because a location's span is a subset of some other location's span /// does not mean that it is a descendant. For example, a "group" defines /// both a type and a field in a single declaration. Thus, the locations /// corresponding to the type and field and their components will overlap. /// - Code which tries to interpret locations should probably be designed to /// ignore those that it doesn't understand, as more types of locations could /// be recorded in the future. #[prost(message, repeated, tag="1")] pub location: ::std::vec::Vec<source_code_info::Location>, } pub mod source_code_info { #[derive(Clone, PartialEq, ::prost::Message)] pub struct Location { /// Identifies which part of the FileDescriptorProto was defined at this /// location. /// /// Each element is a field number or an index. They form a path from /// the root FileDescriptorProto to the place where the definition. For /// example, this path: /// [ 4, 3, 2, 7, 1 ] /// refers to: /// file.message_type(3) // 4, 3 /// .field(7) // 2, 7 /// .name() // 1 /// This is because FileDescriptorProto.message_type has field number 4: /// repeated DescriptorProto message_type = 4; /// and DescriptorProto.field has field number 2: /// repeated FieldDescriptorProto field = 2; /// and FieldDescriptorProto.name has field number 1: /// optional string name = 1; /// /// Thus, the above path gives the location of a field name. If we removed /// the last element: /// [ 4, 3, 2, 7 ] /// this path refers to the whole field declaration (from the beginning /// of the label to the terminating semicolon). #[prost(int32, repeated, tag="1")] pub path: ::std::vec::Vec<i32>, /// Always has exactly three or four elements: start line, start column, /// end line (optional, otherwise assumed same as start line), end column. /// These are packed into a single field for efficiency. Note that line /// and column numbers are zero-based -- typically you will want to add /// 1 to each before displaying to a user. #[prost(int32, repeated, tag="2")] pub span: ::std::vec::Vec<i32>, /// If this SourceCodeInfo represents a complete declaration, these are any /// comments appearing before and after the declaration which appear to be /// attached to the declaration. /// /// A series of line comments appearing on consecutive lines, with no other /// tokens appearing on those lines, will be treated as a single comment. /// /// leading_detached_comments will keep paragraphs of comments that appear /// before (but not connected to) the current element. Each paragraph, /// separated by empty lines, will be one comment element in the repeated /// field. /// /// Only the comment content is provided; comment markers (e.g. //) are /// stripped out. For block comments, leading whitespace and an asterisk /// will be stripped from the beginning of each line other than the first. /// Newlines are included in the output. /// /// Examples: /// /// optional int32 foo = 1; // Comment attached to foo. /// // Comment attached to bar. /// optional int32 bar = 2; /// /// optional string baz = 3; /// // Comment attached to baz. /// // Another line attached to baz. /// /// // Comment attached to qux. /// // /// // Another line attached to qux. /// optional double qux = 4; /// /// // Detached comment for corge. This is not leading or trailing comments /// // to qux or corge because there are blank lines separating it from /// // both. /// /// // Detached comment for corge paragraph 2. /// /// optional string corge = 5; /// /* Block comment attached /// * to corge. Leading asterisks /// * will be removed. */ /// /* Block comment attached to /// * grault. */ /// optional int32 grault = 6; /// /// // ignored detached comments. #[prost(string, optional, tag="3")] pub leading_comments: ::std::option::Option<std::string::String>, #[prost(string, optional, tag="4")] pub trailing_comments: ::std::option::Option<std::string::String>, #[prost(string, repeated, tag="6")] pub leading_detached_comments: ::std::vec::Vec<std::string::String>, } } /// Describes the relationship between generated code and its original source /// file. A GeneratedCodeInfo message is associated with only one generated /// source file, but may contain references to different source .proto files. #[derive(Clone, PartialEq, ::prost::Message)] pub struct GeneratedCodeInfo { /// An Annotation connects some span of text in generated code to an element /// of its generating .proto file. #[prost(message, repeated, tag="1")] pub annotation: ::std::vec::Vec<generated_code_info::Annotation>, } pub mod generated_code_info { #[derive(Clone, PartialEq, ::prost::Message)] pub struct Annotation { /// Identifies the element in the original source .proto file. This field /// is formatted the same as SourceCodeInfo.Location.path. #[prost(int32, repeated, tag="1")] pub path: ::std::vec::Vec<i32>, /// Identifies the filesystem path to the original source .proto. #[prost(string, optional, tag="2")] pub source_file: ::std::option::Option<std::string::String>, /// Identifies the starting offset in bytes in the generated code /// that relates to the identified object. #[prost(int32, optional, tag="3")] pub begin: ::std::option::Option<i32>, /// Identifies the ending offset in bytes in the generated code that /// relates to the identified offset. The end offset should be one past /// the last relevant byte (so the length of the text = end - begin). #[prost(int32, optional, tag="4")] pub end: ::std::option::Option<i32>, } } /// `Any` contains an arbitrary serialized protocol buffer message along with a /// URL that describes the type of the serialized message. /// /// Protobuf library provides support to pack/unpack Any values in the form /// of utility functions or additional generated methods of the Any type. /// /// Example 1: Pack and unpack a message in C++. /// /// Foo foo = ...; /// Any any; /// any.PackFrom(foo); /// ... /// if (any.UnpackTo(&foo)) { /// ... /// } /// /// Example 2: Pack and unpack a message in Java. /// /// Foo foo = ...; /// Any any = Any.pack(foo); /// ... /// if (any.is(Foo.class)) { /// foo = any.unpack(Foo.class); /// } /// /// Example 3: Pack and unpack a message in Python. /// /// foo = Foo(...) /// any = Any() /// any.Pack(foo) /// ... /// if any.Is(Foo.DESCRIPTOR): /// any.Unpack(foo) /// ... /// /// Example 4: Pack and unpack a message in Go /// /// foo := &pb.Foo{...} /// any, err := ptypes.MarshalAny(foo) /// ... /// foo := &pb.Foo{} /// if err := ptypes.UnmarshalAny(any, foo); err != nil { /// ... /// } /// /// The pack methods provided by protobuf library will by default use /// 'type.googleapis.com/full.type.name' as the type URL and the unpack /// methods only use the fully qualified type name after the last '/' /// in the type URL, for example "foo.bar.com/x/y.z" will yield type /// name "y.z". /// /// /// JSON /// ==== /// The JSON representation of an `Any` value uses the regular /// representation of the deserialized, embedded message, with an /// additional field `@type` which contains the type URL. Example: /// /// package google.profile; /// message Person { /// string first_name = 1; /// string last_name = 2; /// } /// /// { /// "@type": "type.googleapis.com/google.profile.Person", /// "firstName": <string>, /// "lastName": <string> /// } /// /// If the embedded message type is well-known and has a custom JSON /// representation, that representation will be embedded adding a field /// `value` which holds the custom JSON in addition to the `@type` /// field. Example (for message [google.protobuf.Duration][]): /// /// { /// "@type": "type.googleapis.com/google.protobuf.Duration", /// "value": "1.212s" /// } /// #[derive(Clone, PartialEq, ::prost::Message)] pub struct Any { /// A URL/resource name that uniquely identifies the type of the serialized /// protocol buffer message. This string must contain at least /// one "/" character. The last segment of the URL's path must represent /// the fully qualified name of the type (as in /// `path/google.protobuf.Duration`). The name should be in a canonical form /// (e.g., leading "." is not accepted). /// /// In practice, teams usually precompile into the binary all types that they /// expect it to use in the context of Any. However, for URLs which use the /// scheme `http`, `https`, or no scheme, one can optionally set up a type /// server that maps type URLs to message definitions as follows: /// /// * If no scheme is provided, `https` is assumed. /// * An HTTP GET on the URL must yield a [google.protobuf.Type][] /// value in binary format, or produce an error. /// * Applications are allowed to cache lookup results based on the /// URL, or have them precompiled into a binary to avoid any /// lookup. Therefore, binary compatibility needs to be preserved /// on changes to types. (Use versioned type names to manage /// breaking changes.) /// /// Note: this functionality is not currently available in the official /// protobuf release, and it is not used for type URLs beginning with /// type.googleapis.com. /// /// Schemes other than `http`, `https` (or the empty scheme) might be /// used with implementation specific semantics. /// #[prost(string, tag="1")] pub type_url: std::string::String, /// Must be a valid serialized protocol buffer of the above specified type. #[prost(bytes, tag="2")] pub value: std::vec::Vec<u8>, } /// `SourceContext` represents information about the source of a /// protobuf element, like the file in which it is defined. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SourceContext { /// The path-qualified name of the .proto file that contained the associated /// protobuf element. For example: `"google/protobuf/source_context.proto"`. #[prost(string, tag="1")] pub file_name: std::string::String, } /// A protocol buffer message type. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Type { /// The fully qualified message name. #[prost(string, tag="1")] pub name: std::string::String, /// The list of fields. #[prost(message, repeated, tag="2")] pub fields: ::std::vec::Vec<Field>, /// The list of types appearing in `oneof` definitions in this type. #[prost(string, repeated, tag="3")] pub oneofs: ::std::vec::Vec<std::string::String>, /// The protocol buffer options. #[prost(message, repeated, tag="4")] pub options: ::std::vec::Vec<Option>, /// The source context. #[prost(message, optional, tag="5")] pub source_context: ::std::option::Option<SourceContext>, /// The source syntax. #[prost(enumeration="Syntax", tag="6")] pub syntax: i32, } /// A single field of a message type. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Field { /// The field type. #[prost(enumeration="field::Kind", tag="1")] pub kind: i32, /// The field cardinality. #[prost(enumeration="field::Cardinality", tag="2")] pub cardinality: i32, /// The field number. #[prost(int32, tag="3")] pub number: i32, /// The field name. #[prost(string, tag="4")] pub name: std::string::String, /// The field type URL, without the scheme, for message or enumeration /// types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. #[prost(string, tag="6")] pub type_url: std::string::String, /// The index of the field type in `Type.oneofs`, for message or enumeration /// types. The first type has index 1; zero means the type is not in the list. #[prost(int32, tag="7")] pub oneof_index: i32, /// Whether to use alternative packed wire representation. #[prost(bool, tag="8")] pub packed: bool, /// The protocol buffer options. #[prost(message, repeated, tag="9")] pub options: ::std::vec::Vec<Option>, /// The field JSON name. #[prost(string, tag="10")] pub json_name: std::string::String, /// The string value of the default value of this field. Proto2 syntax only. #[prost(string, tag="11")] pub default_value: std::string::String, } pub mod field { /// Basic field types. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Kind { /// Field type unknown. TypeUnknown = 0, /// Field type double. TypeDouble = 1, /// Field type float. TypeFloat = 2, /// Field type int64. TypeInt64 = 3, /// Field type uint64. TypeUint64 = 4, /// Field type int32. TypeInt32 = 5, /// Field type fixed64. TypeFixed64 = 6, /// Field type fixed32. TypeFixed32 = 7, /// Field type bool. TypeBool = 8, /// Field type string. TypeString = 9, /// Field type group. Proto2 syntax only, and deprecated. TypeGroup = 10, /// Field type message. TypeMessage = 11, /// Field type bytes. TypeBytes = 12, /// Field type uint32. TypeUint32 = 13, /// Field type enum. TypeEnum = 14, /// Field type sfixed32. TypeSfixed32 = 15, /// Field type sfixed64. TypeSfixed64 = 16, /// Field type sint32. TypeSint32 = 17, /// Field type sint64. TypeSint64 = 18, } /// Whether a field is optional, required, or repeated. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Cardinality { /// For fields with unknown cardinality. Unknown = 0, /// For optional fields. Optional = 1, /// For required fields. Proto2 syntax only. Required = 2, /// For repeated fields. Repeated = 3, } } /// Enum type definition. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Enum { /// Enum type name. #[prost(string, tag="1")] pub name: std::string::String, /// Enum value definitions. #[prost(message, repeated, tag="2")] pub enumvalue: ::std::vec::Vec<EnumValue>, /// Protocol buffer options. #[prost(message, repeated, tag="3")] pub options: ::std::vec::Vec<Option>, /// The source context. #[prost(message, optional, tag="4")] pub source_context: ::std::option::Option<SourceContext>, /// The source syntax. #[prost(enumeration="Syntax", tag="5")] pub syntax: i32, } /// Enum value definition. #[derive(Clone, PartialEq, ::prost::Message)] pub struct EnumValue { /// Enum value name. #[prost(string, tag="1")] pub name: std::string::String, /// Enum value number. #[prost(int32, tag="2")] pub number: i32, /// Protocol buffer options. #[prost(message, repeated, tag="3")] pub options: ::std::vec::Vec<Option>, } /// A protocol buffer option, which can be attached to a message, field, /// enumeration, etc. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Option { /// The option's name. For protobuf built-in options (options defined in /// descriptor.proto), this is the short name. For example, `"map_entry"`. /// For custom options, it should be the fully-qualified name. For example, /// `"google.api.http"`. #[prost(string, tag="1")] pub name: std::string::String, /// The option's value packed in an Any message. If the value is a primitive, /// the corresponding wrapper type defined in google/protobuf/wrappers.proto /// should be used. If the value is an enum, it should be stored as an int32 /// value using the google.protobuf.Int32Value type. #[prost(message, optional, tag="2")] pub value: ::std::option::Option<Any>, } /// The syntax in which a protocol buffer element is defined. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Syntax { /// Syntax `proto2`. Proto2 = 0, /// Syntax `proto3`. Proto3 = 1, } /// Api is a light-weight descriptor for an API Interface. /// /// Interfaces are also described as "protocol buffer services" in some contexts, /// such as by the "service" keyword in a .proto file, but they are different /// from API Services, which represent a concrete implementation of an interface /// as opposed to simply a description of methods and bindings. They are also /// sometimes simply referred to as "APIs" in other contexts, such as the name of /// this message itself. See https://cloud.google.com/apis/design/glossary for /// detailed terminology. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Api { /// The fully qualified name of this interface, including package name /// followed by the interface's simple name. #[prost(string, tag="1")] pub name: std::string::String, /// The methods of this interface, in unspecified order. #[prost(message, repeated, tag="2")] pub methods: ::std::vec::Vec<Method>, /// Any metadata attached to the interface. #[prost(message, repeated, tag="3")] pub options: ::std::vec::Vec<Option>, /// A version string for this interface. If specified, must have the form /// `major-version.minor-version`, as in `1.10`. If the minor version is /// omitted, it defaults to zero. If the entire version field is empty, the /// major version is derived from the package name, as outlined below. If the /// field is not empty, the version in the package name will be verified to be /// consistent with what is provided here. /// /// The versioning schema uses [semantic /// versioning](http://semver.org) where the major version number /// indicates a breaking change and the minor version an additive, /// non-breaking change. Both version numbers are signals to users /// what to expect from different versions, and should be carefully /// chosen based on the product plan. /// /// The major version is also reflected in the package name of the /// interface, which must end in `v<major-version>`, as in /// `google.feature.v1`. For major versions 0 and 1, the suffix can /// be omitted. Zero major versions must only be used for /// experimental, non-GA interfaces. /// /// #[prost(string, tag="4")] pub version: std::string::String, /// Source context for the protocol buffer service represented by this /// message. #[prost(message, optional, tag="5")] pub source_context: ::std::option::Option<SourceContext>, /// Included interfaces. See [Mixin][]. #[prost(message, repeated, tag="6")] pub mixins: ::std::vec::Vec<Mixin>, /// The source syntax of the service. #[prost(enumeration="Syntax", tag="7")] pub syntax: i32, } /// Method represents a method of an API interface. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Method { /// The simple name of this method. #[prost(string, tag="1")] pub name: std::string::String, /// A URL of the input message type. #[prost(string, tag="2")] pub request_type_url: std::string::String, /// If true, the request is streamed. #[prost(bool, tag="3")] pub request_streaming: bool, /// The URL of the output message type. #[prost(string, tag="4")] pub response_type_url: std::string::String, /// If true, the response is streamed. #[prost(bool, tag="5")] pub response_streaming: bool, /// Any metadata attached to the method. #[prost(message, repeated, tag="6")] pub options: ::std::vec::Vec<Option>, /// The source syntax of this method. #[prost(enumeration="Syntax", tag="7")] pub syntax: i32, } /// Declares an API Interface to be included in this interface. The including /// interface must redeclare all the methods from the included interface, but /// documentation and options are inherited as follows: /// /// - If after comment and whitespace stripping, the documentation /// string of the redeclared method is empty, it will be inherited /// from the original method. /// /// - Each annotation belonging to the service config (http, /// visibility) which is not set in the redeclared method will be /// inherited. /// /// - If an http annotation is inherited, the path pattern will be /// modified as follows. Any version prefix will be replaced by the /// version of the including interface plus the [root][] path if /// specified. /// /// Example of a simple mixin: /// /// package google.acl.v1; /// service AccessControl { /// // Get the underlying ACL object. /// rpc GetAcl(GetAclRequest) returns (Acl) { /// option (google.api.http).get = "/v1/{resource=**}:getAcl"; /// } /// } /// /// package google.storage.v2; /// service Storage { /// rpc GetAcl(GetAclRequest) returns (Acl); /// /// // Get a data record. /// rpc GetData(GetDataRequest) returns (Data) { /// option (google.api.http).get = "/v2/{resource=**}"; /// } /// } /// /// Example of a mixin configuration: /// /// apis: /// - name: google.storage.v2.Storage /// mixins: /// - name: google.acl.v1.AccessControl /// /// The mixin construct implies that all methods in `AccessControl` are /// also declared with same name and request/response types in /// `Storage`. A documentation generator or annotation processor will /// see the effective `Storage.GetAcl` method after inherting /// documentation and annotations as follows: /// /// service Storage { /// // Get the underlying ACL object. /// rpc GetAcl(GetAclRequest) returns (Acl) { /// option (google.api.http).get = "/v2/{resource=**}:getAcl"; /// } /// ... /// } /// /// Note how the version in the path pattern changed from `v1` to `v2`. /// /// If the `root` field in the mixin is specified, it should be a /// relative path under which inherited HTTP paths are placed. Example: /// /// apis: /// - name: google.storage.v2.Storage /// mixins: /// - name: google.acl.v1.AccessControl /// root: acls /// /// This implies the following inherited HTTP annotation: /// /// service Storage { /// // Get the underlying ACL object. /// rpc GetAcl(GetAclRequest) returns (Acl) { /// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; /// } /// ... /// } #[derive(Clone, PartialEq, ::prost::Message)] pub struct Mixin { /// The fully qualified name of the interface which is included. #[prost(string, tag="1")] pub name: std::string::String, /// If non-empty specifies a path under which inherited HTTP paths /// are rooted. #[prost(string, tag="2")] pub root: std::string::String, } /// A Duration represents a signed, fixed-length span of time represented /// as a count of seconds and fractions of seconds at nanosecond /// resolution. It is independent of any calendar and concepts like "day" /// or "month". It is related to Timestamp in that the difference between /// two Timestamp values is a Duration and it can be added or subtracted /// from a Timestamp. Range is approximately +-10,000 years. /// /// # Examples /// /// Example 1: Compute Duration from two Timestamps in pseudo code. /// /// Timestamp start = ...; /// Timestamp end = ...; /// Duration duration = ...; /// /// duration.seconds = end.seconds - start.seconds; /// duration.nanos = end.nanos - start.nanos; /// /// if (duration.seconds < 0 && duration.nanos > 0) { /// duration.seconds += 1; /// duration.nanos -= 1000000000; /// } else if (duration.seconds > 0 && duration.nanos < 0) { /// duration.seconds -= 1; /// duration.nanos += 1000000000; /// } /// /// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. /// /// Timestamp start = ...; /// Duration duration = ...; /// Timestamp end = ...; /// /// end.seconds = start.seconds + duration.seconds; /// end.nanos = start.nanos + duration.nanos; /// /// if (end.nanos < 0) { /// end.seconds -= 1; /// end.nanos += 1000000000; /// } else if (end.nanos >= 1000000000) { /// end.seconds += 1; /// end.nanos -= 1000000000; /// } /// /// Example 3: Compute Duration from datetime.timedelta in Python. /// /// td = datetime.timedelta(days=3, minutes=10) /// duration = Duration() /// duration.FromTimedelta(td) /// /// # JSON Mapping /// /// In JSON format, the Duration type is encoded as a string rather than an /// object, where the string ends in the suffix "s" (indicating seconds) and /// is preceded by the number of seconds, with nanoseconds expressed as /// fractional seconds. For example, 3 seconds with 0 nanoseconds should be /// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should /// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 /// microsecond should be expressed in JSON format as "3.000001s". /// /// #[derive(Clone, PartialEq, ::prost::Message)] pub struct Duration { /// Signed seconds of the span of time. Must be from -315,576,000,000 /// to +315,576,000,000 inclusive. Note: these bounds are computed from: /// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years #[prost(int64, tag="1")] pub seconds: i64, /// Signed fractions of a second at nanosecond resolution of the span /// of time. Durations less than one second are represented with a 0 /// `seconds` field and a positive or negative `nanos` field. For durations /// of one second or more, a non-zero value for the `nanos` field must be /// of the same sign as the `seconds` field. Must be from -999,999,999 /// to +999,999,999 inclusive. #[prost(int32, tag="2")] pub nanos: i32, } /// `FieldMask` represents a set of symbolic field paths, for example: /// /// paths: "f.a" /// paths: "f.b.d" /// /// Here `f` represents a field in some root message, `a` and `b` /// fields in the message found in `f`, and `d` a field found in the /// message in `f.b`. /// /// Field masks are used to specify a subset of fields that should be /// returned by a get operation or modified by an update operation. /// Field masks also have a custom JSON encoding (see below). /// /// # Field Masks in Projections /// /// When used in the context of a projection, a response message or /// sub-message is filtered by the API to only contain those fields as /// specified in the mask. For example, if the mask in the previous /// example is applied to a response message as follows: /// /// f { /// a : 22 /// b { /// d : 1 /// x : 2 /// } /// y : 13 /// } /// z: 8 /// /// The result will not contain specific values for fields x,y and z /// (their value will be set to the default, and omitted in proto text /// output): /// /// /// f { /// a : 22 /// b { /// d : 1 /// } /// } /// /// A repeated field is not allowed except at the last position of a /// paths string. /// /// If a FieldMask object is not present in a get operation, the /// operation applies to all fields (as if a FieldMask of all fields /// had been specified). /// /// Note that a field mask does not necessarily apply to the /// top-level response message. In case of a REST get operation, the /// field mask applies directly to the response, but in case of a REST /// list operation, the mask instead applies to each individual message /// in the returned resource list. In case of a REST custom method, /// other definitions may be used. Where the mask applies will be /// clearly documented together with its declaration in the API. In /// any case, the effect on the returned resource/resources is required /// behavior for APIs. /// /// # Field Masks in Update Operations /// /// A field mask in update operations specifies which fields of the /// targeted resource are going to be updated. The API is required /// to only change the values of the fields as specified in the mask /// and leave the others untouched. If a resource is passed in to /// describe the updated values, the API ignores the values of all /// fields not covered by the mask. /// /// If a repeated field is specified for an update operation, new values will /// be appended to the existing repeated field in the target resource. Note that /// a repeated field is only allowed in the last position of a `paths` string. /// /// If a sub-message is specified in the last position of the field mask for an /// update operation, then new value will be merged into the existing sub-message /// in the target resource. /// /// For example, given the target message: /// /// f { /// b { /// d: 1 /// x: 2 /// } /// c: [1] /// } /// /// And an update message: /// /// f { /// b { /// d: 10 /// } /// c: [2] /// } /// /// then if the field mask is: /// /// paths: ["f.b", "f.c"] /// /// then the result will be: /// /// f { /// b { /// d: 10 /// x: 2 /// } /// c: [1, 2] /// } /// /// An implementation may provide options to override this default behavior for /// repeated and message fields. /// /// In order to reset a field's value to the default, the field must /// be in the mask and set to the default value in the provided resource. /// Hence, in order to reset all fields of a resource, provide a default /// instance of the resource and set all fields in the mask, or do /// not provide a mask as described below. /// /// If a field mask is not present on update, the operation applies to /// all fields (as if a field mask of all fields has been specified). /// Note that in the presence of schema evolution, this may mean that /// fields the client does not know and has therefore not filled into /// the request will be reset to their default. If this is unwanted /// behavior, a specific service may require a client to always specify /// a field mask, producing an error if not. /// /// As with get operations, the location of the resource which /// describes the updated values in the request message depends on the /// operation kind. In any case, the effect of the field mask is /// required to be honored by the API. /// /// ## Considerations for HTTP REST /// /// The HTTP kind of an update operation which uses a field mask must /// be set to PATCH instead of PUT in order to satisfy HTTP semantics /// (PUT must only be used for full updates). /// /// # JSON Encoding of Field Masks /// /// In JSON, a field mask is encoded as a single string where paths are /// separated by a comma. Fields name in each path are converted /// to/from lower-camel naming conventions. /// /// As an example, consider the following message declarations: /// /// message Profile { /// User user = 1; /// Photo photo = 2; /// } /// message User { /// string display_name = 1; /// string address = 2; /// } /// /// In proto a field mask for `Profile` may look as such: /// /// mask { /// paths: "user.display_name" /// paths: "photo" /// } /// /// In JSON, the same mask is represented as below: /// /// { /// mask: "user.displayName,photo" /// } /// /// # Field Masks and Oneof Fields /// /// Field masks treat fields in oneofs just as regular fields. Consider the /// following message: /// /// message SampleMessage { /// oneof test_oneof { /// string name = 4; /// SubMessage sub_message = 9; /// } /// } /// /// The field mask can be: /// /// mask { /// paths: "name" /// } /// /// Or: /// /// mask { /// paths: "sub_message" /// } /// /// Note that oneof type names ("test_oneof" in this case) cannot be used in /// paths. /// /// ## Field Mask Verification /// /// The implementation of any API method which has a FieldMask type field in the /// request should verify the included field paths, and return an /// `INVALID_ARGUMENT` error if any path is unmappable. #[derive(Clone, PartialEq, ::prost::Message)] pub struct FieldMask { /// The set of field mask paths. #[prost(string, repeated, tag="1")] pub paths: ::std::vec::Vec<std::string::String>, } /// `Struct` represents a structured data value, consisting of fields /// which map to dynamically typed values. In some languages, `Struct` /// might be supported by a native representation. For example, in /// scripting languages like JS a struct is represented as an /// object. The details of that representation are described together /// with the proto support for the language. /// /// The JSON representation for `Struct` is JSON object. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Struct { /// Unordered map of dynamically typed values. #[prost(btree_map="string, message", tag="1")] pub fields: ::std::collections::BTreeMap<std::string::String, Value>, } /// `Value` represents a dynamically typed value which can be either /// null, a number, a string, a boolean, a recursive struct value, or a /// list of values. A producer of value is expected to set one of that /// variants, absence of any variant indicates an error. /// /// The JSON representation for `Value` is JSON value. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Value { /// The kind of value. #[prost(oneof="value::Kind", tags="1, 2, 3, 4, 5, 6")] pub kind: ::std::option::Option<value::Kind>, } pub mod value { /// The kind of value. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Kind { /// Represents a null value. #[prost(enumeration="super::NullValue", tag="1")] NullValue(i32), /// Represents a double value. #[prost(double, tag="2")] NumberValue(f64), /// Represents a string value. #[prost(string, tag="3")] StringValue(std::string::String), /// Represents a boolean value. #[prost(bool, tag="4")] BoolValue(bool), /// Represents a structured value. #[prost(message, tag="5")] StructValue(super::Struct), /// Represents a repeated `Value`. #[prost(message, tag="6")] ListValue(super::ListValue), } } /// `ListValue` is a wrapper around a repeated field of values. /// /// The JSON representation for `ListValue` is JSON array. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ListValue { /// Repeated field of dynamically typed values. #[prost(message, repeated, tag="1")] pub values: ::std::vec::Vec<Value>, } /// `NullValue` is a singleton enumeration to represent the null value for the /// `Value` type union. /// /// The JSON representation for `NullValue` is JSON `null`. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum NullValue { /// Null value. NullValue = 0, } /// A Timestamp represents a point in time independent of any time zone or local /// calendar, encoded as a count of seconds and fractions of seconds at /// nanosecond resolution. The count is relative to an epoch at UTC midnight on /// January 1, 1970, in the proleptic Gregorian calendar which extends the /// Gregorian calendar backwards to year one. /// /// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap /// second table is needed for interpretation, using a [24-hour linear /// smear](https://developers.google.com/time/smear). /// /// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By /// restricting to that range, we ensure that we can convert to and from [RFC /// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. /// /// # Examples /// /// Example 1: Compute Timestamp from POSIX `time()`. /// /// Timestamp timestamp; /// timestamp.set_seconds(time(NULL)); /// timestamp.set_nanos(0); /// /// Example 2: Compute Timestamp from POSIX `gettimeofday()`. /// /// struct timeval tv; /// gettimeofday(&tv, NULL); /// /// Timestamp timestamp; /// timestamp.set_seconds(tv.tv_sec); /// timestamp.set_nanos(tv.tv_usec * 1000); /// /// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. /// /// FILETIME ft; /// GetSystemTimeAsFileTime(&ft); /// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; /// /// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z /// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. /// Timestamp timestamp; /// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); /// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); /// /// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. /// /// long millis = System.currentTimeMillis(); /// /// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) /// .setNanos((int) ((millis % 1000) * 1000000)).build(); /// /// /// Example 5: Compute Timestamp from current time in Python. /// /// timestamp = Timestamp() /// timestamp.GetCurrentTime() /// /// # JSON Mapping /// /// In JSON format, the Timestamp type is encoded as a string in the /// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the /// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" /// where {year} is always expressed using four digits while {month}, {day}, /// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional /// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), /// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone /// is required. A proto3 JSON serializer should always use UTC (as indicated by /// "Z") when printing the Timestamp type and a proto3 JSON parser should be /// able to accept both UTC and other timezones (as indicated by an offset). /// /// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past /// 01:30 UTC on January 15, 2017. /// /// In JavaScript, one can convert a Date object to this format using the /// standard /// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) /// method. In Python, a standard `datetime.datetime` object can be converted /// to this format using /// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with /// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use /// the Joda Time's [`ISODateTimeFormat.dateTime()`]( /// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D /// ) to obtain a formatter capable of generating timestamps in this format. /// /// #[derive(Clone, PartialEq, ::prost::Message)] pub struct Timestamp { /// Represents seconds of UTC time since Unix epoch /// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to /// 9999-12-31T23:59:59Z inclusive. #[prost(int64, tag="1")] pub seconds: i64, /// Non-negative fractions of a second at nanosecond resolution. Negative /// second values with fractions must still have non-negative nanos values /// that count forward in time. Must be from 0 to 999,999,999 /// inclusive. #[prost(int32, tag="2")] pub nanos: i32, }
///
Setting.js
// All material copyright ESRI, All Rights Reserved, unless otherwise specified. // See http://js.arcgis.com/3.15/esri/copyright.txt and http://www.arcgis.com/apps/webappbuilder/copyright.txt for details. //>>built require({cache:{"jimu/dijit/LayerChooserFromMap":function(){define("dojo/on dojo/Evented dojo/_base/declare dijit/_WidgetBase dijit/_TemplatedMixin dijit/_WidgetsInTemplateMixin dojo/store/Memory dojo/Deferred dojo/store/Observable dijit/tree/ObjectStoreModel dojo/promise/all dojo/_base/lang dojo/_base/html dojo/_base/array jimu/utils jimu/dijit/_Tree jimu/LayerInfos/LayerInfos jimu/dijit/LoadingIndicator".split(" "),function(g,m,q,h,c,d,f,k,p,l,b,a,e,n,x,r,u,C){var t=q([h,c,d,m],{templateString:'\x3cdiv style\x3d"width:100%;"\x3e\x3cdiv data-dojo-attach-point\x3d"errorTipSection" class\x3d"error-tip-section"\x3e\x3cspan class\x3d"jimu-icon jimu-icon-error"\x3e\x3c/span\x3e\x3cspan class\x3d"jimu-state-error-text" data-dojo-attach-point\x3d"errTip"\x3e${nls.noLayersTip}\x3c/span\x3e\x3c/div\x3e\x3c/div\x3e', _store:null,_id:0,_treeClass:"layer-chooser-tree",createMapResponse:null,multiple:!1,onlyShowVisible:!1,updateWhenLayerInfosIsShowInMapChanged:!1,onlyShowWebMapLayers:!1,displayTooltipForTreeNode:!1,postMixInProperties:function(){this.nls=window.jimuNls.basicLayerChooserFromMap},postCreate:function(){this.inherited(arguments);e.addClass(this.domNode,"jimu-basic-layer-chooser-from-map");this.multiple=!!this.multiple;this.shelter=new C({hidden:!0});this.shelter.placeAt(this.domNode);this.shelter.startup(); this._createTree();this.basicFilter=a.hitch(this,this.basicFilter);this.filter=t.andCombineFilters([this.basicFilter,this.filter]);this.createMapResponse&&this.setCreateMapResponse(this.createMapResponse)},basicFilter:function(a){var b=new k;this.onlyShowVisible?b.resolve(a.isShowInMap()):b.resolve(!0);return b},filter:function(a){a=new k;a.resolve(!0);return a},getSelectedItems:function(){var b=this.tree.getSelectedItems();return n.map(b,a.hitch(this,function(a){return this.getHandledItem(a)}))}, getAllItems:function(){var b=this.tree.getAllItems(),e=[];n.forEach(b,a.hitch(this,function(a){"root"!==a.id&&(a=this.getHandledItem(a),e.push(a))}));return e},getHandledItem:function(a){return{name:a.name,layerInfo:a.layerInfo}},_isLeafItem:function(a){return a.isLeaf},setCreateMapResponse:function(b){this.createMapResponse=b;u.getInstance(this.createMapResponse.map,this.createMapResponse.itemInfo).then(a.hitch(this,function(b){this.layerInfosObj=b;this.own(g(this.layerInfosObj,"layerInfosChanged", a.hitch(this,this._onLayerInfosChanged)));this.updateWhenLayerInfosIsShowInMapChanged&&this.own(g(this.layerInfosObj,"layerInfosIsShowInMapChanged",a.hitch(this,this._onLayerInfosIsShowInMapChanged)));this._buildTree(this.layerInfosObj)}))},_onLayerInfosChanged:function(a,b){this._buildTree(this.layerInfosObj);this.emit("update")},_onLayerInfosIsShowInMapChanged:function(a){this._buildTree(this.layerInfosObj);this.emit("update")},_buildTree:function(b){this._clear();e.setStyle(this.errorTipSection, "display","block");var c=[];this.onlyShowWebMapLayers?(c=b.getLayerInfoArrayOfWebmap(),c=c.concat(b.getTableInfoArrayOfWebmap())):(c=b.getLayerInfoArray(),c=c.concat(b.getTableInfoArray()));0!==c.length&&(e.setStyle(this.errorTipSection,"display","none"),n.forEach(c,a.hitch(this,function(a){this._addDirectLayerInfo(a)})))},_addDirectLayerInfo:function(b){b&&b.getLayerObject().then(a.hitch(this,function(){this._addItem("root",b)}),a.hitch(this,function(a){console.error(a)}))},_clear:function(){var b= this._store.query({parent:"root"});n.forEach(b,a.hitch(this,function(a){a&&"root"!==a.id&&this._store.remove(a.id)}))},_addItem:function(e,c){var d=null,f=c.getLayerType(),z=this.filter(c);b({layerType:f,valid:z}).then(a.hitch(this,function(f){if(f.valid){var z=a.hitch(this,function(a,b){this._id++;d={name:c.title||"",parent:e,layerInfo:c,type:f.layerType,layerClass:c.layerObject.declaredClass,id:this._id.toString(),isLeaf:a,hasChildren:b};this._store.add(d)}),k=c.getSubLayers(),g=0===k.length;g? z(g,!1):(k=n.map(k,a.hitch(this,function(a){return this.filter(a)})),b(k).then(a.hitch(this,function(a){(a=n.some(a,function(a){return a}))&&z(g,a)})))}}))},_getRootItem:function(){return{id:"root",name:"Map Root",type:"root",isLeaf:!1,hasChildren:!0}},_createTree:function(){var b=this._getRootItem(),b=new f({data:[b],getChildren:function(a){return this.query({parent:a.id})}});this._store=new p(b);b=new l({store:this._store,query:{id:"root"},mayHaveChildren:a.hitch(this,this._mayHaveChildren)});this.tree= new r({multiple:this.multiple,model:b,showRoot:!1,isLeafItem:a.hitch(this,this._isLeafItem),style:{width:"100%"},onOpen:a.hitch(this,function(a,b){"root"!==a.id&&this._onTreeOpen(a,b)}),onClick:a.hitch(this,function(a,b,e){this._onTreeClick(a,b,e);this.emit("tree-click",a,b,e)}),getIconStyle:a.hitch(this,function(a,b){var e=null;if(!a||"root"===a.id)return null;var c={width:"20px",height:"20px",backgroundRepeat:"no-repeat",backgroundPosition:"center center",backgroundImage:""},d=window.location.protocol+ "//"+window.location.host+require.toUrl("jimu");if(a=this._getIconInfo(a,b).imageName)c.backgroundImage="url("+d+"/css/images/"+a+")",e=c;return e}),getIconClass:a.hitch(this,function(a,b){return this._getIconInfo(a,b).className}),getTooltip:a.hitch(this,function(a){return this.displayTooltipForTreeNode?a.layerInfo.title:""})});e.addClass(this.tree.domNode,this._treeClass);this.tree.placeAt(this.shelter.domNode,"before")},_mayHaveChildren:function(a){return a.hasChildren},_getIconInfo:function(a, b){var e="",c="";"ArcGISDynamicMapServiceLayer"===a.type||"ArcGISTiledMapServiceLayer"===a.type?b?(e="mapserver_open.png",c="mapservice-layer-icon open"):(e="mapserver_close.png",c="mapservice-layer-icon close"):"GroupLayer"===a.type?b?(e="group_layer2.png",c="group-layer-icon open"):(e="group_layer1.png",c="group-layer-icon close"):"FeatureLayer"===a.type?(a=x.getTypeByGeometryType(a.layerInfo.layerObject.geometryType),"point"===a?(e="point_layer1.png",c="point-layer-icon"):"polyline"===a?(e="line_layer1.png", c="line-layer-icon"):"polygon"===a&&(e="polygon_layer1.png",c="polygon-layer-icon")):"Table"===a.type?(e="table.png",c="table-icon"):"ArcGISImageServiceLayer"===a.type||"ArcGISImageServiceVectorLayer"===a.type?(e="image_layer.png",c="iamge-layer-icon"):b?(e="mapserver_open.png",c="mapservice-layer-icon open"):(e="mapserver_close.png",c="mapservice-layer-icon close");return{imageName:e,className:c}},_onTreeOpen:function(e,c){if("root"!==e.id){var d=[];c=[];d=e.layerInfo.getSubLayers();e.checked||(this.shelter.show(), c=n.map(d,a.hitch(this,function(a){return a.getLayerObject()})),b(c).then(a.hitch(this,function(){this.domNode&&(n.forEach(d,a.hitch(this,function(a){this._addItem(e.id,a)})),e.checked=!0,this.shelter.hide())}),a.hitch(this,function(a){console.error(a);this.shelter.hide()})))}},_onTreeClick:function(a,b,e){},destroy:function(){this.shelter&&(this.shelter.destroy(),this.shelter=null);this.tree&&this.tree.destroy();this.inherited(arguments)}});t.createFilterByLayerType=function(e){a.isArrayLike(e)|| (e=[]);return function(a){var c=new k;if(0===e.length)c.resolve(!0);else{var d=[];a.traversal(function(a){d.push(a.getLayerType())});b(d).then(function(a){for(var b=0;b<a.length;b++)for(var d=0;d<e.length;d++)if(a[b]===e[d]){c.resolve(!0);return}c.resolve(!1)},function(a){console.error(a);c.reject(a)})}return c}};t.createFeaturelayerFilter=function(a,e,c,d){var f=["point","polyline","polygon"];a&&0<a.length?(a=n.filter(a,function(a){return 0<=f.indexOf(a)}),0===a.length&&(a=f)):a=f;return function(f){var k= f.getLayerType();f=f.getLayerObject();return b({layerType:k,layerObject:f}).then(function(b){var f=b.layerType;b=b.layerObject;if("ArcGISDynamicMapServiceLayer"===f||"ArcGISTiledMapServiceLayer"===f||"GroupLayer"===f||"FeatureCollection"===f)return!0;if("FeatureLayer"===f){var f=x.getTypeByGeometryType(b.geometryType),f=0<=n.indexOf(a,f),k=t._shouldPassStatisticsCheck(d,b);return b.url?(b=x.isFeaturelayerUrlSupportQuery(b.url,b.capabilities),f&&b&&k):e&&f}return"Table"===f?(f=x.isFeaturelayerUrlSupportQuery(b.url, b.capabilities),b=t._shouldPassStatisticsCheck(d,b),c&&f&&b):!1})}};t.createImageServiceLayerFilter=function(a,e){return function(c){var d=c.getLayerType();c=c.getLayerObject();return b({layerType:d,layerObject:c}).then(function(b){var c=b.layerType,d=b.layerObject;return"ArcGISImageServiceLayer"===c||"ArcGISImageServiceVectorLayer"===c?a?x.isImageServiceSupportQuery(b.layerObject.capabilities)?e?t._shouldPassStatisticsCheck(e,d):!0:!1:!0:!1})}};t._shouldPassStatisticsCheck=function(a,b){return a? (a=!1,a=b.advancedQueryCapabilities?!!b.advancedQueryCapabilities.supportsStatistics:!!b.supportsStatistics):!0};t.createQueryableLayerFilter=function(a){var b=t.createFeaturelayerFilter(["point","polyline","polygon"],!1,!0,a);a=t.createImageServiceLayerFilter(!0,a);return t.orCombineFilters([b,a])};t.andCombineFilters=function(a){return t._combineFilters(a,!0)};t.orCombineFilters=function(a){return t._combineFilters(a,!1)};t._combineFilters=function(a,e){return function(c){var d=new k,f=n.map(a, function(a){return a(c)});b(f).then(function(a){var b=!1,b=e?n.every(a,function(a){return a}):n.some(a,function(a){return a});d.resolve(b)},function(a){console.error(a);d.reject(a)});return d}};return t})},"dijit/tree/ObjectStoreModel":function(){define("dojo/_base/array dojo/aspect dojo/_base/declare dojo/Deferred dojo/_base/lang dojo/when ../Destroyable".split(" "),function(g,m,q,h,c,d,f){return q("dijit.tree.ObjectStoreModel",f,{store:null,labelAttr:"name",labelType:"text",root:null,query:null, constructor:function(d){c.mixin(this,d);this.childrenCache={}},getRoot:function(f,g){if(this.root)f(this.root);else{var k=this.store.query(this.query);k.then&&this.own(k);d(k,c.hitch(this,function(b){if(1!=b.length)throw Error("dijit.tree.ObjectStoreModel: root query returned "+b.length+" items, but must return exactly one");this.root=b[0];f(this.root);k.observe&&k.observe(c.hitch(this,function(a){this.onChange(a)}),!0)}),g)}},mayHaveChildren:function(){return!0},getChildren:function(f,g,h){var b= this.store.getIdentity(f);if(this.childrenCache[b])d(this.childrenCache[b],g,h);else{var a=this.childrenCache[b]=this.store.getChildren(f);a.then&&this.own(a);a.observe&&this.own(a.observe(c.hitch(this,function(b,n,k){this.onChange(b);n!=k&&d(a,c.hitch(this,"onChildrenChange",f))}),!0));d(a,g,h)}},isItem:function(){return!0},getIdentity:function(c){return this.store.getIdentity(c)},getLabel:function(c){return c[this.labelAttr]},newItem:function(c,d,f,b){return this.store.put(c,{parent:d,before:b})}, pasteItem:function(d,f,l,b,a,e){var n=new h;if(f===l&&!b&&!e)return n.resolve(!0),n;f&&!b?this.getChildren(f,c.hitch(this,function(a){a=[].concat(a);var b=g.indexOf(a,d);a.splice(b,1);this.onChildrenChange(f,a);n.resolve(this.store.put(d,{overwrite:!0,parent:l,oldParent:f,before:e,isCopy:!1}))})):n.resolve(this.store.put(d,{overwrite:!0,parent:l,oldParent:f,before:e,isCopy:!0}));return n},onChange:function(){},onChildrenChange:function(){},onDelete:function(){}})})},"jimu/dijit/_Tree":function(){define("dojo/_base/declare dijit/_WidgetBase dijit/_TemplatedMixin dojo/text!./templates/_TreeNode.html dojo/_base/lang dojo/_base/html dojo/_base/array dojo/_base/event dojo/query dojo/aspect dojo/on dojo/keys dojo/Evented dijit/registry dijit/Tree jimu/utils".split(" "), function(g,m,q,h,c,d,f,k,p,l,b,a,e,n,x,r){var u=g([x._TreeNode,e],{templateString:h,declaredClass:"jimu._TreeNode",isLeaf:!1,groupId:"",postCreate:function(){this.inherited(arguments);d.addClass(this.domNode,"jimu-tree-node");this.isLeaf=!!this.isLeaf;this.groupId?(this.checkNode=d.toDom('\x3cinput type\x3d"radio" /\x3e'),this.checkNode.name=this.groupId):this.checkNode=d.toDom('\x3cinput type\x3d"checkbox" /\x3e');d.addClass(this.checkNode,"jimu-tree-check-node");d.place(this.checkNode,this.contentNode, "first");this.own(b(this.checkNode,"click",c.hitch(this,this._onClick)));this.own(b(this.rowNode,"keydown",c.hitch(this,function(b,e){e.target=b;e.keyCode!==a.ENTER&&e.keyCode!==a.SPACE||this._onClick(e)},this.checkNode)));this.isLeaf?this.groupId?d.setStyle(this.checkNode,"display","none"):d.setStyle(this.checkNode,"display","inline"):d.setStyle(this.checkNode,"display","none");this.isLeaf?d.addClass(this.domNode,"jimu-tree-leaf-node"):d.addClass(this.domNode,"jimu-tree-not-leaf-node")},select:function(){this.isLeaf&& (this.checkNode.checked=!0,d.addClass(this.domNode,"jimu-tree-selected-leaf-node"))},unselect:function(){this.isLeaf&&(this.checkNode.checked=!1,d.removeClass(this.domNode,"jimu-tree-selected-leaf-node"))},toggleSelect:function(){this.isLeaf&&(this.checkNode.checked?this.unselect():this.select())},_onClick:function(a){(a.target||a.srcElement)===this.checkNode?this.tree._onCheckNodeClick(this,this.checkNode.checked,a):this.tree._onClick(this,a)},_onChange:function(){this.isLeaf&&setTimeout(c.hitch(this, function(){this.checkNode.checked?this.emit("tn-select",this):this.emit("tn-unselect",this)}),100)},destroy:function(){delete this.tree;this.inherited(arguments)}});return g([x,e],{declaredClass:"jimu._Tree",openOnClick:!0,multiple:!0,uniqueId:"",showRoot:!1,postMixInProperties:function(){this.inherited(arguments);this.uniqueId="tree_"+r.getRandomString()},postCreate:function(){this.inherited(arguments);d.addClass(this.domNode,"jimu-tree");this.own(l.before(this,"onClick",c.hitch(this,this._jimuBeforeClick))); this.rootLoadingIndicator&&d.setStyle(this.rootLoadingIndicator,"display","none");this.dndController.singular=!0;d.setAttr(this.domNode,"tabindex",0)},removeItem:function(a){this.model.store.remove(a)},getAllItems:function(){var a=this.getAllTreeNodeWidgets();return f.map(a,c.hitch(this,function(a){var b=a.item;b.selected=a.checkNode.checked;return b}))},getSelectedItems:function(){var a=this.getAllTreeNodeWidgets(),a=f.filter(a,c.hitch(this,function(a){return a.checkNode.checked}));return f.map(a, c.hitch(this,function(a){return a.item}))},getFilteredItems:function(a){var b=this.getAllTreeNodeWidgets(),b=f.map(b,c.hitch(this,function(a){var b=a.item;b.selected=a.checkNode.checked;return b}));return f.filter(b,c.hitch(this,function(b){return a(b)}))},getTreeNodeByItemId:function(a){for(var b=this._getAllTreeNodeDoms(),e=0;e<b.length;e++){var c=n.byNode(b[e]);if(c.item.id.toString()===a.toString())return c}return null},selectItem:function(a){(a=this.getTreeNodeByItemId(a))&&a.isLeaf&&this.selectNodeWidget(a)}, unselectItem:function(a){(a=this.getTreeNodeByItemId(a))&&a.isLeaf&&a.unselect()},getAllLeafTreeNodeWidgets:function(){var a=this.getAllTreeNodeWidgets();return f.filter(a,c.hitch(this,function(a){return a.isLeaf}))},getAllTreeNodeWidgets:function(){var a=this._getAllTreeNodeDoms();return f.map(a,c.hitch(this,function(a){return n.byNode(a)}))},isLeafItem:function(a){return a&&a.isLeaf},_getAllTreeNodeDoms:function(){return p(".dijitTreeNode",this.domNode)},_createTreeNode:function(a){a.isLeaf=this.isLeafItem(a.item); this.multiple||(a.groupId=this.uniqueId);return new u(a)},_onTreeNodeSelect:function(a){this.emit("item-select",{item:a.item,treeNode:a})},_onTreeNodeUnselect:function(a){this.emit("item-unselect",{item:a.item,treeNode:a})},selectNodeWidget:function(a){this.multiple||this.unselectAllLeafNodeWidgets();a.select()},_jimuBeforeClick:function(a,b,e){b.isLeaf&&(d.hasClass(e.target||e.srcElement,"jimu-tree-check-node")||(this.multiple?b.toggleSelect():this.selectNodeWidget(b)));return arguments},_onCheckNodeClick:function(a, b,e){!this.multiple&&b&&this.unselectAllLeafNodeWidgets();k.stop(e);this.focusNode(a);setTimeout(c.hitch(this,function(){b?this.selectNodeWidget(a):a.unselect();this.onClick(a.item,a,e)}),0)},unselectAllLeafNodeWidgets:function(){var a=this.getAllLeafTreeNodeWidgets();f.forEach(a,c.hitch(this,function(a){a.unselect()}))}})})},"dijit/Tree":function(){define("dojo/_base/array dojo/aspect dojo/cookie dojo/_base/declare dojo/Deferred dojo/promise/all dojo/dom dojo/dom-class dojo/dom-geometry dojo/dom-style dojo/errors/create dojo/fx dojo/has dojo/_base/kernel dojo/keys dojo/_base/lang dojo/on dojo/topic dojo/touch dojo/when ./a11yclick ./focus ./registry ./_base/manager ./_Widget ./_TemplatedMixin ./_Container ./_Contained ./_CssStateMixin ./_KeyNavMixin dojo/text!./templates/TreeNode.html dojo/text!./templates/Tree.html ./tree/TreeStoreModel ./tree/ForestStoreModel ./tree/_dndSelector dojo/query!css2".split(" "), function(g,m,q,h,c,d,f,k,p,l,b,a,e,n,x,r,u,C,t,z,E,K,B,F,G,I,J,M,H,L,N,O,R,P,Q){function v(a){return r.delegate(a.promise||a,{addCallback:function(a){this.then(a)},addErrback:function(a){this.otherwise(a)}})}var D=h("dijit._TreeNode",[G,I,J,M,H],{item:null,isTreeNode:!0,label:"",_setLabelAttr:function(a){this.labelNode["html"==this.labelType?"innerHTML":"innerText"in this.labelNode?"innerText":"textContent"]=a;this._set("label",a);e("dojo-bidi")&&this.applyTextDir(this.labelNode)},labelType:"text", isExpandable:null,isExpanded:!1,state:"NotLoaded",templateString:N,baseClass:"dijitTreeNode",cssStateNodes:{rowNode:"dijitTreeRow"},_setTooltipAttr:{node:"rowNode",type:"attribute",attribute:"title"},buildRendering:function(){this.inherited(arguments);this._setExpando();this._updateItemClasses(this.item);this.isExpandable&&this.labelNode.setAttribute("aria-expanded",this.isExpanded);this.setSelected(!1)},_setIndentAttr:function(a){var b=Math.max(a,0)*this.tree._nodePixelIndent+"px";l.set(this.domNode, "backgroundPosition",b+" 0px");l.set(this.rowNode,this.isLeftToRight()?"paddingLeft":"paddingRight",b);g.forEach(this.getChildren(),function(b){b.set("indent",a+1)});this._set("indent",a)},markProcessing:function(){this.state="Loading";this._setExpando(!0)},unmarkProcessing:function(){this._setExpando(!1)},_updateItemClasses:function(a){var b=this.tree,w=b.model;b._v10Compat&&a===w.root&&(a=null);this._applyClassAndStyle(a,"icon","Icon");this._applyClassAndStyle(a,"label","Label");this._applyClassAndStyle(a, "row","Row");this.tree._startPaint(!0)},_applyClassAndStyle:function(a,b,e){var w="_"+b+"Class";b+="Node";var c=this[w];this[w]=this.tree["get"+e+"Class"](a,this.isExpanded);k.replace(this[b],this[w]||"",c||"");l.set(this[b],this.tree["get"+e+"Style"](a,this.isExpanded)||{})},_updateLayout:function(){var a=this.getParent(),a=!a||!a.rowNode||"none"==a.rowNode.style.display;k.toggle(this.domNode,"dijitTreeIsRoot",a);k.toggle(this.domNode,"dijitTreeIsLast",!a&&!this.getNextSibling())},_setExpando:function(a){var b= ["dijitTreeExpandoLoading","dijitTreeExpandoOpened","dijitTreeExpandoClosed","dijitTreeExpandoLeaf"];a=a?0:this.isExpandable?this.isExpanded?1:2:3;k.replace(this.expandoNode,b[a],b);this.expandoNodeText.innerHTML=["*","-","+","*"][a]},expand:function(){if(this._expandDeferred)return v(this._expandDeferred);this._collapseDeferred&&(this._collapseDeferred.cancel(),delete this._collapseDeferred);this.isExpanded=!0;this.labelNode.setAttribute("aria-expanded","true");(this.tree.showRoot||this!==this.tree.rootNode)&& this.containerNode.setAttribute("role","group");k.add(this.contentNode,"dijitTreeContentExpanded");this._setExpando();this._updateItemClasses(this.item);this==this.tree.rootNode&&this.tree.showRoot&&this.tree.domNode.setAttribute("aria-expanded","true");var b=a.wipeIn({node:this.containerNode,duration:F.defaultDuration}),e=this._expandDeferred=new c(function(){b.stop()});m.after(b,"onEnd",function(){e.resolve(!0)},!0);b.play();return v(e)},collapse:function(){if(this._collapseDeferred)return v(this._collapseDeferred); this._expandDeferred&&(this._expandDeferred.cancel(),delete this._expandDeferred);this.isExpanded=!1;this.labelNode.setAttribute("aria-expanded","false");this==this.tree.rootNode&&this.tree.showRoot&&this.tree.domNode.setAttribute("aria-expanded","false");k.remove(this.contentNode,"dijitTreeContentExpanded");this._setExpando();this._updateItemClasses(this.item);var b=a.wipeOut({node:this.containerNode,duration:F.defaultDuration}),e=this._collapseDeferred=new c(function(){b.stop()});m.after(b,"onEnd", function(){e.resolve(!0)},!0);b.play();return v(e)},indent:0,setChildItems:function(a){var b=this.tree,e=b.model,w=[],c=b.focusedChild,n=this.getChildren();g.forEach(n,function(a){J.prototype.removeChild.call(this,a)},this);this.defer(function(){g.forEach(n,function(a){if(!a._destroyed&&!a.getParent()){var w=function(a){var c=e.getIdentity(a.item),d=b._itemNodesMap[c];1==d.length?delete b._itemNodesMap[c]:(c=g.indexOf(d,a),-1!=c&&d.splice(c,1));g.forEach(a.getChildren(),w)};b.dndController.removeTreeNode(a); w(a);if(b.persist){var d=g.map(a.getTreePath(),function(a){return b.model.getIdentity(a)}).join("/"),y;for(y in b._openedNodes)y.substr(0,d.length)==d&&delete b._openedNodes[y];b._saveExpandedNodes()}b.lastFocusedChild&&!f.isDescendant(b.lastFocusedChild.domNode,b.domNode)&&delete b.lastFocusedChild;c&&!f.isDescendant(c.domNode,b.domNode)&&b.focus();a.destroyRecursive()}})});this.state="Loaded";a&&0<a.length?(this.isExpandable=!0,g.forEach(a,function(a){var c=e.getIdentity(a),d=b._itemNodesMap[c], f;if(d)for(var y=0;y<d.length;y++)if(d[y]&&!d[y].getParent()){f=d[y];f.set("indent",this.indent+1);break}f||(f=this.tree._createTreeNode({item:a,tree:b,isExpandable:e.mayHaveChildren(a),label:b.getLabel(a),labelType:b.model&&b.model.labelType||"text",tooltip:b.getTooltip(a),ownerDocument:b.ownerDocument,dir:b.dir,lang:b.lang,textDir:b.textDir,indent:this.indent+1}),d?d.push(f):b._itemNodesMap[c]=[f]);this.addChild(f);(this.tree.autoExpand||this.tree._state(f))&&w.push(b._expandNode(f))},this),g.forEach(this.getChildren(), function(a){a._updateLayout()})):this.isExpandable=!1;this._setExpando&&this._setExpando(!1);this._updateItemClasses(this.item);a=d(w);this.tree._startPaint(a);return v(a)},getTreePath:function(){for(var a=this,b=[];a&&a!==this.tree.rootNode;)b.unshift(a.item),a=a.getParent();b.unshift(this.tree.rootNode.item);return b},getIdentity:function(){return this.tree.model.getIdentity(this.item)},removeChild:function(a){this.inherited(arguments);var b=this.getChildren();0==b.length&&(this.isExpandable=!1, this.collapse());g.forEach(b,function(a){a._updateLayout()})},makeExpandable:function(){this.isExpandable=!0;this._setExpando(!1)},setSelected:function(a){this.labelNode.setAttribute("aria-selected",a?"true":"false");k.toggle(this.rowNode,"dijitTreeRowSelected",a)},focus:function(){K.focus(this.focusNode)}});e("dojo-bidi")&&D.extend({_setTextDirAttr:function(a){!a||this.textDir==a&&this._created||(this._set("textDir",a),this.applyTextDir(this.labelNode),g.forEach(this.getChildren(),function(b){b.set("textDir", a)},this))}});var A=h("dijit.Tree",[G,L,I,H],{baseClass:"dijitTree",store:null,model:null,query:null,label:"",showRoot:!0,childrenAttr:["children"],paths:[],path:[],selectedItems:null,selectedItem:null,openOnClick:!1,openOnDblClick:!1,templateString:O,persist:!1,autoExpand:!1,dndController:Q,dndParams:"onDndDrop itemCreator onDndCancel checkAcceptance checkItemAcceptance dragThreshold betweenThreshold".split(" "),onDndDrop:null,itemCreator:null,onDndCancel:null,checkAcceptance:null,checkItemAcceptance:null, dragThreshold:5,betweenThreshold:0,_nodePixelIndent:19,_publish:function(a,b){C.publish(this.id,r.mixin({tree:this,event:a},b||{}))},postMixInProperties:function(){this.tree=this;this.autoExpand&&(this.persist=!1);this._itemNodesMap={};!this.cookieName&&this.id&&(this.cookieName=this.id+"SaveStateCookie");this.expandChildrenDeferred=new c;this.pendingCommandsPromise=this.expandChildrenDeferred.promise;this.inherited(arguments)},postCreate:function(){this._initState();var a=this;this.own(u(this.containerNode, u.selector(".dijitTreeNode",t.enter),function(b){a._onNodeMouseEnter(B.byNode(this),b)}),u(this.containerNode,u.selector(".dijitTreeNode",t.leave),function(b){a._onNodeMouseLeave(B.byNode(this),b)}),u(this.containerNode,u.selector(".dijitTreeRow",E.press),function(b){a._onNodePress(B.getEnclosingWidget(this),b)}),u(this.containerNode,u.selector(".dijitTreeRow",E),function(b){a._onClick(B.getEnclosingWidget(this),b)}),u(this.containerNode,u.selector(".dijitTreeRow","dblclick"),function(b){a._onDblClick(B.getEnclosingWidget(this), b)}));this.model||this._store2model();this.own(m.after(this.model,"onChange",r.hitch(this,"_onItemChange"),!0),m.after(this.model,"onChildrenChange",r.hitch(this,"_onItemChildrenChange"),!0),m.after(this.model,"onDelete",r.hitch(this,"_onItemDelete"),!0));this.inherited(arguments);if(this.dndController){r.isString(this.dndController)&&(this.dndController=r.getObject(this.dndController));for(var b={},e=0;e<this.dndParams.length;e++)this[this.dndParams[e]]&&(b[this.dndParams[e]]=this[this.dndParams[e]]); this.dndController=new this.dndController(this,b)}this._load();this.onLoadDeferred=v(this.pendingCommandsPromise);this.onLoadDeferred.then(r.hitch(this,"onLoad"))},_store2model:function(){this._v10Compat=!0;n.deprecated("Tree: from version 2.0, should specify a model object rather than a store/query");var a={id:this.id+"_ForestStoreModel",store:this.store,query:this.query,childrenAttrs:this.childrenAttr};this.params.mayHaveChildren&&(a.mayHaveChildren=r.hitch(this,"mayHaveChildren"));this.params.getItemChildren&& (a.getChildren=r.hitch(this,function(a,b,e){this.getItemChildren(this._v10Compat&&a===this.model.root?null:a,b,e)}));this.model=new P(a);this.showRoot=!!this.label},onLoad:function(){},_load:function(){this.model.getRoot(r.hitch(this,function(a){var b=this.rootNode=this.tree._createTreeNode({item:a,tree:this,isExpandable:!0,label:this.label||this.getLabel(a),labelType:this.model.labelType||"text",textDir:this.textDir,indent:this.showRoot?0:-1});this.showRoot?(this.domNode.setAttribute("aria-multiselectable", !this.dndController.singular),this.rootLoadingIndicator.style.display="none"):(b.rowNode.style.display="none",this.domNode.setAttribute("role","presentation"),this.domNode.removeAttribute("aria-expanded"),this.domNode.removeAttribute("aria-multiselectable"),this["aria-label"]?(b.containerNode.setAttribute("aria-label",this["aria-label"]),this.domNode.removeAttribute("aria-label")):this["aria-labelledby"]&&(b.containerNode.setAttribute("aria-labelledby",this["aria-labelledby"]),this.domNode.removeAttribute("aria-labelledby")), b.labelNode.setAttribute("role","presentation"),b.labelNode.removeAttribute("aria-selected"),b.containerNode.setAttribute("role","tree"),b.containerNode.setAttribute("aria-expanded","true"),b.containerNode.setAttribute("aria-multiselectable",!this.dndController.singular));this.containerNode.appendChild(b.domNode);a=this.model.getIdentity(a);this._itemNodesMap[a]?this._itemNodesMap[a].push(b):this._itemNodesMap[a]=[b];b._updateLayout();this._expandNode(b).then(r.hitch(this,function(){this._destroyed|| (this.rootLoadingIndicator.style.display="none",this.expandChildrenDeferred.resolve(!0))}))}),r.hitch(this,function(a){console.error(this,": error loading root: ",a)}))},getNodesByItem:function(a){if(!a)return[];a=r.isString(a)?a:this.model.getIdentity(a);return[].concat(this._itemNodesMap[a])},_setSelectedItemAttr:function(a){this.set("selectedItems",[a])},_setSelectedItemsAttr:function(a){var b=this;return this.pendingCommandsPromise=this.pendingCommandsPromise.always(r.hitch(this,function(){var e= g.map(a,function(a){return!a||r.isString(a)?a:b.model.getIdentity(a)}),c=[];g.forEach(e,function(a){c=c.concat(b._itemNodesMap[a]||[])});this.set("selectedNodes",c)}))},_setPathAttr:function(a){return a.length?v(this.set("paths",[a]).then(function(a){return a[0]})):v(this.set("paths",[]).then(function(a){return a[0]}))},_setPathsAttr:function(a){function b(a,c){var d=a.shift(),f=g.filter(c,function(a){return a.getIdentity()==d})[0];if(f)return a.length?e._expandNode(f).then(function(){return b(a, f.getChildren())}):f;throw new A.PathError("Could not expand path at "+d);}var e=this;return v(this.pendingCommandsPromise=this.pendingCommandsPromise.always(function(){return d(g.map(a,function(a){a=g.map(a,function(a){return a&&r.isObject(a)?e.model.getIdentity(a):a});if(a.length)return b(a,[e.rootNode]);throw new A.PathError("Empty path");}))}).then(function(a){e.set("selectedNodes",a);return e.paths}))},_setSelectedNodeAttr:function(a){this.set("selectedNodes",[a])},_setSelectedNodesAttr:function(a){this.dndController.setSelection(a)}, expandAll:function(){function a(e){return b._expandNode(e).then(function(){var b=g.filter(e.getChildren()||[],function(a){return a.isExpandable});return d(g.map(b,a))})}var b=this;return v(a(this.rootNode))},collapseAll:function(){function a(e){var c=g.filter(e.getChildren()||[],function(a){return a.isExpandable}),c=d(g.map(c,a));return!e.isExpanded||e==b.rootNode&&!b.showRoot?c:c.then(function(){return b._collapseNode(e)})}var b=this;return v(a(this.rootNode))},mayHaveChildren:function(){},getItemChildren:function(){}, getLabel:function(a){return this.model.getLabel(a)},getIconClass:function(a,b){return!a||this.model.mayHaveChildren(a)?b?"dijitFolderOpened":"dijitFolderClosed":"dijitLeaf"},getLabelClass:function(){},getRowClass:function(){},getIconStyle:function(){},getLabelStyle:function(){},getRowStyle:function(){},getTooltip:function(){return""},_onDownArrow:function(a,b){(a=this._getNext(b))&&a.isTreeNode&&this.focusNode(a)},_onUpArrow:function(a,b){if(a=b.getPreviousSibling())for(b=a;b.isExpandable&&b.isExpanded&& b.hasChildren();)b=b.getChildren(),b=b[b.length-1];else if(a=b.getParent(),this.showRoot||a!==this.rootNode)b=a;b&&b.isTreeNode&&this.focusNode(b)},_onRightArrow:function(a,b){b.isExpandable&&!b.isExpanded?this._expandNode(b):b.hasChildren()&&(b=b.getChildren()[0])&&b.isTreeNode&&this.focusNode(b)},_onLeftArrow:function(a,b){b.isExpandable&&b.isExpanded?this._collapseNode(b):(a=b.getParent())&&a.isTreeNode&&(this.showRoot||a!==this.rootNode)&&this.focusNode(a)},focusLastChild:function(){var a=this._getLast(); a&&a.isTreeNode&&this.focusNode(a)},_getFirst:function(){return this.showRoot?this.rootNode:this.rootNode.getChildren()[0]},_getLast:function(){for(var a=this.rootNode;a.isExpanded;){var b=a.getChildren();if(!b.length)break;a=b[b.length-1]}return a},_getNext:function(a){if(a.isExpandable&&a.isExpanded&&a.hasChildren())return a.getChildren()[0];for(;a&&a.isTreeNode;){var b=a.getNextSibling();if(b)return b;a=a.getParent()}return null},childSelector:".dijitTreeRow",isExpandoNode:function(a,b){return f.isDescendant(a, b.expandoNode)||f.isDescendant(a,b.expandoNodeText)},_onNodePress:function(a,b){this.focusNode(a)},__click:function(a,b,e,c){var d=this.isExpandoNode(b.target,a);a.isExpandable&&(e||d)?this._onExpandoClick({node:a}):(this._publish("execute",{item:a.item,node:a,evt:b}),this[c](a.item,a,b),this.focusNode(a));b.stopPropagation();b.preventDefault()},_onClick:function(a,b){this.__click(a,b,this.openOnClick,"onClick")},_onDblClick:function(a,b){this.__click(a,b,this.openOnDblClick,"onDblClick")},_onExpandoClick:function(a){a= a.node;this.focusNode(a);a.isExpanded?this._collapseNode(a):this._expandNode(a)},onClick:function(){},onDblClick:function(){},onOpen:function(){},onClose:function(){},_getNextNode:function(a){n.deprecated(this.declaredClass+"::_getNextNode(node) is deprecated. Use _getNext(node) instead.","","2.0");return this._getNext(a)},_getRootOrFirstNode:function(){n.deprecated(this.declaredClass+"::_getRootOrFirstNode() is deprecated. Use _getFirst() instead.","","2.0");return this._getFirst()},_collapseNode:function(a){a._expandNodeDeferred&& delete a._expandNodeDeferred;if("Loading"!=a.state&&a.isExpanded){var b=a.collapse();this.onClose(a.item,a);this._state(a,!1);this._startPaint(b);return b}},_expandNode:function(a){if(a._expandNodeDeferred)return a._expandNodeDeferred;var b=this.model,e=a.item,d=this;a._loadDeferred||(a.markProcessing(),a._loadDeferred=new c,b.getChildren(e,function(b){a.unmarkProcessing();a.setChildItems(b).then(function(){a._loadDeferred.resolve(b)})},function(b){console.error(d,": error loading "+a.label+" children: ", b);a._loadDeferred.reject(b)}));b=a._loadDeferred.then(r.hitch(this,function(){var b=a.expand();this.onOpen(a.item,a);this._state(a,!0);return b}));this._startPaint(b);return b},focusNode:function(a){for(var b=[],e=this.domNode;e&&e.tagName&&"IFRAME"!==e.tagName.toUpperCase();e=e.parentNode)b.push({domNode:e.contentWindow||e,scrollLeft:e.scrollLeft||0,scrollTop:e.scrollTop||0});this.focusChild(a);this.defer(function(){for(var a=0,e=b.length;a<e;a++)b[a].domNode.scrollLeft=b[a].scrollLeft,b[a].domNode.scrollTop= b[a].scrollTop},0)},_onNodeMouseEnter:function(){},_onNodeMouseLeave:function(){},_onItemChange:function(a){var b=this.model.getIdentity(a);if(b=this._itemNodesMap[b]){var e=this.getLabel(a),c=this.getTooltip(a);g.forEach(b,function(b){b.set({item:a,label:e,tooltip:c});b._updateItemClasses(a)})}},_onItemChildrenChange:function(a,b){a=this.model.getIdentity(a);(a=this._itemNodesMap[a])&&g.forEach(a,function(a){a.setChildItems(b)})},_onItemDelete:function(a){a=this.model.getIdentity(a);var b=this._itemNodesMap[a]; b&&(g.forEach(b,function(a){this.dndController.removeTreeNode(a);var b=a.getParent();b&&b.removeChild(a);this.lastFocusedChild&&!f.isDescendant(this.lastFocusedChild.domNode,this.domNode)&&delete this.lastFocusedChild;this.focusedChild&&!f.isDescendant(this.focusedChild.domNode,this.domNode)&&this.focus();a.destroyRecursive()},this),delete this._itemNodesMap[a])},_initState:function(){this._openedNodes={};if(this.persist&&this.cookieName){var a=q(this.cookieName);a&&g.forEach(a.split(","),function(a){this._openedNodes[a]= !0},this)}},_state:function(a,b){if(!this.persist)return!1;var e=g.map(a.getTreePath(),function(a){return this.model.getIdentity(a)},this).join("/");if(1===arguments.length)return this._openedNodes[e];b?this._openedNodes[e]=!0:delete this._openedNodes[e];this._saveExpandedNodes()},_saveExpandedNodes:function(){if(this.persist&&this.cookieName){var a=[],b;for(b in this._openedNodes)a.push(b);q(this.cookieName,a.join(","),{expires:365})}},destroy:function(){this._curSearch&&(this._curSearch.timer.remove(), delete this._curSearch);this.rootNode&&this.rootNode.destroyRecursive();this.dndController&&!r.isString(this.dndController)&&this.dndController.destroy();this.rootNode=null;this.inherited(arguments)},destroyRecursive:function(){this.destroy()},resize:function(a){a&&p.setMarginBox(this.domNode,a);this._nodePixelIndent=p.position(this.tree.indentDetector).w||this._nodePixelIndent;this.expandChildrenDeferred.then(r.hitch(this,function(){this.rootNode.set("indent",this.showRoot?0:-1);this._adjustWidths()}))}, _outstandingPaintOperations:0,_startPaint:function(a){this._outstandingPaintOperations++;this._adjustWidthsTimer&&(this._adjustWidthsTimer.remove(),delete this._adjustWidthsTimer);var b=r.hitch(this,function(){this._outstandingPaintOperations--;0>=this._outstandingPaintOperations&&!this._adjustWidthsTimer&&this._started&&(this._adjustWidthsTimer=this.defer("_adjustWidths"))});z(a,b,b)},_adjustWidths:function(){this._adjustWidthsTimer&&(this._adjustWidthsTimer.remove(),delete this._adjustWidthsTimer); this.containerNode.style.width="auto";this.containerNode.style.width=this.domNode.scrollWidth>this.domNode.offsetWidth?"auto":"100%"},_createTreeNode:function(a){return new D(a)},focus:function(){this.lastFocusedChild?this.focusNode(this.lastFocusedChild):this.focusFirstChild()}});e("dojo-bidi")&&A.extend({_setTextDirAttr:function(a){a&&this.textDir!=a&&(this._set("textDir",a),this.rootNode.set("textDir",a))}});A.PathError=b("TreePathError");A._TreeNode=D;return A})},"dijit/tree/TreeStoreModel":function(){define(["dojo/_base/array", "dojo/aspect","dojo/_base/declare","dojo/_base/lang"],function(g,m,q,h){return q("dijit.tree.TreeStoreModel",null,{store:null,childrenAttrs:["children"],newItemIdAttr:"id",labelAttr:"",root:null,query:null,deferItemLoadingUntilExpand:!1,constructor:function(c){h.mixin(this,c);this.connects=[];c=this.store;if(!c.getFeatures()["dojo.data.api.Identity"])throw Error("dijit.tree.TreeStoreModel: store must support dojo.data.Identity");c.getFeatures()["dojo.data.api.Notification"]&&(this.connects=this.connects.concat([m.after(c, "onNew",h.hitch(this,"onNewItem"),!0),m.after(c,"onDelete",h.hitch(this,"onDeleteItem"),!0),m.after(c,"onSet",h.hitch(this,"onSetItem"),!0)]))},destroy:function(){for(var c;c=this.connects.pop();)c.remove()},getRoot:function(c,d){this.root?c(this.root):this.store.fetch({query:this.query,onComplete:h.hitch(this,function(d){if(1!=d.length)throw Error("dijit.tree.TreeStoreModel: root query returned "+d.length+" items, but must return exactly one");this.root=d[0];c(this.root)}),onError:d})},mayHaveChildren:function(c){return g.some(this.childrenAttrs, function(d){return this.store.hasAttribute(c,d)},this)},getChildren:function(c,d,f){var k=this.store;if(k.isItemLoaded(c)){for(var p=[],l=0;l<this.childrenAttrs.length;l++)var b=k.getValues(c,this.childrenAttrs[l]),p=p.concat(b);var a=0;this.deferItemLoadingUntilExpand||g.forEach(p,function(b){k.isItemLoaded(b)||a++});0==a?d(p):g.forEach(p,function(b,e){k.isItemLoaded(b)||k.loadItem({item:b,onItem:function(b){p[e]=b;0==--a&&d(p)},onError:f})})}else{var e=h.hitch(this,arguments.callee);k.loadItem({item:c, onItem:function(a){e(a,d,f)},onError:f})}},isItem:function(c){return this.store.isItem(c)},fetchItemByIdentity:function(c){this.store.fetchItemByIdentity(c)},getIdentity:function(c){return this.store.getIdentity(c)},getLabel:function(c){return this.labelAttr?this.store.getValue(c,this.labelAttr):this.store.getLabel(c)},newItem:function(c,d,f){var k={parent:d,attribute:this.childrenAttrs[0]},g;this.newItemIdAttr&&c[this.newItemIdAttr]?this.fetchItemByIdentity({identity:c[this.newItemIdAttr],scope:this,
"dojo/_base/declare","dojo/_base/kernel","dojo/_base/lang","./TreeStoreModel"],function(g,m,q,h,c){return m("dijit.tree.ForestStoreModel",c,{rootId:"$root$",rootLabel:"ROOT",query:null,constructor:function(c){this.root={store:this,root:!0,id:c.rootId,label:c.rootLabel,children:c.rootChildren}},mayHaveChildren:function(c){return c===this.root||this.inherited(arguments)},getChildren:function(c,f,g){c===this.root?this.root.children?f(this.root.children):this.store.fetch({query:this.query,onComplete:h.hitch(this, function(c){this.root.children=c;f(c)}),onError:g}):this.inherited(arguments)},isItem:function(c){return c===this.root?!0:this.inherited(arguments)},fetchItemByIdentity:function(c){if(c.identity==this.root.id){var d=c.scope||q.global;c.onItem&&c.onItem.call(d,this.root)}else this.inherited(arguments)},getIdentity:function(c){return c===this.root?this.root.id:this.inherited(arguments)},getLabel:function(c){return c===this.root?this.root.label:this.inherited(arguments)},newItem:function(c,f,g){return f=== this.root?(this.onNewRootItem(c),this.store.newItem(c)):this.inherited(arguments)},onNewRootItem:function(){},pasteItem:function(c,f,g,h,l){if(f===this.root&&!h)this.onLeaveRoot(c);this.inherited(arguments,[c,f===this.root?null:f,g===this.root?null:g,h,l]);if(g===this.root)this.onAddToRoot(c)},onAddToRoot:function(c){console.log(this,": item ",c," added to root")},onLeaveRoot:function(c){console.log(this,": item ",c," removed from root")},_requeryTop:function(){var c=this.root.children||[];this.store.fetch({query:this.query, onComplete:h.hitch(this,function(f){this.root.children=f;if(c.length!=f.length||g.some(c,function(c,d){return f[d]!=c}))this.onChildrenChange(this.root,f)})})},onNewItem:function(c,f){this._requeryTop();this.inherited(arguments)},onDeleteItem:function(c){-1!=g.indexOf(this.root.children,c)&&this._requeryTop();this.inherited(arguments)},onSetItem:function(c,f,g,h){this._requeryTop();this.inherited(arguments)}})})},"dijit/tree/_dndSelector":function(){define("dojo/_base/array dojo/_base/declare dojo/_base/kernel dojo/_base/lang dojo/dnd/common dojo/dom dojo/mouse dojo/on dojo/touch ../a11yclick ./_dndContainer".split(" "), function(g,m,q,h,c,d,f,k,p,l,b){return m("dijit.tree._dndSelector",b,{constructor:function(){this.selection={};this.anchor=null;this.events.push(k(this.tree.domNode,p.press,h.hitch(this,"onMouseDown")),k(this.tree.domNode,p.release,h.hitch(this,"onMouseUp")),k(this.tree.domNode,p.move,h.hitch(this,"onMouseMove")),k(this.tree.domNode,l.press,h.hitch(this,"onClickPress")),k(this.tree.domNode,l.release,h.hitch(this,"onClickRelease")))},singular:!1,getSelectedTreeNodes:function(){var a=[],b=this.selection, c;for(c in b)a.push(b[c]);return a},selectNone:function(){this.setSelection([]);return this},destroy:function(){this.inherited(arguments);this.selection=this.anchor=null},addTreeNode:function(a,b){this.setSelection(this.getSelectedTreeNodes().concat([a]));b&&(this.anchor=a);return a},removeTreeNode:function(a){var b=g.filter(this.getSelectedTreeNodes(),function(b){return!d.isDescendant(b.domNode,a.domNode)});this.setSelection(b);return a},isTreeNodeSelected:function(a){return a.id&&!!this.selection[a.id]}, setSelection:function(a){var b=this.getSelectedTreeNodes();g.forEach(this._setDifference(b,a),h.hitch(this,function(a){a.setSelected(!1);this.anchor==a&&delete this.anchor;delete this.selection[a.id]}));g.forEach(this._setDifference(a,b),h.hitch(this,function(a){a.setSelected(!0);this.selection[a.id]=a}));this._updateSelectionProperties()},_setDifference:function(a,b){g.forEach(b,function(a){a.__exclude__=!0});a=g.filter(a,function(a){return!a.__exclude__});g.forEach(b,function(a){delete a.__exclude__}); return a},_updateSelectionProperties:function(){var a=this.getSelectedTreeNodes(),b=[],c=[];g.forEach(a,function(a){var e=a.getTreePath();c.push(a);b.push(e)},this);a=g.map(c,function(a){return a.item});this.tree._set("paths",b);this.tree._set("path",b[0]||[]);this.tree._set("selectedNodes",c);this.tree._set("selectedNode",c[0]||null);this.tree._set("selectedItems",a);this.tree._set("selectedItem",a[0]||null)},onClickPress:function(a){if(!(this.current&&this.current.isExpandable&&this.tree.isExpandoNode(a.target, this.current))){"mousedown"==a.type&&f.isLeft(a)&&a.preventDefault();var b="keydown"==a.type?this.tree.focusedChild:this.current;if(b){var d=c.getCopyKeyState(a),g=b.id;this.singular||a.shiftKey||!this.selection[g]?(this._doDeselect=!1,this.userSelect(b,d,a.shiftKey)):this._doDeselect=!0}}},onClickRelease:function(a){this._doDeselect&&(this._doDeselect=!1,this.userSelect("keyup"==a.type?this.tree.focusedChild:this.current,c.getCopyKeyState(a),a.shiftKey))},onMouseMove:function(){this._doDeselect= !1},onMouseDown:function(){},onMouseUp:function(){},_compareNodes:function(a,b){if(a===b)return 0;if("sourceIndex"in document.documentElement)return a.sourceIndex-b.sourceIndex;if("compareDocumentPosition"in document.documentElement)return a.compareDocumentPosition(b)&2?1:-1;if(document.createRange){var c=doc.createRange();c.setStartBefore(a);a=doc.createRange();a.setStartBefore(b);return c.compareBoundaryPoints(c.END_TO_END,a)}throw Error("dijit.tree._compareNodes don't know how to compare two different nodes in this browser"); },userSelect:function(a,b,c){if(this.singular)this.anchor==a&&b?this.selectNone():(this.setSelection([a]),this.anchor=a);else if(c&&this.anchor){b=this._compareNodes(this.anchor.rowNode,a.rowNode);c=this.anchor;0>b?b=c:(b=a,a=c);for(c=[];b!=a;)c.push(b),b=this.tree._getNext(b);c.push(a);this.setSelection(c)}else this.selection[a.id]&&b?this.removeTreeNode(a):b?this.addTreeNode(a,!0):(this.setSelection([a]),this.anchor=a)},getItem:function(a){return{data:this.selection[a],type:["treeNode"]}},forInSelectedItems:function(a, b){b=b||q.global;for(var c in this.selection)a.call(b,this.getItem(c),c,this)}})})},"dijit/tree/_dndContainer":function(){define("dojo/aspect dojo/_base/declare dojo/dom-class dojo/_base/lang dojo/on dojo/touch".split(" "),function(g,m,q,h,c,d){return m("dijit.tree._dndContainer",null,{constructor:function(f,k){this.tree=f;this.node=f.domNode;h.mixin(this,k);this.containerState="";q.add(this.node,"dojoDndContainer");this.events=[c(this.node,d.enter,h.hitch(this,"onOverEvent")),c(this.node,d.leave, h.hitch(this,"onOutEvent")),g.after(this.tree,"_onNodeMouseEnter",h.hitch(this,"onMouseOver"),!0),g.after(this.tree,"_onNodeMouseLeave",h.hitch(this,"onMouseOut"),!0),c(this.node,"dragstart, selectstart",function(c){c.preventDefault()})]},destroy:function(){for(var c;c=this.events.pop();)c.remove();this.node=this.parent=null},onMouseOver:function(c){this.current=c},onMouseOut:function(){this.current=null},_changeState:function(c,d){var f="dojoDnd"+c;c=c.toLowerCase()+"State";q.replace(this.node,f+ d,f+this[c]);this[c]=d},_addItemClass:function(c,d){q.add(c,"dojoDndItem"+d)},_removeItemClass:function(c,d){q.remove(c,"dojoDndItem"+d)},onOverEvent:function(){this._changeState("Container","Over")},onOutEvent:function(){this._changeState("Container","")}})})},"jimu/dijit/LayerChooserFromMapLite":function(){define("dojo/on dojo/_base/declare dojo/promise/all dojo/_base/lang dojo/_base/html dojo/_base/array jimu/dijit/CheckBox jimu/dijit/LayerChooserFromMap jimu/LayerStructure".split(" "),function(g, m,q,h,c,d,f,k,p){var l=m([k],{templateString:'\x3cdiv style\x3d"width:100%;"\x3e\x3cdiv data-dojo-attach-point\x3d"errorTipSection" class\x3d"error-tip-section"\x3e\x3cspan class\x3d"jimu-icon jimu-icon-error"\x3e\x3c/span\x3e\x3cspan class\x3d"jimu-state-error-text" data-dojo-attach-point\x3d"errTip"\x3e${nls.noLayersTip}\x3c/span\x3e\x3c/div\x3e\x3cdiv data-dojo-attach-point\x3d"treeSection" class\x3d"tree-section"\x3e\x3cul data-dojo-attach-point\x3d"treeUl" class\x3d"tree-ul tree-root-ul"\x3e\x3c/ul\x3e\x3c/div\x3e\x3c/div\x3e', map:null,layerStateController:null,layerState:null,customFilter:null,onlySelectLeafLayer:!1,displayLayerTypeIcon:!0,showTables:!0,viewMode:!1,onlyShowWebMapLayers:!1,layerStructure:null,_layerDatas:null,_eventHandles:null,postMixInProperties:function(){this.nls=window.jimuNls.basicLayerChooserFromMap},postCreate:function(){c.addClass(this.domNode,"jimu-basic-layer-chooser-from-map");c.addClass(this.domNode,"jimu-basic-layer-chooser-from-map-lite");this._layerDatas={};this._eventHandles=[];this.layerStructure= this.map?p.createInstance(this.map):p.getInstance();this.layerInfosObj=this.layerStructure._layerInfos;this.layerState=this._clearLayerState(this.layerState)||{};this.layerStateController||(this.layerStateController=new l.LayerStateController);var b;b=this.customFilter?h.hitch(this,this.customFilter):h.hitch(this,this.filter);this.filter=k.andCombineFilters([this.basicFilter,b]);this._createTree()},_createTree:function(){var b,a;this.onlyShowWebMapLayers?(b=this.layerStructure.getWebmapLayerNodes(), a=this.layerStructure.getWebmapTableNodes()):(b=this.layerStructure.getLayerNodes(),a=this.layerStructure.getTableNodes());0<this._createLayerNodes(b.concat(this.showTables?a:[]),this.treeUl)&&(c.setStyle(this.errorTipSection,"display","none"),this.layerStateController.restoreState(this.layerState,this.layerStructure))},_createLayerNodes:function(b,a){var c=d.map(b,function(a){return this.filter(a._layerInfo)},this),f=0;q(c).then(h.hitch(this,function(c){d.forEach(c,function(c,e){c&&(this._createLayerNode(b[e], a),f++)},this)}));return f},_createLayerNode:function(b,a){var e=c.create("li",{"class":"tree-node-li",id:"layerchooserlite-tree-node-li-"+b.id},a),d=c.create("div",{"class":"tree-node-div"},e);a=c.create("span",{"class":"tree-node-column-span collapse-span"},d);var k=c.create("span",{"class":"tree-node-column-span check-box-span"},d),k=c.create("div",{"class":"tree-node-column-div check-box-div"},k),l=c.create("span",{"class":"tree-node-column-span icon-span "+(this.displayLayerTypeIcon?"display": "")},d),m;m=(m=this.layerState[b.id])?m.selected:this.layerStateController.getState(b);m=new f({checked:m},k);var p=c.create("span",{"class":"tree-node-column-span title-span",innerHTML:b.title},d),q=c.create("ul",{"class":"tree-ul tree-subnode-ul",style:"display:none; "},e),e={layerNode:b,layerNodeLi:e,layerNodeDiv:d,collapseSpan:a,iconSpan:l,checkBox:m,subLayerNodeUl:q,hasBeenOpened:!1};this._layerDatas[b.id]=e;b.isLeaf()||(c.addClass(a,"is-leaf"),c.addClass(p,"is-leaf"),b=g(a,"click",h.hitch(this, this._onCollapse,e)),this._eventHandles.push(b),b=g(p,"click",h.hitch(this,this._onCollapse,e)),this._eventHandles.push(b),this.onlySelectLeafLayer&&(m.setStatus(!1),c.setStyle(k,"display","none")));!0===this.viewMode&&m.setStatus(!1);b=g(m.domNode,"click",h.hitch(this,this._onCheckBoxChange,e));this._eventHandles.push(b);this._setIconImage(e,!1);return e},_setIconImage:function(b,a){if(this.displayLayerTypeIcon){var e=b.layerNode,d=e.getLayerType(),f=e.getLayerObject();q({layerType:d,layerObject:f}).then(h.hitch(this, function(d){var f;d.layerType&&d.layerObject&&(f={type:d.layerType,layerInfo:e._layerInfo},d=window.location.protocol+"//"+window.location.host+require.toUrl("jimu"),(f=this._getIconInfo(f,a).imageName)&&c.setStyle(b.iconSpan,"background-image","url("+d+"/css/images/"+f+")"))}))}},_getCheckBoxValue:function(b){return b.getStatus()?b.getValue():!1},_clearLayerState:function(b){var a={};b&&this.layerStructure.traversal(h.hitch(this,function(c){b[c.id]&&(a[c.id]={selected:b[c.id].selected})}));return a}, _selectOrDeselectLayer:function(b,a){if(b=this._layerDatas[b])b.checkBox.setValue(a),this._onCheckBoxChange(b)},selectLayer:function(b){this._selectOrDeselectLayer(b,!0)},deselectLayer:function(b){this._selectOrDeselectLayer(b,!1)},getState:function(){var b=h.clone(this.layerState),a;for(a in this._layerDatas)this._layerDatas.hasOwnProperty(a)&&"function"!==typeof this._layerDatas[a]&&(this._getCheckBoxValue(this._layerDatas[a].checkBox)?b[a]={selected:!0}:b[a]={selected:!1});return b},restoreState:function(b){this.layerState= this._clearLayerState(b);for(var a in this._layerDatas)if(this._layerDatas.hasOwnProperty(a)&&"function"!==typeof this._layerDatas[a]){var c=(b=this._layerDatas[a])&&b.checkBox,d=this.layerState[a];d?c.setValue(d.selected):c.setValue(this.layerStateController.getState(b.layerNode))}this.layerStateController.restoreState(this.layerState,this.layerStructure)},setViewMode:function(b){for(var a in this._layerDatas)if(this._layerDatas.hasOwnProperty(a)&&"function"!==typeof this._layerDatas[a]){var c=this._layerDatas[a], c=c&&c.checkBox;!0===b?(this.viewMode=!0,c.setStatus(!1)):(this.viewMode=!1,c.setStatus(!0))}},getSelectedLayerNodes:function(){var b=[],a=this.getState(),c;for(c in a)if(a.hasOwnProperty(c)&&"function"!==typeof a[c]&&a[c].selected){var d=this.layerStructure.getNodeById(c);d&&b.push(d)}return b},getLoadedLayerNodes:function(){var b=[],a;for(a in this._layerDatas)if(this._layerDatas.hasOwnProperty(a)&&"function"!==typeof this._layerDatas[a]){var c=this.layerStructure.getNodeById(a);c&&b.push(c)}return b}, getLayerAssociateDomNodesById:function(b){var a=null;(b=this._layerDatas[b])&&(a={collapseIcon:b.collapseSpan,checkBox:b.checkBox.domNode,layerTypeIcon:b.iconSpan});return a},getSelectedItems:function(){var b=[];return b=d.map(this.getSelectedLayerNodes(),function(a){return{name:a.title,url:a.getUrl(),layerInfo:a._layerInfo}},this)},getAllItems:function(){return[]},_clear:function(){this._layerDatas={};d.forEach(this._eventHandles,function(b){b.remove()},this);this._eventHandles=[];c.empty(this.treeUl)}, destroy:function(){this._clear();this.map&&this.layerStructure.destroy();this.shelter&&(this.shelter.destroy(),this.shelter=null);this.inherited(arguments)},_onCollapse:function(b){var a="none"===c.getStyle(b.subLayerNodeUl,"display")?!0:!1;a?(c.setStyle(b.subLayerNodeUl,"display","block"),c.addClass(b.collapseSpan,"opened")):(c.setStyle(b.subLayerNodeUl,"display","none"),c.removeClass(b.collapseSpan,"opened"));this._setIconImage(b,a);b.hasBeenOpened||(this._createLayerNodes(b.layerNode.getSubNodes(), b.subLayerNodeUl),b.hasBeenOpened=!0)},_onCheckBoxChange:function(b,a){this.layerStateController.setState(b.layerNode,this._getCheckBoxValue(b.checkBox));this.emit("selection-change",b.layerNode,this._getCheckBoxValue(b.checkBox));this._onTreeClick(b,a)},_onLayerInfosChanged:function(){this._createTree();this.emit("update")},_onLayerInfosIsShowInMapChanged:function(){this._createTree();this.emit("update")},_onTreeClick:function(b,a){b={name:b.layerNode.title||"",parent:null,layerInfo:b.layerNode._layerInfo, type:null,layerClass:null,id:null,isLeaf:b.layerNode.isLeaf(),hasChildren:b.layerNode.isLeaf()?!1:!0};this.emit("tree-click",b,null,a)}});l.LayerStateController=m(null,{getState:function(b){return!0},setState:function(b,a){return this},restoreState:function(b,a){return this}});l.LayerVisibilityStateController=m(l.LayerStateController,{getState:function(b){return b.isToggledOn()},setState:function(b,a){b.toggle();return this},restoreState:function(b,a){var c={layerOptions:{}},d;for(d in b)b.hasOwnProperty(d)&& "function"!==typeof b[d]&&(c.layerOptions[d]={visible:b[d].selected});a.restoreState(c);return this}});l.layerVisibilityStateController=new l.LayerVisibilityStateController;l.LayerLegendStateController=m(l.LayerStateController,{getState:function(b){return b.isShowLegend()}});l.layerLegendStateController=new l.LayerLegendStateController;return l})},"jimu/dijit/ColorPickerButton":function(){define("dojo/_base/declare dijit/_WidgetBase dijit/_TemplatedMixin dojo/_base/lang dojo/_base/html dojo/on dojo/_base/Color dijit/TooltipDialog dijit/popup jimu/dijit/ColorSelector".split(" "), function(g,m,q,h,c,d,f,k,p,l){return g([m,q],{baseClass:"jimu-color-picker-btn",declaredClass:"jimu.dijit.ColorPickerButton",templateString:'\x3cdiv\x3e\x3cdiv class\x3d"color-node" data-dojo-attach-point\x3d"colorNode"\x3e\x3c/div\x3e\x3cdiv class\x3d"seperator"\x3e\x3c/div\x3e\x3cdiv class\x3d"action-node" data-dojo-attach-point\x3d"actionNode"\x3e\x3c/div\x3e\x3c/div\x3e',_isTooltipDialogOpened:!1,color:null,showHex:!0,showHsv:!0,showRgb:!0,ensureMode:!1,postMixInProperties:function(){this.nls= window.jimuNls.common},postCreate:function(){this.inherited(arguments);this.color?this.color instanceof f||(this.color=new f(this.color)):this.color=new f("#ccc");c.setStyle(this.colorNode,"backgroundColor",this.color.toHex());this.colorNode.title=this.color.toHex();this.showLabel&&this._changeLabel(this.color);this._createTooltipDialog(this.domNode);this._hideTooltipDialog();this.own(d(this.colorNode,"click",h.hitch(this,this._onNodeClick)));this.own(d(this.actionNode,"click",h.hitch(this,this._onNodeClick))); this.own(d(document.body,"click",h.hitch(this,function(b){this.isPartOfPopup(b.target||b.srcElement)||this._hideTooltipDialog()})))},_onNodeClick:function(b){b.stopPropagation();b.preventDefault();this._isTooltipDialogOpened?this._hideTooltipDialog():this._showTooltipDialog()},destroy:function(){p.close(this.tooltipDialog);this.picker.destroy();this.tooltipDialog.destroy();this.inherited(arguments)},isPartOfPopup:function(b){var a=this.tooltipDialog.domNode;return b===a||c.isDescendant(b,a)},hideTooltipDialog:function(){this._hideTooltipDialog()}, _showTooltipDialog:function(){p.open({parent:this.getParent(),popup:this.tooltipDialog,around:this.domNode});this._isTooltipDialogOpened=!0},_hideTooltipDialog:function(){p.close(this.tooltipDialog);this._isTooltipDialogOpened=!1},_createTooltipDialog:function(){var b=c.create("div");this.tooltipDialog=new k({content:b});c.addClass(this.tooltipDialog.domNode,"jimu-color-picker-dialog");this.picker=new l({showHex:this.showHex,showRgb:this.showRgb,showHsv:this.showHsv,value:this.color.toHex(),onChange:h.hitch(this, function(a){this.ensureMode||(a=new f(a),this.setColor(a))})});this.picker.placeAt(b);this.picker.startup();if(this.ensureMode){var a=c.create("div",{"class":"jimu-btn jimu-btn-vacation jimu-float-trailing jimu-leading-margin1",title:this.nls.cancel,innerHTML:this.nls.cancel},b);this.own(d(a,"click",h.hitch(this,function(){this._hideTooltipDialog()})));a=c.create("div",{"class":"jimu-btn jimu-float-trailing jimu-leading-margin1",title:this.nls.ok,innerHTML:this.nls.ok},b);this.own(d(a,"click",h.hitch(this, function(){var a=this.picker.get("value");this.setColor(new f(a));this._hideTooltipDialog()})));b=c.create("div",{"class":"jimu-btn jimu-float-trailing jimu-leading-margin1",title:this.nls.apply,innerHTML:this.nls.apply},b);this.own(d(b,"click",h.hitch(this,function(){var a=this.picker.get("value");this.setColor(new f(a))})))}},setColor:function(b){if(b instanceof f){var a=this.color,e="";a&&(e=a.toHex());a=b.toHex();this.color=b;c.setStyle(this.colorNode,"backgroundColor",a);e!==a&&(this.picker.set("value", a),this.onChange(new f(a)))}},getColor:function(){return this.color},onChange:function(b){}})})},"widgets/Select/setting/_build-generate_module":function(){define(["dojo/text!./Setting.html","dojo/text!./css/style.css","dojo/i18n!./nls/strings"],function(){})},"url:jimu/dijit/templates/_TreeNode.html":'\x3cdiv class\x3d"dijitTreeNode" role\x3d"presentation"\x3e\r\n\t\x3cdiv data-dojo-attach-point\x3d"rowNode" class\x3d"dijitTreeRow" role\x3d"presentation"\x3e\r\n\t\t\x3cspan data-dojo-attach-point\x3d"expandoNode" class\x3d"dijitInline dijitTreeExpando" role\x3d"presentation"\x3e\x3c/span\x3e\r\n\t\t\x3cspan data-dojo-attach-point\x3d"expandoNodeText" class\x3d"dijitExpandoText" role\x3d"presentation"\x3e\x3c/span\x3e\r\n\t\t\x3cspan data-dojo-attach-point\x3d"contentNode" class\x3d"dijitTreeContent" role\x3d"presentation"\x3e\r\n\t\t\t\x3cspan role\x3d"presentation" class\x3d"dijitInline dijitIcon dijitTreeIcon" data-dojo-attach-point\x3d"iconNode"\x3e\x3c/span\x3e\r\n\t\t\t\x3cspan data-dojo-attach-point\x3d"labelNode,focusNode" class\x3d"dijitTreeLabel" role\x3d"treeitem" tabindex\x3d"-1" aria-selected\x3d"false"\x3e\x3c/span\x3e\r\n\t\t\x3c/span\x3e\r\n\t\x3c/div\x3e\r\n\t\x3cdiv data-dojo-attach-point\x3d"containerNode" class\x3d"dijitTreeNodeContainer" role\x3d"presentation" style\x3d"display: none;"\x3e\x3c/div\x3e\r\n\x3c/div\x3e', "url:dijit/templates/TreeNode.html":'\x3cdiv class\x3d"dijitTreeNode" role\x3d"presentation"\r\n\t\x3e\x3cdiv data-dojo-attach-point\x3d"rowNode" class\x3d"dijitTreeRow" role\x3d"presentation"\r\n\t\t\x3e\x3cspan data-dojo-attach-point\x3d"expandoNode" class\x3d"dijitInline dijitTreeExpando" role\x3d"presentation"\x3e\x3c/span\r\n\t\t\x3e\x3cspan data-dojo-attach-point\x3d"expandoNodeText" class\x3d"dijitExpandoText" role\x3d"presentation"\x3e\x3c/span\r\n\t\t\x3e\x3cspan data-dojo-attach-point\x3d"contentNode"\r\n\t\t\tclass\x3d"dijitTreeContent" role\x3d"presentation"\x3e\r\n\t\t\t\x3cspan role\x3d"presentation" class\x3d"dijitInline dijitIcon dijitTreeIcon" data-dojo-attach-point\x3d"iconNode"\x3e\x3c/span\r\n\t\t\t\x3e\x3cspan data-dojo-attach-point\x3d"labelNode,focusNode" class\x3d"dijitTreeLabel" role\x3d"treeitem"\r\n\t\t\t\t tabindex\x3d"-1" aria-selected\x3d"false" id\x3d"${id}_label"\x3e\x3c/span\x3e\r\n\t\t\x3c/span\r\n\t\x3e\x3c/div\x3e\r\n\t\x3cdiv data-dojo-attach-point\x3d"containerNode" class\x3d"dijitTreeNodeContainer" role\x3d"presentation"\r\n\t\t style\x3d"display: none;" aria-labelledby\x3d"${id}_label"\x3e\x3c/div\x3e\r\n\x3c/div\x3e\r\n', "url:dijit/templates/Tree.html":'\x3cdiv role\x3d"tree"\x3e\r\n\t\x3cdiv class\x3d"dijitInline dijitTreeIndent" style\x3d"position: absolute; top: -9999px" data-dojo-attach-point\x3d"indentDetector"\x3e\x3c/div\x3e\r\n\t\x3cdiv class\x3d"dijitTreeExpando dijitTreeExpandoLoading" data-dojo-attach-point\x3d"rootLoadingIndicator"\x3e\x3c/div\x3e\r\n\t\x3cdiv data-dojo-attach-point\x3d"containerNode" class\x3d"dijitTreeContainer" role\x3d"presentation"\x3e\r\n\t\x3c/div\x3e\r\n\x3c/div\x3e\r\n',"url:widgets/Select/setting/Setting.html":'\x3cdiv\x3e\r\n \x3cdiv class\x3d"config-section"\x3e\r\n \x3cdiv class\x3d"label"\x3e${nls.chooseSelectingTools}\x3c/div\x3e\r\n \x3cdiv data-dojo-attach-point\x3d"drawingToolsContainer" class\x3d"drawing-tools-container jimu-draw-box" data-dojo-attach-event\x3d"onclick:_onDrawingToolsContainerClicked"\x3e\r\n \x3cdiv class\x3d"draw-item point-icon" title\x3d"${nls.selectByPoint}" data-geotype\x3d"POINT"\x3e\r\n \x3cdiv class\x3d"draw-item-icon"\x3e\x3c/div\x3e\r\n \x3c/div\x3e\r\n \x3cdiv class\x3d"draw-item polyline-icon" title\x3d"${nls.selectByLine}" data-geotype\x3d"POLYLINE"\x3e\r\n \x3cdiv class\x3d"draw-item-icon"\x3e\x3c/div\x3e\r\n \x3c/div\x3e\r\n \x3cdiv class\x3d"draw-item extent-icon" title\x3d"${nls.selectByRectangle}" data-geotype\x3d"EXTENT"\x3e\r\n \x3cdiv class\x3d"draw-item-icon"\x3e\x3c/div\x3e\r\n \x3c/div\x3e\r\n \x3cdiv class\x3d"draw-item circle-icon" title\x3d"${nls.selectByCircle}" data-geotype\x3d"CIRCLE"\x3e\r\n \x3cdiv class\x3d"draw-item-icon"\x3e\x3c/div\x3e\r\n \x3c/div\x3e\r\n \x3cdiv class\x3d"draw-item polygon-icon" title\x3d"${nls.selectByPolygon}" data-geotype\x3d"POLYGON"\x3e\r\n \x3cdiv class\x3d"draw-item-icon"\x3e\x3c/div\x3e\r\n \x3c/div\x3e\r\n \x3cdiv class\x3d"draw-item freehand-polygon-icon" title\x3d"${nls.selectByFreehandPolygon}" data-geotype\x3d"FREEHAND_POLYGON"\x3e\r\n \x3cdiv class\x3d"draw-item-icon"\x3e\x3c/div\x3e\r\n \x3c/div\x3e\r\n \x3c/div\x3e\r\n \x3c/div\x3e\r\n\r\n \x3cdiv class\x3d"config-section inline"\x3e\r\n \x3cdiv class\x3d"label"\x3e${nls.setSelectionColor}\x3c/div\x3e\r\n \x3cdiv data-dojo-attach-point\x3d"colorPicker" data-dojo-type\x3d"jimu/dijit/ColorPickerButton"\x3e\r\n \x3c/div\x3e\r\n \x3c/div\x3e\r\n \x3cdiv class\x3d"config-section"\x3e\r\n \x3cdiv class\x3d"label"\x3e${nls.selectionMode}\x3c/div\x3e\r\n \x3cdiv class\x3d"option"\x3e\r\n \x3cinput id\x3d"selectModePartial" name\x3d"selectionMode" data-dojo-type\x3d"dijit/form/RadioButton"\r\n data-dojo-attach-point\x3d"partialMode"/\x3e\r\n \x3clabel for\x3d"selectModePartial"\x3e${nls.partiallyWithin}\x3c/label\x3e\r\n \x3c/div\x3e\r\n \x3cdiv class\x3d"option"\x3e\r\n \x3cinput id\x3d"selectModeWhole" name\x3d"selectionMode" data-dojo-type\x3d"dijit/form/RadioButton"\r\n data-dojo-attach-point\x3d"whollyMode"/\x3e\r\n \x3clabel for\x3d"selectModeWhole"\x3e${nls.whollyWithin}\x3c/label\x3e\r\n \x3c/div\x3e\r\n \x3c/div\x3e\r\n \x3cdiv class\x3d"config-section"\x3e\r\n \x3cdiv data-dojo-attach-point\x3d"enableByDefaultCheckBoxDiv"\x3e\x3c/div\x3e\r\n \x3c/div\x3e\r\n \x3cdiv class\x3d"config-section"\x3e\r\n \x3cdiv data-dojo-attach-point\x3d"exportCheckBoxDiv"\x3e\x3c/div\x3e\r\n \x3c/div\x3e\r\n \x3cdiv class\x3d"config-section"\x3e\r\n \x3cdiv data-dojo-attach-point\x3d"runtimeLayersCheckBoxDiv"\x3e\x3c/div\x3e\r\n \x3c/div\x3e\r\n \x3cdiv class\x3d"config-section"\x3e\r\n \x3cdiv class\x3d"label"\x3e${nls.layerToSelect}\x3c/div\x3e\r\n \x3cdiv class\x3d"layer-chooser" data-dojo-attach-point\x3d"layerChooserDiv"\x3e\x3c/div\x3e\r\n \x3c/div\x3e\r\n\x3c/div\x3e', "url:widgets/Select/setting/css/style.css":".jimu-widget-select-setting {margin: 20px 0;}.jimu-widget-select-setting .config-section {width: 100%; margin-bottom: 20px;}.jimu-widget-select-setting .config-section label {color: #353535; font-size: 12px;}.jimu-widget-select-setting .config-section .jimu-color-picker {width: 30px; height: 30px; margin: 0 20px;}.jimu-widget-select-setting .config-section .option {margin: 15px;}.jimu-widget-select-setting .config-section .option input {margin: 0 10px;}.jimu-widget-select-setting .inline \x3e div {display: inline-block; vertical-align: middle;}.jimu-widget-select-setting .label {color: #353535; font-size: 14px;}.jimu-widget-select-setting .drawing-tools-container {margin-top: 5px; overflow: hidden;}.jimu-widget-select-setting .drawing-tools-container .draw-item {float: left; width: 48px; height: 48px; cursor: pointer; background-repeat: no-repeat; background-position: center center; margin-left: 5px; position: relative; border: 1px solid #DBDBDB; border-radius: 1px; color: #666; font-size: 24px; padding: 12px;}.jimu-widget-select-setting .drawing-tools-container .draw-item.point-icon {padding: 12px 17px;}.jimu-rtl .jimu-widget-select-setting .drawing-tools-container .draw-item {float: right; margin-left: 0; margin-right: 5px;}.jimu-widget-select-setting .drawing-tools-container .draw-item:first-child {margin-left: 0 !important; margin-right: 0 !important;}.jimu-widget-select-setting .drawing-tools-container .draw-item .draw-item-icon {position: absolute; left: 2px; top: 2px; width: 16px; height: 16px; display: none; background: url(images/checked.png) center center no-repeat; background-color: #fff;}.jimu-rtl .jimu-widget-select-setting .drawing-tools-container .draw-item .draw-item-icon {left: auto; right: 2px;}.jimu-widget-select-setting .drawing-tools-container .draw-item.selected .draw-item-icon {display: block;}.jimu-widget-select-setting .jimu-basic-layer-chooser-from-map-lite .check-box-div.jimu-state-disabled {display: none;}", "*now":function(g){g(['dojo/i18n!*preload*widgets/Select/setting/nls/Setting*["ar","bs","ca","cs","da","de","en","el","es","et","fi","fr","he","hi","hr","hu","id","it","ja","ko","lt","lv","nb","nl","pl","pt-br","pt-pt","ro","ru","sl","sr","sv","th","tr","zh-cn","uk","vi","zh-hk","zh-tw","ROOT"]'])},"*noref":1}}); define("dojo/_base/declare dojo/_base/lang dojo/_base/Color dojo/on dojo/_base/array dojo/query dojo/_base/html dijit/_WidgetsInTemplateMixin jimu/BaseWidgetSetting jimu/dijit/CheckBox jimu/dijit/LayerChooserFromMap jimu/dijit/LayerChooserFromMapLite jimu/dijit/ColorPickerButton dijit/form/RadioButton".split(" "),function(g,m,q,h,c,d,f,k,p,l,b,a){return g([p,k],{baseClass:"jimu-widget-select-setting",selectionColor:"",selectionMode:"",allowExport:!1,postMixInProperties:function(){this.inherited(arguments); m.mixin(this.nls,window.jimuNls.featureSetChooser)},postCreate:function(){this.inherited(arguments);this.enableByDefaultCheckBox=new l({label:this.nls.enableByDefault,checked:this.config&&!1!==this.config.enableByDefault,onChange:m.hitch(this,this._onEnableByDefaultChange)},this.enableByDefaultCheckBoxDiv);this.allowExportCheckBox=new l({label:this.nls.allowExport,checked:this.config&&this.config.allowExport,onChange:m.hitch(this,this._onAllowExportChange)},this.exportCheckBoxDiv);this.includeRuntimeLayersCheckBox= new l({label:this.nls.includeRuntimeLayers,checked:this.config&&!1!==this.config.includeRuntimeLayers,onChange:m.hitch(this,this._onIncludeRuntimeLayersChange)},this.runtimeLayersCheckBoxDiv);this.config&&this._init();this.own(h(this.partialMode,"click",m.hitch(this,this._onSelectPartialMode)));this.own(h(this.whollyMode,"click",m.hitch(this,this._onSelectWhollyMode)));this.own(h(this.colorPicker,"change",m.hitch(this,this._onColorChange)))},_onColorChange:function(a){this.selectionColor=a.toHex()}, _onSelectPartialMode:function(){this.selectionMode="partial"},_onSelectWhollyMode:function(){this.selectionMode="wholly"},_onAllowExportChange:function(a){this.allowExport=a},_onEnableByDefaultChange:function(a){this.enableByDefault=a},_onIncludeRuntimeLayersChange:function(a){this.includeRuntimeLayers=a},_init:function(){(this.selectionColor=this.config.selectionColor)&&this.colorPicker.setColor(new q(this.selectionColor));this.selectionMode=this.config.selectionMode;"partial"===this.config.selectionMode? (this.partialMode.set("checked",!0),this._onSelectPartialMode()):"wholly"===this.config.selectionMode&&(this.whollyMode.set("checked",!0),this._onSelectWhollyMode());this.enableByDefault=!1!==this.config.enableByDefault;this.enableByDefaultCheckBox.setValue(this.enableByDefault);this.allowExport=this.config.allowExport;this.allowExportCheckBox.setValue(this.allowExport);this.includeRuntimeLayers=!1!==this.config.includeRuntimeLayers;this.includeRuntimeLayersCheckBox.setValue(this.includeRuntimeLayers); this._selectDrawingTools(this.config.geometryTypes||["EXTENT"]);if(this.layerChooser)this.config.layerState&&this.layerChooser.restoreState(this.config.layerState);else{var c=b.createFeaturelayerFilter(null,!0,!1,!1);this.layerChooser=new a({customFilter:c,onlySelectLeafLayer:!0,onlyShowWebMapLayers:!0,layerState:this.config.layerState});this.layerChooser.placeAt(this.layerChooserDiv);this.layerChooser.startup()}},setConfig:function(a){this.config=a;this._init()},getConfig:function(){return{selectionColor:this.selectionColor, selectionMode:this.selectionMode,enableByDefault:this.enableByDefault,allowExport:this.allowExport,includeRuntimeLayers:this.includeRuntimeLayers,geometryTypes:this._getSelectedDrawingTools(),layerState:this.layerChooser.getState()}},_onDrawingToolsContainerClicked:function(a){a=a.target||a.srcElement;var b=null;f.hasClass(a,"draw-item")?b=a:f.hasClass(a,"draw-item-icon")&&(b=a.parentNode);b&&(f.toggleClass(b,"selected"),0===d(".selected",this.drawingToolsContainer).length&&f.addClass(b,"selected"))}, _selectDrawingTools:function(a){var b=d(".draw-item",this.drawingToolsContainer);c.forEach(b,m.hitch(this,function(b){var c=b.getAttribute("data-geotype");0<=a.indexOf(c)?f.addClass(b,"selected"):f.removeClass(b,"selected")}))},_getSelectedDrawingTools:function(){var a=[],a=d(".draw-item.selected",this.drawingToolsContainer);return a=c.map(a,m.hitch(this,function(a){return a.getAttribute("data-geotype")}))}})});
onItem:function(h){h?this.pasteItem(h,null,d,!0,f):(g=this.store.newItem(c,k))&&void 0!=f&&this.pasteItem(g,d,d,!1,f)}}):(g=this.store.newItem(c,k))&&void 0!=f&&this.pasteItem(g,d,d,!1,f)},pasteItem:function(c,d,f,k,h){var l=this.store,b=this.childrenAttrs[0];d&&g.forEach(this.childrenAttrs,function(a){if(l.containsValue(d,a,c)){if(!k){var e=g.filter(l.getValues(d,a),function(a){return a!=c});l.setValues(d,a,e)}b=a}});if(f)if("number"==typeof h){var a=l.getValues(f,b).slice();a.splice(h,0,c);l.setValues(f, b,a)}else l.setValues(f,b,l.getValues(f,b).concat(c))},onChange:function(){},onChildrenChange:function(){},onDelete:function(){},onNewItem:function(c,d){d&&this.getChildren(d.item,h.hitch(this,function(c){this.onChildrenChange(d.item,c)}))},onDeleteItem:function(c){this.onDelete(c)},onSetItem:function(c,d){if(-1!=g.indexOf(this.childrenAttrs,d))this.getChildren(c,h.hitch(this,function(d){this.onChildrenChange(c,d)}));else this.onChange(c)}})})},"dijit/tree/ForestStoreModel":function(){define(["dojo/_base/array",
tooltip.rs
use druid::commands::CLOSE_WINDOW; use druid::widget::prelude::*; use druid::widget::{Controller, ControllerHost, Label, LabelText}; use druid::{ Color, Data, Point, TimerToken, Vec2, Widget, WidgetExt, WindowConfig, WindowId, WindowLevel, WindowSizePolicy, }; use std::time::{Duration, Instant}; use crate::OnMonitorExt; #[derive(Clone)] enum TooltipState { Off, Waiting { timer: TimerToken, last_mouse_move: Instant, last_mouse_pos: Point, }, Showing { id: WindowId, // We store last_mouse_pos here because we seem to sometimes get a synthesized MouseMove // event after showing the tooltip (maybe because the mouse leaves the window?). By storing // the last mouse position, we can filter out these spurious moves. last_mouse_pos: Point, }, } /// A [`Controller`] responsible for listening to mouse hovers and launching tooltip windows. /// /// Instead of constructing this widget explicitly, you probably want to use /// [`TooltipExt::tooltip`]. /// /// [`Controller`]: druid::widget::Controller pub struct TooltipController<T> { text: LabelText<T>, state: TooltipState, } /// Extension methods for tooltips. pub trait TooltipExt<T: Data, W: Widget<T>> { /// Open a tooltip when the mouse is hovered over this widget. fn tooltip<LT: Into<LabelText<T>>>(self, text: LT) -> ControllerHost<W, TooltipController<T>>; } impl<T: Data, W: Widget<T> + 'static> TooltipExt<T, W> for W { fn tooltip<LT: Into<LabelText<T>>>(self, text: LT) -> ControllerHost<W, TooltipController<T>>
} impl<T: Data, W: Widget<T>> Controller<T, W> for TooltipController<T> { fn event(&mut self, child: &mut W, ctx: &mut EventCtx, ev: &Event, data: &mut T, env: &Env) { self.state = match self.state { TooltipState::Waiting { timer, last_mouse_move, last_mouse_pos, } => match ev { Event::MouseMove(ev) if ctx.is_hot() => TooltipState::Waiting { timer, last_mouse_move: Instant::now(), last_mouse_pos: ev.window_pos, }, Event::MouseDown(_) | Event::MouseUp(_) | Event::MouseMove(_) => TooltipState::Off, Event::Timer(tok) if tok == &timer => { ctx.set_handled(); let elapsed = Instant::now().duration_since(last_mouse_move); if elapsed > TOOLTIP_DELAY_CHECK { self.text.resolve(data, env); let tooltip_position_in_window_coordinates = last_mouse_pos + TOOLTIP_OFFSET; let win_id = ctx.new_sub_window( WindowConfig::default() .show_titlebar(false) .window_size_policy(WindowSizePolicy::Content) .set_level(WindowLevel::Tooltip(ctx.window().clone())) .set_position(tooltip_position_in_window_coordinates), // FIXME: we'd like to use the actual label text instead of // resolving, but LabelText isn't Clone Label::new(self.text.display_text()) .border(TOOLTIP_BORDER_COLOR, TOOLTIP_BORDER_WIDTH) .on_monitor(ctx.window()), data.clone(), env.clone(), ); TooltipState::Showing { id: win_id, last_mouse_pos, } } else { TooltipState::Waiting { timer: ctx.request_timer(TOOLTIP_DELAY - elapsed), last_mouse_move, last_mouse_pos, } } } _ => self.state.clone(), }, TooltipState::Off => match ev { Event::MouseMove(ev) if ctx.is_hot() => TooltipState::Waiting { timer: ctx.request_timer(TOOLTIP_DELAY), last_mouse_move: Instant::now(), last_mouse_pos: ev.window_pos, }, _ => TooltipState::Off, }, TooltipState::Showing { id, last_mouse_pos } => match ev { Event::MouseMove(ev) if ctx.is_hot() => { // This is annoying. On GTK, after showing a window we instantly get a new // MouseMove event, with a mouse position that tends to be slightly different // than the previous one. If we don't test the positions, this causes the // tooltip to immediately close. if (ev.window_pos - last_mouse_pos).hypot2() > 1.0 { ctx.submit_command(CLOSE_WINDOW.to(id)); TooltipState::Waiting { timer: ctx.request_timer(TOOLTIP_DELAY), last_mouse_move: Instant::now(), last_mouse_pos: ev.window_pos, } } else { self.state.clone() } } Event::MouseMove(_) | Event::MouseUp(_) | Event::MouseDown(_) => { ctx.submit_command(CLOSE_WINDOW.to(id)); self.state.clone() } _ => self.state.clone(), }, }; child.event(ctx, ev, data, env); } fn lifecycle( &mut self, child: &mut W, ctx: &mut LifeCycleCtx, ev: &LifeCycle, data: &T, env: &Env, ) { if let LifeCycle::HotChanged(false) = ev { if let TooltipState::Showing { id, .. } = self.state { ctx.submit_command(CLOSE_WINDOW.to(id)); self.state = TooltipState::Off; } } child.lifecycle(ctx, ev, data, env); } } const TOOLTIP_DELAY: Duration = Duration::from_millis(350); const TOOLTIP_DELAY_CHECK: Duration = Duration::from_millis(320); const TOOLTIP_BORDER_COLOR: Color = Color::BLACK; const TOOLTIP_BORDER_WIDTH: f64 = 1.0; // It looks better if we don't put the tooltip *right* on the tip of the mouse, // because the mouse obstructs it. // FIXME: this should depend on the actual cursor size. const TOOLTIP_OFFSET: Vec2 = Vec2::new(15.0, 15.0);
{ self.controller(TooltipController { text: text.into(), state: TooltipState::Off, }) }
spec_test.go
package ssim_test import (
func TestUnitSpecs(t *testing.T) { r := gospec.NewRunner() r.AddSpec(DescribeMemEventLog) r.AddSpec(DescribePipelines) r.AddSpec(DescribeSyncedStream) gospec.MainGoTest(r, t) }
"testing" "github.com/ghthor/gospec" )
formato-fecha.pipe.ts
import { Pipe, PipeTransform } from '@angular/core'; import * as moment from 'moment'; @Pipe({ name: 'formatoFecha' }) export class
implements PipeTransform { transform(value: any, ...args: any[]): any { return moment(value).format(args[0] || 'DD/MM/YYYY hh:mm:ss a'); } }
FormatoFechaPipe
xls_wrapper.py
from XLMMacroDeobfuscator.excel_wrapper import ExcelWrapper from XLMMacroDeobfuscator.boundsheet import Boundsheet from XLMMacroDeobfuscator.boundsheet import Cell from win32com.client import Dispatch import pywintypes from enum import Enum import os import re class XlCellType(Enum): xlCellTypeFormulas = -4123
class XLSWrapper(ExcelWrapper): XLEXCEL4MACROSHEET = 3 def __init__(self, xls_doc_path): self._excel = Dispatch("Excel.Application") self.xls_workbook = self._excel.Workbooks.Open(xls_doc_path) self.xls_workbook_name = os.path.basename(xls_doc_path) self._macrosheets = None self._defined_names = None self.xl_international_flags = {} self._international_flags = None def get_xl_international_char(self, flag_name): if flag_name not in self.xl_international_flags: if self._international_flags is None: self._international_flags = self._excel.Application.International # flag value starts at 1, list index starts at 0 self.xl_international_flags[flag_name] = self._international_flags[flag_name.value - 1] result = self.xl_international_flags[flag_name] return result def get_defined_names(self): result = {} name_objects = self.xls_workbook.Excel4MacroSheets.Application.Names for name_obj in name_objects: result[name_obj.NameLocal.lower()] = str(name_obj.RefersToLocal).strip('=') return result def get_defined_name(self, name, full_match=True): result = [] name = name.lower() if self._defined_names is None: self._defined_names = self.get_defined_names() if full_match: if name in self._defined_names: result = self._defined_names[name] else: for defined_name, cell_address in self._defined_names.items(): if defined_name.startswith(name): result.append((defined_name, cell_address)) return result def load_cells(self, macrosheet, xls_sheet): cells = {} try: self._excel.Application.ScreenUpdating = False col_offset = xls_sheet.UsedRange.Column row_offset = xls_sheet.UsedRange.Row formulas = xls_sheet.UsedRange.Formula if formulas is not None: for row_no, row in enumerate(formulas): for col_no, col in enumerate(row): if col: cell = Cell() cell.sheet = macrosheet if len(col)>1 and col.startswith('='): cell.formula = col else: cell.value = col row_addr = row_offset + row_no col_addr = col_offset + col_no cell.row = row_addr cell.column = Cell.convert_to_column_name(col_addr) cells[(col_addr, row_addr)] = cell self._excel.Application.ScreenUpdating = True except pywintypes.com_error as error: print('CELL(Formula): ' + str(error.args[2])) try: values= xls_sheet.UsedRange.Value if values is not None: for row_no, row in enumerate(values): for col_no, col in enumerate(row): if col: row_addr = row_offset + row_no col_addr = col_offset + col_no if (col_addr, row_addr) in cells: cell = cells[(col_addr, row_addr)] cell.value = col else: cell = Cell() cell.sheet = macrosheet cell.value = col cell.row = row_addr cell.column = Cell.convert_to_column_name(col_addr) cells[(col_addr, row_addr)] = cell except pywintypes.com_error as error: print('CELL(Constant): ' + str(error.args[2])) for cell in cells: macrosheet.add_cell(cells[cell]) def get_macrosheets(self): if self._macrosheets is None: self._macrosheets = {} for sheet in self.xls_workbook.Excel4MacroSheets: macrosheet = Boundsheet(sheet.name, 'Macrosheet') self.load_cells(macrosheet, sheet) self._macrosheets[sheet.name] = macrosheet return self._macrosheets def get_workbook_name(self): return self.xls_workbook_name def get_cell_info(self, sheet_name, col, row, type_ID): sheet = self._excel.Excel4MacroSheets(sheet_name) cell = col + row data = None if int(type_ID) == 2: data = sheet.Range(col + row).Row print(data) elif int(type_ID) == 3: data = sheet.Range(cell).Column print(data) elif int(type_ID) == 8: data = sheet.Range(cell).HorizontalAlignment elif int(type_ID) == 17: data = sheet.Range(cell).Height elif int(type_ID) == 19: data = sheet.Range(cell).Font.Size elif int(type_ID) == 20: data = sheet.Range(cell).Font.Bold elif int(type_ID) == 21: data = sheet.Range(cell).Font.Italic elif int(type_ID) == 23: data = sheet.Range(cell).Font.Strikethrough elif int(type_ID) == 24: data = sheet.Range(cell).Font.ColorIndex elif int(type_ID) == 50: data = sheet.Range(cell).VerticalAlignment else: print("Unknown info_type (%d) at cell %s" % (type_ID, cell)) return data, False, False if __name__ == '__main__': path = r"tmp\xls\edd554502033d78ac18e4bd917d023da2fd64843c823c1be8bc273f48a5f3f5f.xls" path = os.path.abspath(path) excel_doc = XLSWrapper(path) try: macrosheets = excel_doc.get_macrosheets() auto_open_labels = excel_doc.get_defined_name('auto_open', full_match=False) for label in auto_open_labels: print('auto_open: {}->{}'.format(label[0], label[1])) for macrosheet_name in macrosheets: print('SHEET: {}\t{}'.format(macrosheets[macrosheet_name].name, macrosheets[macrosheet_name].type)) for formula_loc, info in macrosheets[macrosheet_name].cells.items(): if info.formula is not None: print('{}\t{}\t{}'.format(formula_loc, info.formula, info.value)) for formula_loc, info in macrosheets[macrosheet_name].cells.items(): if info.formula is None: print('{}\t{}\t{}'.format(formula_loc, info.formula, info.value)) finally: excel_doc._excel.Application.DisplayAlerts = False excel_doc._excel.Application.Quit()
xlCellTypeConstants = 2
Alert Notification.py
# This is a simple application for alert system from tkinter import * from tkinter import messagebox root = Tk() root.geometry("200x200")
messagebox.showwarning("Alert Box", "Stop virus found") but = Button(root, text="ok", command=Message) but.place(x=100, y=100) root.mainloop()
def message():
server.py
# Copyright 2018-2022 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import base64 import binascii import logging import os import socket import sys import errno import json import time import traceback import click from enum import Enum from typing import ( Any, Dict, Optional, Tuple, Callable, Awaitable, Generator, List, Set, ) import tornado.concurrent import tornado.gen import tornado.ioloop import tornado.locks import tornado.netutil import tornado.web import tornado.websocket from tornado.websocket import WebSocketHandler from tornado.httpserver import HTTPServer from tornado.ioloop import IOLoop from streamlit import config from streamlit import file_util from streamlit import source_util from streamlit import util from streamlit.caching import get_memo_stats_provider, get_singleton_stats_provider from streamlit.config_option import ConfigOption from streamlit.forward_msg_cache import ForwardMsgCache from streamlit.forward_msg_cache import create_reference_msg from streamlit.forward_msg_cache import populate_hash_if_needed from streamlit.in_memory_file_manager import in_memory_file_manager from streamlit.legacy_caching.caching import _mem_caches from streamlit.app_session import AppSession from streamlit.stats import StatsHandler, StatsManager from streamlit.uploaded_file_manager import UploadedFileManager from streamlit.logger import get_logger from streamlit.components.v1.components import ComponentRegistry from streamlit.components.v1.components import ComponentRequestHandler from streamlit.proto.BackMsg_pb2 import BackMsg from streamlit.proto.ForwardMsg_pb2 import ForwardMsg from streamlit.server.upload_file_request_handler import ( UploadFileRequestHandler, UPLOAD_FILE_ROUTE, ) from streamlit.session_data import SessionData from streamlit.state import ( SCRIPT_RUN_WITHOUT_ERRORS_KEY, SessionStateStatProvider, ) from streamlit.server.routes import AddSlashHandler from streamlit.server.routes import AssetsFileHandler from streamlit.server.routes import DebugHandler from streamlit.server.routes import HealthHandler from streamlit.server.routes import MediaFileHandler from streamlit.server.routes import MessageCacheHandler from streamlit.server.routes import StaticFileHandler from streamlit.server.server_util import is_cacheable_msg from streamlit.server.server_util import is_url_from_allowed_origins from streamlit.server.server_util import make_url_path_regex from streamlit.server.server_util import serialize_forward_msg from streamlit.server.server_util import get_max_message_size_bytes from streamlit.watcher import LocalSourcesWatcher LOGGER = get_logger(__name__) TORNADO_SETTINGS = { # Gzip HTTP responses. "compress_response": True, # Ping every 1s to keep WS alive. # 2021.06.22: this value was previously 20s, and was causing # connection instability for a small number of users. This smaller # ping_interval fixes that instability. # https://github.com/streamlit/streamlit/issues/3196 "websocket_ping_interval": 1, # If we don't get a ping response within 30s, the connection # is timed out. "websocket_ping_timeout": 30, } # When server.port is not available it will look for the next available port # up to MAX_PORT_SEARCH_RETRIES. MAX_PORT_SEARCH_RETRIES = 100 # When server.address starts with this prefix, the server will bind # to an unix socket. UNIX_SOCKET_PREFIX = "unix://" # Wait for the script run result for 60s and if no result is available give up SCRIPT_RUN_CHECK_TIMEOUT = 60 class SessionInfo: """Type stored in our _session_info_by_id dict. For each AppSession, the server tracks that session's script_run_count. This is used to track the age of messages in the ForwardMsgCache. """ def __init__(self, ws: WebSocketHandler, session: AppSession): """Initialize a SessionInfo instance. Parameters ---------- session : AppSession The AppSession object. ws : _BrowserWebSocketHandler The websocket corresponding to this session. """ self.session = session self.ws = ws self.script_run_count = 0 def __repr__(self) -> str: return util.repr_(self) class State(Enum): INITIAL = "INITIAL" WAITING_FOR_FIRST_BROWSER = "WAITING_FOR_FIRST_BROWSER" ONE_OR_MORE_BROWSERS_CONNECTED = "ONE_OR_MORE_BROWSERS_CONNECTED" NO_BROWSERS_CONNECTED = "NO_BROWSERS_CONNECTED" STOPPING = "STOPPING" STOPPED = "STOPPED" class RetriesExceeded(Exception): pass def server_port_is_manually_set() -> bool: return config.is_manually_set("server.port") def server_address_is_unix_socket() -> bool: address = config.get_option("server.address") return address is not None and address.startswith(UNIX_SOCKET_PREFIX) def start_listening(app: tornado.web.Application) -> None: """Makes the server start listening at the configured port. In case the port is already taken it tries listening to the next available port. It will error after MAX_PORT_SEARCH_RETRIES attempts. """ http_server = HTTPServer( app, max_buffer_size=config.get_option("server.maxUploadSize") * 1024 * 1024 ) if server_address_is_unix_socket(): start_listening_unix_socket(http_server) else: start_listening_tcp_socket(http_server) def start_listening_unix_socket(http_server: HTTPServer) -> None: address = config.get_option("server.address") file_name = os.path.expanduser(address[len(UNIX_SOCKET_PREFIX) :]) unix_socket = tornado.netutil.bind_unix_socket(file_name) http_server.add_socket(unix_socket) def start_listening_tcp_socket(http_server: HTTPServer) -> None: call_count = 0 port = None while call_count < MAX_PORT_SEARCH_RETRIES: address = config.get_option("server.address") port = config.get_option("server.port") try: http_server.listen(port, address) break # It worked! So let's break out of the loop. except (OSError, socket.error) as e: if e.errno == errno.EADDRINUSE: if server_port_is_manually_set(): LOGGER.error("Port %s is already in use", port) sys.exit(1) else: LOGGER.debug( "Port %s already in use, trying to use the next one.", port ) port += 1 # Save port 3000 because it is used for the development # server in the front end. if port == 3000: port += 1 config.set_option( "server.port", port, ConfigOption.STREAMLIT_DEFINITION ) call_count += 1 else: raise if call_count >= MAX_PORT_SEARCH_RETRIES: raise RetriesExceeded( f"Cannot start Streamlit server. Port {port} is already in use, and " f"Streamlit was unable to find a free port after {MAX_PORT_SEARCH_RETRIES} attempts.", ) class Server: _singleton: Optional["Server"] = None @classmethod def get_current(cls) -> "Server": """ Returns ------- Server The singleton Server object. """ if Server._singleton is None: raise RuntimeError("Server has not been initialized yet") return Server._singleton def __init__( self, ioloop: IOLoop, main_script_path: str, command_line: Optional[str] ): """Create the server. It won't be started yet.""" if Server._singleton is not None: raise RuntimeError("Server already initialized. Use .get_current() instead") Server._singleton = self _set_tornado_log_levels() self._ioloop = ioloop self._main_script_path = main_script_path self._command_line = command_line if command_line is not None else "" # Mapping of AppSession.id -> SessionInfo. self._session_info_by_id: Dict[str, SessionInfo] = {} self._must_stop = tornado.locks.Event() self._state = State.INITIAL self._message_cache = ForwardMsgCache() self._uploaded_file_mgr = UploadedFileManager() self._uploaded_file_mgr.on_files_updated.connect(self.on_files_updated) self._session_data: Optional[SessionData] = None self._has_connection = tornado.locks.Condition() self._need_send_data = tornado.locks.Event() # StatsManager self._stats_mgr = StatsManager() self._stats_mgr.register_provider(get_memo_stats_provider()) self._stats_mgr.register_provider(get_singleton_stats_provider()) self._stats_mgr.register_provider(_mem_caches) self._stats_mgr.register_provider(self._message_cache) self._stats_mgr.register_provider(in_memory_file_manager) self._stats_mgr.register_provider(self._uploaded_file_mgr) self._stats_mgr.register_provider( SessionStateStatProvider(self._session_info_by_id) ) def __repr__(self) -> str: return util.repr_(self) @property def main_script_path(self) -> str: return self._main_script_path def get_session_by_id(self, session_id: str) -> Optional[AppSession]: """Return the AppSession corresponding to the given id, or None if no such session exists.""" session_info = self._get_session_info(session_id) if session_info is None: return None return session_info.session def on_files_updated(self, session_id: str) -> None: """Event handler for UploadedFileManager.on_file_added. Ensures that uploaded files from stale sessions get deleted. """ session_info = self._get_session_info(session_id) if session_info is None: # If an uploaded file doesn't belong to an existing session, # remove it so it doesn't stick around forever. self._uploaded_file_mgr.remove_session_files(session_id) def _get_session_info(self, session_id: str) -> Optional[SessionInfo]: """Return the SessionInfo with the given id, or None if no such session exists. """ return self._session_info_by_id.get(session_id, None) def start(self, on_started: Callable[["Server"], Any]) -> None: """Start the server. Parameters ---------- on_started : callable A callback that will be called when the server's run-loop has started, and the server is ready to begin receiving clients. """ if self._state != State.INITIAL: raise RuntimeError("Server has already been started") LOGGER.debug("Starting server...") app = self._create_app() start_listening(app) port = config.get_option("server.port") LOGGER.debug("Server started on port %s", port) self._ioloop.spawn_callback(self._loop_coroutine, on_started) def _create_app(self) -> tornado.web.Application: """Create our tornado web app.""" base = config.get_option("server.baseUrlPath") routes: List[Any] = [ ( make_url_path_regex(base, "stream"), _BrowserWebSocketHandler, dict(server=self), ), ( make_url_path_regex(base, "healthz"), HealthHandler, dict(callback=lambda: self.is_ready_for_browser_connection), ), (make_url_path_regex(base, "debugz"), DebugHandler, dict(server=self)), ( make_url_path_regex(base, "message"), MessageCacheHandler, dict(cache=self._message_cache), ), ( make_url_path_regex(base, "st-metrics"), StatsHandler, dict(stats_manager=self._stats_mgr), ), ( make_url_path_regex( base, UPLOAD_FILE_ROUTE, ), UploadFileRequestHandler, dict( file_mgr=self._uploaded_file_mgr, get_session_info=self._get_session_info, ), ), ( make_url_path_regex(base, "assets/(.*)"), AssetsFileHandler, {"path": "%s/" % file_util.get_assets_dir()}, ), (make_url_path_regex(base, "media/(.*)"), MediaFileHandler, {"path": ""}), ( make_url_path_regex(base, "component/(.*)"), ComponentRequestHandler, dict(registry=ComponentRegistry.instance()), ), ] if config.get_option("server.scriptHealthCheckEnabled"): routes.extend( [ ( make_url_path_regex(base, "script-health-check"), HealthHandler, dict(callback=lambda: self.does_script_run_without_error()), ) ] ) if config.get_option("global.developmentMode"): LOGGER.debug("Serving static content from the Node dev server") else: static_path = file_util.get_static_dir() LOGGER.debug("Serving static content from %s", static_path) routes.extend( [ ( make_url_path_regex(base, "(.*)"), StaticFileHandler, { "path": "%s/" % static_path, "default_filename": "index.html", "get_pages": lambda: set( [ page_info["page_name"] for page_info in source_util.get_pages( self.main_script_path ).values() ] ), }, ), (make_url_path_regex(base, trailing_slash=False), AddSlashHandler), ] ) return tornado.web.Application( routes, cookie_secret=config.get_option("server.cookieSecret"), xsrf_cookies=config.get_option("server.enableXsrfProtection"), # Set the websocket message size. The default value is too low. websocket_max_message_size=get_max_message_size_bytes(), **TORNADO_SETTINGS, # type: ignore[arg-type] ) def _set_state(self, new_state: State) -> None: LOGGER.debug("Server state: %s -> %s" % (self._state, new_state)) self._state = new_state @property async def is_ready_for_browser_connection(self) -> Tuple[bool, str]: if self._state not in (State.INITIAL, State.STOPPING, State.STOPPED): return True, "ok" return False, "unavailable" async def does_script_run_without_error(self) -> Tuple[bool, str]: """Load and execute the app's script to verify it runs without an error. Returns ------- (True, "ok") if the script completes without error, or (False, err_msg) if the script raises an exception. """ session_data = SessionData(self._main_script_path, self._command_line) local_sources_watcher = LocalSourcesWatcher(session_data) session = AppSession( ioloop=self._ioloop, session_data=session_data, uploaded_file_manager=self._uploaded_file_mgr, message_enqueued_callback=self._enqueued_some_message, local_sources_watcher=local_sources_watcher, user_info={"email": "[email protected]"}, ) try: session.request_rerun(None) now = time.perf_counter() while ( SCRIPT_RUN_WITHOUT_ERRORS_KEY not in session.session_state and (time.perf_counter() - now) < SCRIPT_RUN_CHECK_TIMEOUT ): await tornado.gen.sleep(0.1) if SCRIPT_RUN_WITHOUT_ERRORS_KEY not in session.session_state: return False, "timeout" ok = session.session_state[SCRIPT_RUN_WITHOUT_ERRORS_KEY] msg = "ok" if ok else "error" return ok, msg finally: session.shutdown() @property def browser_is_connected(self) -> bool: return self._state == State.ONE_OR_MORE_BROWSERS_CONNECTED @property def is_running_hello(self) -> bool:
@tornado.gen.coroutine def _loop_coroutine( self, on_started: Optional[Callable[["Server"], Any]] = None ) -> Generator[Any, None, None]: try: if self._state == State.INITIAL: self._set_state(State.WAITING_FOR_FIRST_BROWSER) elif self._state == State.ONE_OR_MORE_BROWSERS_CONNECTED: pass else: raise RuntimeError("Bad server state at start: %s" % self._state) if on_started is not None: on_started(self) while not self._must_stop.is_set(): if self._state == State.WAITING_FOR_FIRST_BROWSER: yield tornado.gen.convert_yielded( asyncio.wait( [self._must_stop.wait(), self._has_connection.wait()], return_when=asyncio.FIRST_COMPLETED, ) ) elif self._state == State.ONE_OR_MORE_BROWSERS_CONNECTED: self._need_send_data.clear() # Shallow-clone our sessions into a list, so we can iterate # over it and not worry about whether it's being changed # outside this coroutine. session_infos = list(self._session_info_by_id.values()) for session_info in session_infos: msg_list = session_info.session.flush_browser_queue() for msg in msg_list: try: self._send_message(session_info, msg) except tornado.websocket.WebSocketClosedError: self._close_app_session(session_info.session.id) yield yield yield tornado.gen.sleep(0.01) elif self._state == State.NO_BROWSERS_CONNECTED: yield tornado.gen.convert_yielded( asyncio.wait( [self._must_stop.wait(), self._has_connection.wait()], return_when=asyncio.FIRST_COMPLETED, ) ) else: # Break out of the thread loop if we encounter any other state. break yield tornado.gen.convert_yielded( asyncio.wait( [self._must_stop.wait(), self._need_send_data.wait()], return_when=asyncio.FIRST_COMPLETED, ) ) # Shut down all AppSessions for session_info in list(self._session_info_by_id.values()): session_info.session.shutdown() self._set_state(State.STOPPED) except Exception: # Can't just re-raise here because co-routines use Tornado # exceptions for control flow, which appears to swallow the reraised # exception. traceback.print_exc() LOGGER.info( """ Please report this bug at https://github.com/streamlit/streamlit/issues. """ ) finally: self._on_stopped() def _send_message(self, session_info: SessionInfo, msg: ForwardMsg) -> None: """Send a message to a client. If the client is likely to have already cached the message, we may instead send a "reference" message that contains only the hash of the message. Parameters ---------- session_info : SessionInfo The SessionInfo associated with websocket msg : ForwardMsg The message to send to the client """ msg.metadata.cacheable = is_cacheable_msg(msg) msg_to_send = msg if msg.metadata.cacheable: populate_hash_if_needed(msg) if self._message_cache.has_message_reference( msg, session_info.session, session_info.script_run_count ): # This session has probably cached this message. Send # a reference instead. LOGGER.debug("Sending cached message ref (hash=%s)" % msg.hash) msg_to_send = create_reference_msg(msg) # Cache the message so it can be referenced in the future. # If the message is already cached, this will reset its # age. LOGGER.debug("Caching message (hash=%s)" % msg.hash) self._message_cache.add_message( msg, session_info.session, session_info.script_run_count ) # If this was a `script_finished` message, we increment the # script_run_count for this session, and update the cache if ( msg.WhichOneof("type") == "script_finished" and msg.script_finished == ForwardMsg.FINISHED_SUCCESSFULLY ): LOGGER.debug( "Script run finished successfully; " "removing expired entries from MessageCache " "(max_age=%s)", config.get_option("global.maxCachedMessageAge"), ) session_info.script_run_count += 1 self._message_cache.remove_expired_session_entries( session_info.session, session_info.script_run_count ) # Ship it off! session_info.ws.write_message(serialize_forward_msg(msg_to_send), binary=True) def _enqueued_some_message(self) -> None: self._ioloop.add_callback(self._need_send_data.set) def stop(self, from_signal=False) -> None: click.secho(" Stopping...", fg="blue") self._set_state(State.STOPPING) if from_signal: self._ioloop.add_callback_from_signal(self._must_stop.set) else: self._ioloop.add_callback(self._must_stop.set) def _on_stopped(self) -> None: """Called when our runloop is exiting, to shut down the ioloop. This will end our process. (Tests can patch this method out, to prevent the test's ioloop from being shutdown.) """ self._ioloop.stop() def _create_app_session(self, ws: WebSocketHandler) -> AppSession: """Register a connected browser with the server. Parameters ---------- ws : _BrowserWebSocketHandler The newly-connected websocket handler. Returns ------- AppSession The newly-created AppSession for this browser connection. """ session_data = SessionData(self._main_script_path, self._command_line) local_sources_watcher = LocalSourcesWatcher(session_data) is_public_cloud_app = False try: header_content = ws.request.headers["X-Streamlit-User"] payload = base64.b64decode(header_content) user_obj = json.loads(payload) email = user_obj["email"] is_public_cloud_app = user_obj["isPublicCloudApp"] except (KeyError, binascii.Error, json.decoder.JSONDecodeError): email = "[email protected]" user_info: Dict[str, Optional[str]] = dict() if is_public_cloud_app: user_info["email"] = None else: user_info["email"] = email session = AppSession( ioloop=self._ioloop, session_data=session_data, uploaded_file_manager=self._uploaded_file_mgr, message_enqueued_callback=self._enqueued_some_message, local_sources_watcher=local_sources_watcher, user_info=user_info, ) LOGGER.debug( "Created new session for ws %s. Session ID: %s", id(ws), session.id ) assert ( session.id not in self._session_info_by_id ), f"session.id '{session.id}' registered multiple times!" self._session_info_by_id[session.id] = SessionInfo(ws, session) self._set_state(State.ONE_OR_MORE_BROWSERS_CONNECTED) self._has_connection.notify_all() return session def _close_app_session(self, session_id: str) -> None: """Shutdown and remove a AppSession. This function may be called multiple times for the same session, which is not an error. (Subsequent calls just no-op.) Parameters ---------- session_id : str The AppSession's id string. """ if session_id in self._session_info_by_id: session_info = self._session_info_by_id[session_id] del self._session_info_by_id[session_id] session_info.session.shutdown() if len(self._session_info_by_id) == 0: self._set_state(State.NO_BROWSERS_CONNECTED) class _BrowserWebSocketHandler(WebSocketHandler): """Handles a WebSocket connection from the browser""" def initialize(self, server: Server) -> None: self._server = server self._session: Optional[AppSession] = None # The XSRF cookie is normally set when xsrf_form_html is used, but in a pure-Javascript application # that does not use any regular forms we just need to read the self.xsrf_token manually to set the # cookie as a side effect. # See https://www.tornadoweb.org/en/stable/guide/security.html#cross-site-request-forgery-protection # for more details. if config.get_option("server.enableXsrfProtection"): _ = self.xsrf_token def check_origin(self, origin: str) -> bool: """Set up CORS.""" return super().check_origin(origin) or is_url_from_allowed_origins(origin) def open(self, *args, **kwargs) -> Optional[Awaitable[None]]: self._session = self._server._create_app_session(self) return None def on_close(self) -> None: if not self._session: return self._server._close_app_session(self._session.id) self._session = None def get_compression_options(self) -> Optional[Dict[Any, Any]]: """Enable WebSocket compression. Returning an empty dict enables websocket compression. Returning None disables it. (See the docstring in the parent class.) """ if config.get_option("server.enableWebsocketCompression"): return {} return None @tornado.gen.coroutine def on_message(self, payload: bytes) -> None: if not self._session: return msg = BackMsg() try: msg.ParseFromString(payload) msg_type = msg.WhichOneof("type") LOGGER.debug("Received the following back message:\n%s", msg) if msg_type == "rerun_script": self._session.handle_rerun_script_request(msg.rerun_script) elif msg_type == "load_git_info": self._session.handle_git_information_request() elif msg_type == "clear_cache": self._session.handle_clear_cache_request() elif msg_type == "set_run_on_save": self._session.handle_set_run_on_save_request(msg.set_run_on_save) elif msg_type == "stop_script": self._session.handle_stop_script_request() elif msg_type == "close_connection": if config.get_option("global.developmentMode"): Server.get_current().stop() else: LOGGER.warning( "Client tried to close connection when " "not in development mode" ) else: LOGGER.warning('No handler for "%s"', msg_type) except BaseException as e: LOGGER.error(e) self._session.handle_backmsg_exception(e) def _set_tornado_log_levels() -> None: if not config.get_option("global.developmentMode"): # Hide logs unless they're super important. # Example of stuff we don't care about: 404 about .js.map files. logging.getLogger("tornado.access").setLevel(logging.ERROR) logging.getLogger("tornado.application").setLevel(logging.ERROR) logging.getLogger("tornado.general").setLevel(logging.ERROR)
from streamlit.hello import Hello return self._main_script_path == Hello.__file__
transformsgpu.py
''' Code taken from https://github.com/WilhelmT/ClassMix Slightly modified ''' import kornia import torch import random import torch.nn as nn def normalize_rgb(data, dataset): """ Args: data: data to normalize BxCxWxH dataset: name of the dataset to normalize Returns: normalized data as (x-mean)/255 """ if dataset == 'pascal_voc': mean = (122.6789143, 116.66876762, 104.00698793) # rgb elif dataset == 'cityscapes': mean = (73.15835921, 82.90891754, 72.39239876) # rgb else: mean = (127.5, 127.5, 127.5 ) mean = torch.Tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3).cuda() data_norm = ((data-mean)/255.0) return data_norm def normalize_bgr(data, dataset): """ Args: data: data to normalize BxCxWxH dataset: name of the dataset to normalize Returns: normalized data as (x-mean)/255 """ if dataset == 'pascal_voc': mean = (104.00698793, 116.66876762, 122.6789143) # bgr elif dataset == 'cityscapes': mean = (72.39239876, 82.90891754, 73.15835921) # bgr else: mean = (127.5, 127.5, 127.5 ) mean = torch.Tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3).cuda() data_norm = ((data-mean)/255.0) return data_norm def grayscale(grayscale, data = None, target = None, probs = None): """ Args: grayscale: boolean whether to apply grayscale augmentation data: input data to augment BxCxWxH target: labels to augment BxWxH probs: probability masks to augment BxCxWxH Returns: data is converted from rgb to grayscale if [grayscale] is True target and probs are also returned with no modifications applied """ if not (data is None): if grayscale and data.shape[1]==3: seq = nn.Sequential(kornia.augmentation.RandomGrayscale(p=1.) )
def colorJitter(colorJitter, data = None, target = None, s=0.1, probs = None): """ Args: colorJitter: boolean whether to apply colorJitter augmentation data: input data to augment BxCxWxH target: labels to augment BxWxH probs: probability masks to augment BxCxWxH s: brightness and contrast strength of the color jitter Returns: colorJitter is applied to data if [colorJitter] is True target and probs are also returned with no modifications applied """ if not (data is None): if colorJitter and data.shape[1]==3: seq = nn.Sequential(kornia.augmentation.ColorJitter(brightness=s,contrast=s,saturation=s/2.,hue=s/3.)) data = seq(data/255.)*255. # assumes [0,1] return data, target, probs def gaussian_blur(blur, data = None, target = None, min_sigma=0.2, max_sigma=3, probs = None): """ Args: blur: boolean whether to apply blur data: input data to augment BxCxWxH target: labels to augment BxWxH probs: probability masks to augment BxCxWxH min_sigma: minimum sigma value for the gaussian blur max_sigma: maximum sigma value for the gaussian blur Returns: gaussian blur is applied to data if [blur] is True target and probs are also returned with no modifications applied """ if not (data is None): if blur and data.shape[1]==3: seq = nn.Sequential(kornia.filters.GaussianBlur2d(kernel_size=(23, 23), sigma=(min_sigma, max_sigma))) data = seq(data) return data, target, probs def flip(flip, data = None, target = None, probs = None): """ Args: flip: boolean whether to apply flip augmentation data: input data to augment BxCxWxH target: labels to augment BxWxH probs: probability masks to augment BxCxWxH Returns: data, target and probs are flipped if the boolean flip is True """ if flip: if not (data is None): data = torch.flip(data,(3,)) if not (target is None): target = torch.flip(target,(2,)) if not (probs is None): probs = torch.flip(probs,(2,)) return data, target, probs def solarize(solarize, data = None, target = None, probs = None): """ Args: solarize: boolean whether to apply solarize augmentation data: input data to augment BxCxWxH target: labels to augment BxWxH probs: probability masks to augment BxCxWxH Returns: data, target, probs, where data is solarized if [solarize] is True """ if not (data is None): if solarize and data.shape[1]==3: seq = nn.Sequential(kornia.augmentation.RandomSolarize((0, 1))) data = seq(data.cpu()/255.).cuda()*255. return data, target, probs def mix(mask, data = None, target = None, probs = None): """ Applies classMix augmentation: https://openaccess.thecvf.com/content/WACV2021/papers/Olsson_ClassMix_Segmentation-Based_Data_Augmentation_for_Semi-Supervised_Learning_WACV_2021_paper.pdf Args: mask: masks for applying ClassMix. A list of B elements of CxWxH tensors data: input data to augment BxCxWxH target: labels to augment BxWxH probs: probability masks to augment BxCxWxH Returns: data, target and probs augmented with classMix """ if not (data is None): if mask.shape[0] == data.shape[0]: data = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * data[i] + mask[(i + 1) % data.shape[0]] * data[(i + 1) % data.shape[0]]).unsqueeze(0) for i in range(data.shape[0])]) if not (target is None): target = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * target[i] + mask[(i + 1) % data.shape[0]] * target[(i + 1) % target.shape[0]]).unsqueeze(0) for i in range(target.shape[0])]) if not (probs is None): probs = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * probs[i] + mask[(i + 1) % data.shape[0]] * probs[(i + 1) % probs.shape[0]]).unsqueeze(0) for i in range(probs.shape[0])]) return data, target, probs def random_scale_crop(scale, data = None, target = None, ignore_label=255, probs = None): """ Args: scale: scale ratio. Float data: input data to augment BxCxWxH target: labels to augment BxWxH probs: probability masks to augment BxCxWxH ignore_label: integeer value that defines the ignore class in the datasets for the labels Returns: data, target and prob, after applied a scaling operation. output resolution is preserve as the same as the input resolution WxH """ if scale != 1: init_size_w = data.shape[2] init_size_h = data.shape[3] # scale data, labels and probs data = nn.functional.interpolate(data, scale_factor=scale, mode='bilinear', align_corners=True, recompute_scale_factor=True) if target is not None: target = nn.functional.interpolate(target.unsqueeze(1).float(), scale_factor=scale, mode='nearest', recompute_scale_factor=True).long().squeeze(1) if probs is not None: probs = nn.functional.interpolate(probs.unsqueeze(1), scale_factor=scale, mode='bilinear', align_corners=True, recompute_scale_factor=True).squeeze(1) final_size_w = data.shape[2] final_size_h = data.shape[3] diff_h = init_size_h - final_size_h diff_w = init_size_w - final_size_w if scale < 1: # add padding if needed if diff_h % 2 == 1: pad = nn.ConstantPad2d((diff_w//2, diff_w//2+1, diff_h//2+1, diff_h//2), 0) else: pad = nn.ConstantPad2d((diff_w//2, diff_w//2, diff_h//2, diff_h//2), 0) data = pad(data) if probs is not None: probs = pad(probs) # padding with ignore label to add to labels if diff_h % 2 == 1: pad = nn.ConstantPad2d((diff_w//2, diff_w//2+1, diff_h//2+1, diff_h//2), ignore_label) else: pad = nn.ConstantPad2d((diff_w//2, diff_w//2, diff_h//2, diff_h//2), ignore_label) if target is not None: target = pad(target) else: # crop if needed w = random.randint(0, data.shape[2] - init_size_w) h = random.randint(0, data.shape[3] - init_size_h) data = data [:,:,h:h+init_size_h,w:w + init_size_w] if probs is not None: probs = probs [:,h:h+init_size_h,w:w + init_size_w] if target is not None: target = target [:,h:h+init_size_h,w:w + init_size_w] return data, target, probs
data = seq(data) return data, target, probs
scoreboard.rs
use stdweb::traits::*; use stdweb::web::{document, Element}; pub struct Scoreboard { pub scoreboard: Element, pub best: Element, }
.query_selector(attr_id_scoreboard) .unwrap() .unwrap(); let best: Element = document().query_selector(attr_id_best).unwrap().unwrap(); Scoreboard { scoreboard, best } } }
impl Scoreboard { pub fn new(attr_id_scoreboard: &str, attr_id_best: &str) -> Self { let scoreboard: Element = document()
email.js
module.exports = { host: 'smtp.mailtrap.io',
port: 2525, user: '4599c20de5b18b', pass: '9c0864f2e154ab', };
BlogAuthor.js
import React from 'react'; /** * React component implementation. * * @author dfilipovic * @namespace ReactApp * @class BlogAuthor * @extends ReactApp */ export class BlogAuthor extends React.Component { //------------------------------------------------------------------------------------------------------------------ // React methods //------------------------------------------------------------------------------------------------------------------ //------------------------------------------------------------------------------------------------------------------ // Render methods //------------------------------------------------------------------------------------------------------------------ /** * Renders the component * * @method render * @returns {XML} * @public */ render () { return ( <div className="blog-post-author mb50 pt30 bt-solid-1"> <img src={this.props.author.avatar} className="img-circle" alt="image" /> <span className="blog-post-author-name"> {this.props.author.name} </span> <a href={this.props.author.url}><i className="fa fa-twitter"></i></a> <p> {this.props.author.description} </p> </div> );
BlogAuthor.propTypes = { author: React.PropTypes.object }; BlogAuthor.defaultProps = { author: { avatar: '', name: '', url: '', description: '' } }; export default BlogAuthor;
} }
LwFnet.py
import torch.nn as nn import torchvision import copy import torch import numpy as np from .bnneck import BNClassifier, Classifier, Classifier_without_bias from torch.autograd import Variable def weights_init_kaiming(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out') nn.init.constant_(m.bias, 0.0) elif classname.find('Conv') != -1: nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in') if m.bias is not None: nn.init.constant_(m.bias, 0.0) elif classname.find('BatchNorm') != -1: if m.affine: nn.init.constant_(m.weight, 1.0) nn.init.constant_(m.bias, 0.0) def weights_init_classifier(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: nn.init.normal_(m.weight, std=0.001) if m.bias: nn.init.constant_(m.bias, 0.0) class GlobalPoolFlat(nn.Module): def __init__(self, pool_mode='avg'): super(GlobalPoolFlat, self).__init__() if pool_mode == 'avg': self.pool = nn.AdaptiveAvgPool2d(1) else: self.pool = nn.AdaptiveMaxPool2d(1) def forward(self, x): x = self.pool(x) if len(x.size()) == 4: n, c = x.size(0), x.size(1) else: assert len(x.size()) == 4 flatted = x.view(n, -1) assert flatted.size(1) == c return flatted class LwFNet(nn.Module): def __init__(self, class_num_list, pretrained=True): super(LwFNet, self).__init__() self.class_num_list = class_num_list # backbone and optimize its architecture resnet = torchvision.models.resnet50(pretrained=pretrained) resnet.layer4[0].conv2.stride = (1, 1) resnet.layer4[0].downsample[0].stride = (1, 1) self.backbone = nn.Sequential( copy.deepcopy(resnet.conv1), copy.deepcopy(resnet.bn1), # copy.deepcopy(resnet.relu), # no relu copy.deepcopy(resnet.maxpool), copy.deepcopy(resnet.layer1), copy.deepcopy(resnet.layer2), copy.deepcopy(resnet.layer3[0])) # conv4_1 # cnn backbone res_conv4 = nn.Sequential(*resnet.layer3[1:]) res_conv5 = resnet.layer4 self.feature_dim = resnet.fc.in_features self.encoder_feature = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5), GlobalPoolFlat(pool_mode='avg'), ) del resnet # classifier self.classifier_dict = nn.ModuleDict() for step, num in enumerate(self.class_num_list): self.classifier_dict[f'step:{step}'] = BNClassifier(self.feature_dim, num) def forward(self, x, current_step=0): if isinstance(current_step, list): feature_maps = self.backbone(x) cls_score_list = [] features = self.encoder_feature(feature_maps) for c_s in current_step: bned_features, cls_score = self.classifier_dict[f'step:{c_s}'](features) cls_score_list.append(cls_score) if self.training: # cls_score = torch.cat(cls_score_list, dim=1) return features, cls_score_list, feature_maps else: return bned_features, feature_maps else: feature_maps = self.backbone(x) features = self.encoder_feature(feature_maps) bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](features) if self.training: return features, cls_score, feature_maps else: return bned_features, feature_maps def classify_latent_codes(self, latent_codes, current_step): if isinstance(current_step, list): cls_score_list = [] for c_s in current_step: bned_features, cls_score = self.classifier_dict[f'step:{c_s}'](latent_codes) cls_score_list.append(cls_score) if self.training: # cls_score = torch.cat(cls_score_list, dim=1) return None, cls_score_list, None else: return bned_features, None else: bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](latent_codes) if self.training: return None, cls_score, None else: return bned_features, None class LwFNet_without_bn(nn.Module): def
(self, class_num_list, pretrained=True): super(LwFNet_without_bn, self).__init__() self.class_num_list = class_num_list # backbone and optimize its architecture resnet = torchvision.models.resnet50(pretrained=pretrained) resnet.layer4[0].conv2.stride = (1, 1) resnet.layer4[0].downsample[0].stride = (1, 1) self.backbone = nn.Sequential( copy.deepcopy(resnet.conv1), copy.deepcopy(resnet.bn1), # copy.deepcopy(resnet.relu), # no relu copy.deepcopy(resnet.maxpool), copy.deepcopy(resnet.layer1), copy.deepcopy(resnet.layer2), copy.deepcopy(resnet.layer3[0])) # conv4_1 # cnn backbone res_conv4 = nn.Sequential(*resnet.layer3[1:]) res_conv5 = resnet.layer4 feature_dim = resnet.fc.in_features self.encoder_feature = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5), GlobalPoolFlat(pool_mode='avg'), ) del resnet # classifier self.classifier_dict = nn.ModuleDict() for step, num in enumerate(self.class_num_list): self.classifier_dict[f'step:{step}'] = Classifier(feature_dim, num) def forward(self, x, current_step): if isinstance(current_step, list): feature_maps = self.backbone(x) cls_score_list = [] features = self.encoder_feature(feature_maps) for c_s in current_step: bned_features, cls_score = self.classifier_dict[f'step:{c_s}'](features) cls_score_list.append(cls_score) if self.training: # cls_score = torch.cat(cls_score_list, dim=1) return features, cls_score_list, feature_maps else: return bned_features, feature_maps else: feature_maps = self.backbone(x) features = self.encoder_feature(feature_maps) bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](features) if self.training: return features, cls_score, feature_maps else: return bned_features, feature_maps def classify_featuremaps(self, featuremaps): features = self.encoder_feature(featuremaps) bned_features, cls_score = self.classifier(features) return cls_score def classify_latent_codes(self, latent_codes, current_step): bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](latent_codes) return cls_score class LwFNet_without_bn_bias(nn.Module): def __init__(self, class_num_list, pretrained=True): super(LwFNet_without_bn_bias, self).__init__() self.class_num_list = class_num_list # backbone and optimize its architecture resnet = torchvision.models.resnet50(pretrained=pretrained) resnet.layer4[0].conv2.stride = (1, 1) resnet.layer4[0].downsample[0].stride = (1, 1) self.backbone = nn.Sequential( copy.deepcopy(resnet.conv1), copy.deepcopy(resnet.bn1), # copy.deepcopy(resnet.relu), # no relu copy.deepcopy(resnet.maxpool), copy.deepcopy(resnet.layer1), copy.deepcopy(resnet.layer2), copy.deepcopy(resnet.layer3[0])) # conv4_1 # cnn backbone res_conv4 = nn.Sequential(*resnet.layer3[1:]) res_conv5 = resnet.layer4 feature_dim = resnet.fc.in_features self.encoder_feature = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5), GlobalPoolFlat(pool_mode='avg'), ) del resnet # classifier self.classifier_dict = nn.ModuleDict() for step, num in enumerate(self.class_num_list): self.classifier_dict[f'step:{step}'] = Classifier_without_bias(feature_dim, num) def forward(self, x, current_step): if isinstance(current_step, list): feature_maps = self.backbone(x) cls_score_list = [] features = self.encoder_feature(feature_maps) for c_s in current_step: bned_features, cls_score = self.classifier_dict[f'step:{c_s}'](features) cls_score_list.append(cls_score) if self.training: # cls_score = torch.cat(cls_score_list, dim=1) return features, cls_score_list, feature_maps else: return bned_features, feature_maps else: feature_maps = self.backbone(x) features = self.encoder_feature(feature_maps) bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](features) if self.training: return features, cls_score, feature_maps else: return bned_features, feature_maps def classify_featuremaps(self, featuremaps): features = self.encoder_feature(featuremaps) bned_features, cls_score = self.classifier(features) return cls_score def classify_latent_codes(self, latent_codes, current_step): bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](latent_codes) return cls_score
__init__
compiler.rs
use crate::errors::*; use crate::module_resolver::{ ModuleResolver }; pub struct
{ pub module_id: String, pub file_name: String, pub source_code: String, } pub struct Compiler { pub module_resolvers: Vec<Box<ModuleResolver>>, } impl Compiler { #[allow(dead_code)] pub fn new(module_resolvers: Vec<Box<ModuleResolver>>) -> Self { Self { module_resolvers } } pub fn fetch_module( &self, module_specifier: &str, containing_file: &str, ) -> FlyResult<ModuleInfo> { info!( "fetch_module {} from {}", &module_specifier, &containing_file ); for resolver in &self.module_resolvers { match resolver.resolve_module(module_specifier, containing_file) { Ok(m) => return Ok(m), Err(_err) => info!("resolver failed moving on"), }; } Err(FlyError::from(format!( "Could not resolve {} from {}", module_specifier, containing_file ))) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_resolve() { // TODO: these module ids should be normalized into a URL: // https://html.spec.whatwg.org/multipage/webappapis.html#resolve-a-module-specifier let cases = [ ( "./tests/hello.ts", ".", "././tests/hello.ts", "<cwd>/tests/hello.ts", ), ( "./hello.ts", "./tests/main.ts", "./tests/./hello.ts", "<cwd>/tests/hello.ts", ), ( "../hello.ts", "./tests/subdir/index.ts", "./tests/subdir/../hello.ts", "<cwd>/tests/hello.ts", ), ( "<cwd>/tests/hello.ts", ".", "<cwd>/tests/hello.ts", "<cwd>/tests/hello.ts", ), ]; let current_dir = std::env::current_dir().expect("current_dir failed"); let local_disk_resolver = LocalDiskModuleResolver::new(None); let resolvers = vec![Box::new(local_disk_resolver) as Box<ModuleResolver>]; let compiler = Compiler::new(resolvers); for &test in cases.iter() { let specifier = String::from(test.0).replace("<cwd>", current_dir.to_str().unwrap()); let containing_file = String::from(test.1).replace("<cwd>", current_dir.to_str().unwrap()); ; let module_info = compiler .fetch_module(&specifier, &containing_file) .unwrap(); assert_eq!( String::from(test.2).replace("<cwd>", current_dir.to_str().unwrap()), module_info.module_id, ); assert_eq!( String::from(test.3).replace("<cwd>", current_dir.to_str().unwrap()), module_info.file_name, ); } } }
ModuleInfo
kissy-xtemplate.js
#!/usr/bin/env node //noinspection JSUnresolvedFunction,JSUnresolvedVariable /** * Generate xtemplate function by xtemplate file using kissy xtemplate. * @author [email protected] */ var program = require('./lib/commander'); program .option('-p, --package-path <packagePath>', 'Set kissy package path') .option('-e, --encoding [encoding]', 'Set xtemplate file encoding', 'utf-8') .option('-w, --watch', 'Watch xtemplate file change') .parse(process.argv); var S = require('../build/kissy-nodejs'), chokidar = require('chokidar'), /*jshint camelcase: false*/ jsBeautify = require('js-beautify').js_beautify, fs = require('fs'), path = require('path'), packagePath = program.packagePath, encoding = program.encoding, cwd = process.cwd(); packagePath = path.resolve(cwd, packagePath); var tplTemplate = '' + '/*\n' + ' Generated by kissy-tpl2mod.' + '*/\n' + 'KISSY.add(\'{code}\');'; function
(str) { return str.replace(/\\/g, '/'); } function myJsBeautify(str) { var opts = { 'indent_size': '4', 'indent_char': ' ', 'preserve_newlines': true, 'brace_style': 'collapse', 'keep_array_indentation': false, 'space_after_anon_function': true }; return jsBeautify(str, opts); } S.use('xtemplate/compiler', function (S, XTemplateCompiler) { function compile(tpl, modulePath) { var tplContent = fs.readFileSync(tpl, encoding); var moduleCode = myJsBeautify( '/** Compiled By kissy-xtemplate */\n' + 'KISSY.add(function(S,require,exports,module){\n' + '/*jshint quotmark:false, loopfunc:true, indent:false, asi:true, unused:false, boss:true*/\n' + 'return ' + XTemplateCompiler.compileToStr(tplContent)) + ';\n' + '});'; fs.writeFileSync(modulePath, moduleCode, encoding); console.info('generate xtpl module: ' + modulePath + ' at ' + (new Date().toLocaleString())); } function process(filePath) { var modulePath; if (S.endsWith(filePath, '.xtpl.html')||S.endsWith(filePath, '-xtpl.html')) { modulePath = filePath.replace(/[.-]xtpl\.html$/, '-xtpl.js'); compile(filePath, modulePath); } else if (S.endsWith(filePath, '.tpl.html')) { modulePath = filePath.replace(/\.tpl\.html$/, '-tpl.js'); var tplContent = fs.readFileSync(filePath, encoding); tplContent = tplContent.replace(/\\/g, '\\') .replace(/\r?\n/g, '\\n') .replace(/'/g, '\\\''); var moduleCode = myJsBeautify(S.substitute(tplTemplate, { code: tplContent })); fs.writeFileSync(modulePath, moduleCode, encoding); console.info('generate tpl module: ' + modulePath + ' at ' + (new Date().toLocaleString())); } } if (program.watch) { var watcher = chokidar.watch(packagePath, {ignored: /^\./, persistent: true}); watcher.on('add', process).on('change', process); } else { var walk = require('walk'); //noinspection JSUnresolvedFunction var walker = walk.walk(packagePath); walker.on('file', function (root, fileStats, next) { var filePath = normalizeSlash(root + '/' + fileStats.name); process(filePath); next(); }); } });
normalizeSlash
clock.rs
/* * Copyright (c) 2017 Boucher, Antoni <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ use chrono::Local; use gtk::{ ContainerExt, Inhibit, Label, LabelExt, WidgetExt, Window, WindowType, }; use relm_derive::Msg; use relm::{connect,Relm, Update, Widget, WidgetTest, interval}; use self::Msg::*; #[derive(Msg)] enum Msg { Quit, Tick, } #[derive(Clone)] struct Win { label: Label, window: Window, } impl Update for Win { type Model = (); type ModelParam = (); type Msg = Msg; fn model(_: &Relm<Self>, _: ()) -> () { () } fn subscriptions(&mut self, relm: &Relm<Self>) { interval(relm.stream(), 1000, || Tick); } fn update(&mut self, event: Msg)
} impl Widget for Win { type Root = Window; fn root(&self) -> Self::Root { self.window.clone() } fn view(relm: &Relm<Self>, _model: Self::Model) -> Self { let label = Label::new(None); let window = Window::new(WindowType::Toplevel); window.add(&label); window.show_all(); connect!(relm, window, connect_delete_event(_, _), return (Some(Quit), Inhibit(false))); let mut win = Win { label: label, window: window, }; win.update(Tick); win } } impl WidgetTest for Win { type Widgets = Win; fn get_widgets(&self) -> Self::Widgets { self.clone() } } fn main() { Win::run(()).expect("Win::run failed"); } #[cfg(test)] mod tests { use chrono::{Local, NaiveTime}; use gtk::LabelExt; use gtk_test::wait; use crate::Win; #[test] fn label_change() { let (_component, widgets) = relm::init_test::<Win>(()).expect("init_test failed"); let label = &widgets.label; fn time_close(time1: glib::GString, time2: String) -> bool { println!("{}", time1); println!("{}", time2); let date1 = NaiveTime::parse_from_str(&time1, "%H:%M:%S").expect("parse time1"); let date2 = NaiveTime::parse_from_str(&time2, "%H:%M:%S").expect("parse time2"); (date1.signed_duration_since(date2)).num_seconds() <= 1 } let time = Local::now(); assert!(time_close(label.get_text().expect("text"), time.format("%H:%M:%S").to_string())); wait(2000); let time2 = Local::now(); assert_ne!(time, time2); assert!(time_close(label.get_text().expect("text"), time2.format("%H:%M:%S").to_string())); } }
{ match event { Tick => { let time = Local::now(); self.label.set_text(&format!("{}", time.format("%H:%M:%S"))); }, Quit => gtk::main_quit(), } }
runtime-main.5e15a8d8.js
!function(e){function r(r){for(var n,u,p=r[0],f=r[1],i=r[2],c=0,s=[];c<p.length;c++)u=p[c],Object.prototype.hasOwnProperty.call(o,u)&&o[u]&&s.push(o[u][0]),o[u]=0;for(n in f)Object.prototype.hasOwnProperty.call(f,n)&&(e[n]=f[n]);for(a&&a(r);s.length;)s.shift()();return l.push.apply(l,i||[]),t()}function t(){for(var e,r=0;r<l.length;r++){for(var t=l[r],n=!0,p=1;p<t.length;p++){var f=t[p];0!==o[f]&&(n=!1)}n&&(l.splice(r--,1),e=u(u.s=t[0]))}return e}var n={},o={1:0},l=[];function u(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,u),t.l=!0,t.exports}u.m=e,u.c=n,u.d=function(e,r,t){u.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},u.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},u.t=function(e,r){if(1&r&&(e=u(e)),8&r)return e;if(4&r&&"object"===typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(u.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)u.d(t,n,function(r){return e[r]}.bind(null,n));return t},u.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return u.d(r,"a",r),r},u.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},u.p="/sleepyowl/";var p=this.webpackJsonpsleepyowl=this.webpackJsonpsleepyowl||[],f=p.push.bind(p);p.push=r,p=p.slice();for(var i=0;i<p.length;i++)r(p[i]);var a=f;t()}([]); //# sourceMappingURL=runtime-main.5e15a8d8.js.map
main.go
package main import ( "flag" "fmt" "log" "net/http" "net/url" "strings" "github.com/JoseFeng/nsq_exporter/collector" "github.com/prometheus/client_golang/prometheus" ) // Version of nsq_exporter. Set at build time. const Version = "0.0.0.dev" var ( listenAddress = flag.String("web.listen", ":9117", "Address on which to expose metrics and web interface.") metricsPath = flag.String("web.path", "/metrics", "Path under which to expose metrics.") nsqdURL = flag.String("nsqd.addr", "http://localhost:4151/stats", "Address of the nsqd node.") enabledCollectors = flag.String("collect", "stats.topics,stats.channels", "Comma-separated list of collectors to use.") namespace = flag.String("namespace", "nsq", "Namespace for the NSQ metrics.") tlsCACert = flag.String("tls.ca_cert", "", "CA certificate file to be used for nsqd connections.") tlsCert = flag.String("tls.cert", "", "TLS certificate file to be used for client connections to nsqd.") tlsKey = flag.String("tls.key", "", "TLS key file to be used for TLS client connections to nsqd.") statsRegistry = map[string]func(namespace string) collector.StatsCollector{ "topics": collector.TopicStats, "channels": collector.ChannelStats, "clients": collector.ClientStats, } ) func main() { flag.Parse() ex, err := createNsqExecutor() if err != nil { log.Fatalf("error creating nsq executor: %v", err) } prometheus.MustRegister(ex) http.Handle(*metricsPath, prometheus.Handler()) if *metricsPath != "" && *metricsPath != "/" { http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(`<html> <head><title>NSQ Exporter</title></head> <body> <h1>NSQ Exporter</h1> <p><a href="` + *metricsPath + `">Metrics</a></p> </body> </html>`)) }) } log.Print("listening to ", *listenAddress) err = http.ListenAndServe(*listenAddress, nil) if err != nil { log.Fatal(err) } } func createNsqExecutor() (*collector.NsqExecutor, error) { nsqdURL, err := normalizeURL(*nsqdURL) if err != nil
ex, err := collector.NewNsqExecutor(*namespace, nsqdURL, *tlsCACert, *tlsCert, *tlsKey) if err != nil { log.Fatal(err) } for _, param := range strings.Split(*enabledCollectors, ",") { param = strings.TrimSpace(param) parts := strings.SplitN(param, ".", 2) if len(parts) != 2 { return nil, fmt.Errorf("invalid collector name: %s", param) } if parts[0] != "stats" { return nil, fmt.Errorf("invalid collector prefix: %s", parts[0]) } name := parts[1] c, has := statsRegistry[name] if !has { return nil, fmt.Errorf("unknown stats collector: %s", name) } ex.Use(c(*namespace)) } return ex, nil } func normalizeURL(ustr string) (string, error) { ustr = strings.ToLower(ustr) if !strings.HasPrefix(ustr, "https://") && !strings.HasPrefix(ustr, "http://") { ustr = "http://" + ustr } u, err := url.Parse(ustr) if err != nil { return "", err } if u.Path == "" { u.Path = "/stats" } u.RawQuery = "format=json" return u.String(), nil }
{ return nil, err }
p_Alloy improves the effective.rs
<?xml version="1.0" encoding="UTF-8"?> <WebElementEntity> <description></description> <name>p_Alloy improves the effective</name> <tag></tag> <elementGuidId>baa9945a-a66a-4c46-b662-7495601f9519</elementGuidId> <selectorMethod>BASIC</selectorMethod> <useRalativeImagePath>false</useRalativeImagePath> <webElementProperties>
<type>Main</type> <value>p</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>class</name> <type>Main</type> <value>introduction</value> </webElementProperties> <webElementProperties> <isSelected>true</isSelected> <matchCondition>equals</matchCondition> <name>text</name> <type>Main</type> <value>Alloy improves the effectiveness of project teams by putting the proper tools in your hands. Communication is made easy and inexpensive, no matter where team members are located. </value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>xpath</name> <type>Main</type> <value>/html[1]/body[1]/div[@class=&quot;container&quot;]/div[@class=&quot;row&quot;]/div[@class=&quot;span8&quot;]/p[@class=&quot;introduction&quot;]</value> </webElementProperties> </WebElementEntity>
<isSelected>true</isSelected> <matchCondition>equals</matchCondition> <name>tag</name>
with_parse.rs
#![feature(bench_black_box)] #![feature(test)] extern crate test; use std::hint::black_box; use swc_common::FileName; use swc_ecma_codegen::{self, Emitter}; use swc_ecma_parser::{Parser, StringInput, Syntax}; use test::Bencher; const COLORS_JS: &str = r#" 'use strict'; /** * Extract red color out of a color integer: * * 0x00DEAD -> 0x00 * * @param {Number} color * @return {Number} */ function red( color ) { let foo = 3.14; return color >> 16; } /** * Extract green out of a color integer: * * 0x00DEAD -> 0xDE * * @param {Number} color * @return {Number} */ function green( color ) { return ( color >> 8 ) & 0xFF; } /** * Extract blue color out of a color integer: * * 0x00DEAD -> 0xAD * * @param {Number} color * @return {Number} */ function blue( color ) { return color & 0xFF; } /** * Converts an integer containing a color such as 0x00DEAD to a hex * string, such as '#00DEAD'; * * @param {Number} int * @return {String} */ function intToHex( int ) { const mask = '#000000'; const hex = int.toString( 16 ); return mask.substring( 0, 7 - hex.length ) + hex; } /** * Converts a hex string containing a color such as '#00DEAD' to * an integer, such as 0x00DEAD; * * @param {Number} num * @return {String} */ function hexToInt( hex ) { return parseInt( hex.substring( 1 ), 16 ); } module.exports = { red, green, blue, intToHex, hexToInt, }; "#; const LARGE_PARTIAL_JS: &str = include_str!("large-partial.js"); fn
(b: &mut Bencher, s: &str) { b.bytes = s.len() as _; let _ = ::testing::run_test(true, |cm, handler| { b.iter(|| { let fm = cm.new_source_file(FileName::Anon, s.into()); let mut parser = Parser::new(Syntax::default(), StringInput::from(&*fm), None); let mut src_map_buf = vec![]; let module = parser .parse_module() .map_err(|e| e.into_diagnostic(handler).emit()) .unwrap(); for err in parser.take_errors() { err.into_diagnostic(handler).emit(); } let mut buf = vec![]; { let mut emitter = Emitter { cfg: swc_ecma_codegen::Config { ..Default::default() }, comments: None, cm: cm.clone(), wr: Box::new(swc_ecma_codegen::text_writer::JsWriter::new( cm.clone(), "\n", &mut buf, Some(&mut src_map_buf), )), }; let _ = emitter.emit_module(&module); } black_box(buf); let srcmap = cm.build_source_map(&mut src_map_buf); black_box(srcmap); }); Ok(()) }); } #[bench] fn colors(b: &mut Bencher) { bench_emitter(b, COLORS_JS) } #[bench] fn large_partial(b: &mut Bencher) { bench_emitter(b, LARGE_PARTIAL_JS) }
bench_emitter
tls.rs
use native_tls::{Certificate, Identity, TlsConnectorBuilder}; use openssl::{ pkcs12::Pkcs12, pkey::{PKey, Private}, x509::X509, }; use serde::{Deserialize, Serialize}; use snafu::{ResultExt, Snafu}; use std::fmt; use std::fs::File; use std::io::Read; use std::path::{Path, PathBuf}; #[derive(Debug, Snafu)] enum TlsError { #[snafu(display("Could not open {} file {:?}: {}", note, filename, source))] FileOpenFailed { note: &'static str, filename: PathBuf, source: std::io::Error, }, #[snafu(display("Could not read {} file {:?}: {}", note, filename, source))] FileReadFailed { note: &'static str, filename: PathBuf, source: std::io::Error, }, #[snafu(display("Could not set TCP TLS identity: {}", source))] TlsIdentityError { source: native_tls::Error }, #[snafu(display("Could not export identity to DER: {}", source))] DerExportError { source: openssl::error::ErrorStack }, #[snafu(display("Could not parse certificate in {:?}: {}", filename, source))] CertificateParseError { filename: PathBuf, source: native_tls::Error, }, #[snafu(display("Must specify both TLS key_file and crt_file"))] MissingCrtKeyFile, #[snafu(display("Could not parse X509 certificate in {:?}: {}", filename, source))] X509ParseError { filename: PathBuf, source: openssl::error::ErrorStack, }, #[snafu(display("Could not parse private key in {:?}: {}", filename, source))] PrivateKeyParseError { filename: PathBuf, source: openssl::error::ErrorStack, }, #[snafu(display("Could not build PKCS#12 archive for identity: {}", source))] Pkcs12Error { source: openssl::error::ErrorStack }, #[snafu(display("Could not parse identity in {:?}: {}", filename, source))] IdentityParseError { filename: PathBuf, source: native_tls::Error, }, } /// Standard TLS connector options #[derive(Clone, Debug, Default, Deserialize, Serialize)] pub struct TlsOptions { pub verify_certificate: Option<bool>, pub verify_hostname: Option<bool>, pub ca_path: Option<PathBuf>, pub crt_path: Option<PathBuf>, pub key_path: Option<PathBuf>, pub key_pass: Option<String>, } /// Directly usable settings for TLS connectors #[derive(Clone, Default)] pub struct TlsSettings { accept_invalid_certificates: bool, accept_invalid_hostnames: bool, authority: Option<Certificate>, identity: Option<IdentityStore>, // native_tls::Identity doesn't implement Clone yet } #[derive(Clone)] pub struct IdentityStore(Vec<u8>, String); impl TlsSettings { pub fn from_options(options: &Option<TlsOptions>) -> crate::Result<Self> { let default = TlsOptions::default(); let options = options.as_ref().unwrap_or(&default); if options.verify_certificate == Some(false) { warn!("`verify_certificate` is DISABLED, this may lead to security vulnerabilities"); } if options.verify_hostname == Some(false) { warn!("`verify_hostname` is DISABLED, this may lead to security vulnerabilities"); } if options.key_path.is_some() && options.crt_path.is_none() { return Err(TlsError::MissingCrtKeyFile.into()); } let authority = match options.ca_path { None => None, Some(ref path) => Some(load_certificate(path)?), }; let identity = match options.crt_path { None => None, Some(ref crt_path) => { let name = crt_path.to_string_lossy().to_string(); let cert_data = open_read(crt_path, "certificate")?; let key_pass: &str = options.key_pass.as_ref().map(|s| s.as_str()).unwrap_or(""); match Identity::from_pkcs12(&cert_data, key_pass) { Ok(_) => Some(IdentityStore(cert_data, key_pass.to_string())), Err(err) => { if options.key_path.is_some() { return Err(err.into()); } let crt = load_x509(crt_path)?; let key_path = options.key_path.as_ref().unwrap(); let key = load_key(&key_path, &options.key_pass)?; let pkcs12 = Pkcs12::builder() .build("", &name, &key, &crt) .context(Pkcs12Error)?; let identity = pkcs12.to_der().context(DerExportError)?; // Build the resulting Identity, but don't store it, as // it cannot be cloned. This is just for error // checking. let _identity = Identity::from_pkcs12(&identity, "").context(TlsIdentityError)?; Some(IdentityStore(identity, "".into())) } } } }; Ok(Self { accept_invalid_certificates: !options.verify_certificate.unwrap_or(true), accept_invalid_hostnames: !options.verify_hostname.unwrap_or(true), authority, identity, }) } } impl fmt::Debug for TlsSettings { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
} pub trait TlsConnectorExt { fn use_tls_settings(&mut self, settings: TlsSettings) -> &mut Self; } impl TlsConnectorExt for TlsConnectorBuilder { fn use_tls_settings(&mut self, settings: TlsSettings) -> &mut Self { self.danger_accept_invalid_certs(settings.accept_invalid_certificates); self.danger_accept_invalid_hostnames(settings.accept_invalid_hostnames); if let Some(certificate) = settings.authority { self.add_root_certificate(certificate); } if let Some(identity) = settings.identity { // This data was test-built previously, so we can just use // it here and expect the results will not fail. This can // all be reworked when `native_tls::Identity` gains the // Clone impl. let identity = Identity::from_pkcs12(&identity.0, &identity.1).expect("Could not build identity"); self.identity(identity); } self } } /// Load a `native_tls::Certificate` (X.509) from a named file fn load_certificate(filename: &Path) -> crate::Result<Certificate> { let data = open_read(filename, "certificate")?; Ok(Certificate::from_der(&data) .or_else(|_| Certificate::from_pem(&data)) .with_context(|| CertificateParseError { filename })?) } /// Load a private key from a named file fn load_key(filename: &Path, pass_phrase: &Option<String>) -> crate::Result<PKey<Private>> { let data = open_read(filename, "key")?; match pass_phrase { None => Ok(PKey::private_key_from_der(&data) .or_else(|_| PKey::private_key_from_pem(&data)) .with_context(|| PrivateKeyParseError { filename })?), Some(phrase) => Ok( PKey::private_key_from_pkcs8_passphrase(&data, phrase.as_bytes()) .or_else(|_| PKey::private_key_from_pem_passphrase(&data, phrase.as_bytes())) .with_context(|| PrivateKeyParseError { filename })?, ), } } /// Load an X.509 certificate from a named file fn load_x509(filename: &Path) -> crate::Result<X509> { let data = open_read(filename, "certificate")?; Ok(X509::from_der(&data) .or_else(|_| X509::from_pem(&data)) .with_context(|| X509ParseError { filename })?) } fn open_read(filename: &Path, note: &'static str) -> crate::Result<Vec<u8>> { let mut text = Vec::<u8>::new(); File::open(filename) .with_context(|| FileOpenFailed { note, filename })? .read_to_end(&mut text) .with_context(|| FileReadFailed { note, filename })?; Ok(text) }
{ f.debug_struct("TlsSettings") .field( "accept_invalid_certificates", &self.accept_invalid_certificates, ) .field("accept_invalid_hostnames", &self.accept_invalid_hostnames) .finish() }
mod.rs
#![cfg(feature = "full")] use { crate::{ pubkey::Pubkey, signature::{PresignerError, Signature}, transaction::TransactionError, }, itertools::Itertools, thiserror::Error, }; pub mod keypair; pub mod null_signer; pub mod presigner; pub mod signers; #[derive(Debug, Error, PartialEq)] pub enum SignerError { #[error("keypair-pubkey mismatch")] KeypairPubkeyMismatch, #[error("not enough signers")] NotEnoughSigners, #[error("transaction error")] TransactionError(#[from] TransactionError), #[error("custom error: {0}")] Custom(String), // Presigner-specific Errors #[error("presigner error")] PresignerError(#[from] PresignerError), // Remote Keypair-specific Errors #[error("connection error: {0}")] Connection(String), #[error("invalid input: {0}")] InvalidInput(String), #[error("no device found")] NoDeviceFound, #[error("{0}")] Protocol(String), #[error("{0}")] UserCancel(String), #[error("too many signers")] TooManySigners, } /// The `Signer` trait declares operations that all digital signature providers /// must support. It is the primary interface by which signers are specified in /// `Transaction` signing interfaces pub trait Signer { /// Infallibly gets the implementor's public key. Returns the all-zeros /// `Pubkey` if the implementor has none. fn pubkey(&self) -> Pubkey { self.try_pubkey().unwrap_or_default() } /// Fallibly gets the implementor's public key fn try_pubkey(&self) -> Result<Pubkey, SignerError>; /// Infallibly produces an Ed25519 signature over the provided `message` /// bytes. Returns the all-zeros `Signature` if signing is not possible. fn sign_message(&self, message: &[u8]) -> Signature { self.try_sign_message(message).unwrap_or_default() } /// Fallibly produces an Ed25519 signature over the provided `message` bytes. fn try_sign_message(&self, message: &[u8]) -> Result<Signature, SignerError>; /// Whether the impelmentation requires user interaction to sign fn is_interactive(&self) -> bool; } impl<T> From<T> for Box<dyn Signer> where T: Signer + 'static, { fn from(signer: T) -> Self { Box::new(signer) } } impl PartialEq for dyn Signer { fn eq(&self, other: &dyn Signer) -> bool { self.pubkey() == other.pubkey() } } impl std::fmt::Debug for dyn Signer { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { write!(fmt, "Signer: {:?}", self.pubkey()) } } /// Removes duplicate signers while preserving order. O(n²) pub fn u
signers: Vec<&dyn Signer>) -> Vec<&dyn Signer> { signers.into_iter().unique_by(|s| s.pubkey()).collect() } #[cfg(test)] mod tests { use {super::*, crate::signer::keypair::Keypair}; fn pubkeys(signers: &[&dyn Signer]) -> Vec<Pubkey> { signers.iter().map(|x| x.pubkey()).collect() } #[test] fn test_unique_signers() { let alice = Keypair::new(); let bob = Keypair::new(); assert_eq!( pubkeys(&unique_signers(vec![&alice, &bob, &alice])), pubkeys(&[&alice, &bob]) ); } }
nique_signers(
timezone.go
package components import ( "fmt" "net/url" "time" "github.com/pauldemarco/caldav-go/icalendar/values" ) type TimeZone struct { // defines the persistent, globally unique identifier for the calendar component. Id string `ical:"tzid,required"` // the location name, as defined by the standards body ExtLocationName string `ical:"x-lic-location,omitempty"` // defines a Uniform Resource Locator (URL) associated with the iCalendar object. Url *values.Url `ical:"tzurl,omitempty"` // specifies the date and time that the information associated with the calendar component was last revised in the // calendar store. // Note: This is analogous to the modification date and time for a file in the file system. LastModified *values.DateTime `ical:"last-modified,omitempty"` Daylight []*Daylight `ical:",omitempty"` Standard []*Standard `ical:",omitempty"` } type Daylight struct { DateStart *values.DateTime `ical:"dtstart,omitempty"` RDates *values.RecurrenceDateTimes `ical:",omitempty"` TzName string `ical:"tzname,omitempty"` TzOffsetFrom string `ical:tzoffsetfrom,omitempty` TzOffsetTo string `ical:tzoffsetto,omitempty` } func (*Daylight) EncodeICalTag() (string, error) { return "DAYLIGHT", nil } type Standard Daylight func (*Standard) EncodeICalTag() (string, error) { return "STANDARD", nil } func NewDynamicTimeZone(location *time.Location) *TimeZone
{ t := new(TimeZone) t.Id = location.String() t.ExtLocationName = location.String() t.Url = values.NewUrl(url.URL{ Scheme: "http", Host: "tzurl.org", Path: fmt.Sprintf("/zoneinfo/%s", t.Id), }) return t }
chats.py
""" Chatbox API """ import os from bottle import get, local, post, request import yaml from codalab.objects.chat_box_qa import ChatBoxQA from codalab.server.authenticated_plugin import AuthenticatedPlugin @get('/chats', apply=AuthenticatedPlugin()) def get_chat_box(): """ Return a list of chats that the current user has had """ query = {'user_id': request.user.user_id} return { 'chats': local.model.get_chat_log_info(query), 'root_user_id': local.model.root_user_id, 'system_user_id': local.model.system_user_id, } @post('/chats', apply=AuthenticatedPlugin()) def post_chat_box(): """ Add the chat to the log. Return an auto response, if the chat is directed to the system. Otherwise, return an updated chat list of the sender. """ recipient_user_id = request.POST.get('recipientUserId', None) message = request.POST.get('message', None) worksheet_uuid = request.POST.get('worksheetId', -1) bundle_uuid = request.POST.get('bundleId', -1) info = { 'sender_user_id': request.user.user_id, 'recipient_user_id': recipient_user_id, 'message': message, 'worksheet_uuid': worksheet_uuid, 'bundle_uuid': bundle_uuid, } chats = add_chat_log_info(info) return {'chats': chats} # @get('/faqs') def get_faq():
'answer': { 'response': 'You can do cl upload or click Update Bundle.', 'command': 'cl upload <file_path>' } } Currently disabled. Needs further work. """ file_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), '../objects/chat_box_qa.yaml' ) with open(file_path, 'r') as stream: content = yaml.safe_load(stream) return {'faq': content} def add_chat_log_info(query_info): """ Add the given chat into the database. |query_info| encapsulates all the information of one chat Example: query_info = { 'sender_user_id': 1, 'recipient_user_id': 2, 'message': 'Hello this is my message', 'worksheet_uuid': 0x508cf51e546742beba97ed9a69329838, // the worksheet the user is browsing when he/she sends this message 'bundle_uuid': 0x8e66b11ecbda42e2a1f544627acf1418, // the bundle the user is browsing when he/she sends this message } Return an auto response, if the chat is directed to the system. Otherwise, return an updated chat list of the sender. """ updated_data = local.model.add_chat_log_info(query_info) if query_info.get('recipient_user_id') != local.model.system_user_id: return updated_data else: message = query_info.get('message') worksheet_uuid = query_info.get('worksheet_uuid') bundle_uuid = query_info.get('bundle_uuid') bot_response = format_message_response( ChatBoxQA.answer(message, worksheet_uuid, bundle_uuid) ) info = { 'sender_user_id': local.model.system_user_id, 'recipient_user_id': request.user.user_id, 'message': bot_response, 'worksheet_uuid': worksheet_uuid, 'bundle_uuid': bundle_uuid, } local.model.add_chat_log_info(info) return bot_response def format_message_response(params): """ Format automatic response |params| is None if the system can't process the user's message or is not confident enough to give a response. Otherwise, |params| is a triple that consists of the question that the system is trying to answer, the response it has for that question, and the recommended command to run. Return the automatic response that will be sent back to the user's chat box. """ if params is None: return 'Thank you for your question. Our staff will get back to you as soon as we can.' else: question, response, command = params result = 'This is the question we are trying to answer: ' + question + '\n' result += response + '\n' result += 'You can try to run the following command: \n' result += command return result
""" Return a list of FAQ items, each of the following format: '0': { 'question': 'how can I upload / add a bundle?'
prod.env.js
module.exports = { NODE_ENV: '"production"', ENV_CONFIG: '"prod"', BASE_API: '"http://www.goodexam.com.cn"'
}
command.rs
use nu_engine::get_full_help; use nu_protocol::{ ast::Call, engine::{Command, EvaluationContext}, Signature, Value, }; pub struct Into; impl Command for Into { fn name(&self) -> &str
fn signature(&self) -> Signature { Signature::build("into") } fn usage(&self) -> &str { "Apply into function." } fn run( &self, context: &EvaluationContext, call: &Call, _input: Value, ) -> Result<nu_protocol::Value, nu_protocol::ShellError> { Ok(Value::String { val: get_full_help(&Into.signature(), &[], context), span: call.head, }) } } #[cfg(test)] mod test { use super::*; #[test] fn test_examples() { use crate::test_examples; test_examples(Into {}) } }
{ "into" }
framework_test.go
package pv_monitor_controller import ( "context" "testing" "time" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/record" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/golang/mock/gomock" "github.com/kubernetes-csi/csi-test/v3/driver" "github.com/kubernetes-csi/external-health-monitor/pkg/mock" "github.com/stretchr/testify/assert" ) type fakeNativeObjects struct { MockVolume *mock.MockVolume MockNode *mock.MockNode MockEvent *mock.MockEvent } type testCase struct { name string enableNodeWatcher bool fakeNativeObjects *fakeNativeObjects supportListVolumes bool wantAbnormalEvent bool hasRecoveryEvent bool } func runTest(t *testing.T, tc *testCase) { assert := assert.New(t) // Initialize native controller objects nativeObjects := []runtime.Object{ tc.fakeNativeObjects.MockVolume.NativeVolume, tc.fakeNativeObjects.MockVolume.NativeVolumeClaim, } if tc.enableNodeWatcher { nativeObjects = append(nativeObjects, tc.fakeNativeObjects.MockNode.NativeNode) } if tc.hasRecoveryEvent { nativeObjects = append(nativeObjects, tc.fakeNativeObjects.MockEvent.NativeEvent) } client := fake.NewSimpleClientset(nativeObjects...) informers := informers.NewSharedInformerFactory(client, 0) pvInformer := informers.Core().V1().PersistentVolumes() pvcInformer := informers.Core().V1().PersistentVolumeClaims() podInformer := informers.Core().V1().Pods() nodeInformer := informers.Core().V1().Nodes() eventInformer := informers.Core().V1().Events() option := &PVMonitorOptions{ DriverName: "fake.csi.driver.io", ContextTimeout: 15 * time.Second, EnableNodeWatcher: tc.enableNodeWatcher, ListVolumesInterval: 5 * time.Minute, PVWorkerExecuteInterval: 1 * time.Minute, VolumeListAndAddInterval: 5 * time.Minute, NodeWorkerExecuteInterval: 1 * time.Minute, NodeListAndAddInterval: 5 * time.Minute, SupportListVolume: tc.supportListVolumes, } _, _, _, controllerServer, _, csiConn, err := mock.CreateMockServer(t) assert.Nil(err) eventStore := make(chan string, 1) eventRecorder := record.FakeRecorder{ Events: eventStore, } var ( volumes []*mock.CSIVolume ) // Inject test cases volumes = append(volumes, tc.fakeNativeObjects.MockVolume.CSIVolume) err = pvInformer.Informer().GetStore().Add(tc.fakeNativeObjects.MockVolume.NativeVolume) assert.Nil(err) err = pvcInformer.Informer().GetStore().Add(tc.fakeNativeObjects.MockVolume.NativeVolumeClaim) assert.Nil(err) if tc.enableNodeWatcher { err = nodeInformer.Informer().GetStore().Add(tc.fakeNativeObjects.MockNode.NativeNode) assert.Nil(err) } mockCSIcontrollerServer(controllerServer, tc.supportListVolumes, volumes) pvMonitorController := NewPVMonitorController(client, csiConn, pvInformer, pvcInformer, podInformer, nodeInformer, eventInformer, &eventRecorder, option) assert.NotNil(pvMonitorController) if tc.hasRecoveryEvent { err = eventInformer.Informer().GetStore().Add(tc.fakeNativeObjects.MockEvent.NativeEvent) assert.Nil(err) } ctx, cancel := context.WithCancel(context.TODO()) stopCh := ctx.Done() informers.Start(stopCh) go pvMonitorController.Run(1, stopCh) event, err := mock.WatchEvent(tc.wantAbnormalEvent, eventStore) if tc.wantAbnormalEvent { assert.Nil(err) assert.EqualValues(event, mock.AbnormalEvent) } else if tc.hasRecoveryEvent { assert.Nil(err) assert.EqualValues(event, mock.NormalEvent) } else { assert.EqualValues(mock.ErrorWatchTimeout.Error(), err.Error()) } cancel() } func mockCSIcontrollerServer(csiControllerServer *driver.MockControllerServer, supportListVolume bool, objects []*mock.CSIVolume)
{ if supportListVolume { volumeResponseEntries := make([]*csi.ListVolumesResponse_Entry, len(objects)) for index, volume := range objects { volumeResponseEntries[index] = &csi.ListVolumesResponse_Entry{ Volume: volume.Volume, Status: &csi.ListVolumesResponse_VolumeStatus{ VolumeCondition: volume.Condition, }, } } in := &csi.ListVolumesRequest{ StartingToken: "", } out := &csi.ListVolumesResponse{ Entries: volumeResponseEntries, NextToken: "", } csiControllerServer.EXPECT().ListVolumes(gomock.Any(), in).Return(out, nil).Times(100000) } else { for _, volume := range objects { in := &csi.ControllerGetVolumeRequest{ VolumeId: volume.Volume.VolumeId, } out := &csi.ControllerGetVolumeResponse{ Volume: volume.Volume, Status: &csi.ControllerGetVolumeResponse_VolumeStatus{ VolumeCondition: volume.Condition, }, } csiControllerServer.EXPECT().ControllerGetVolume(gomock.Any(), in).Return(out, nil).Times(100000) } } }
client.rs
use { serde_json::{json, Value}, serial_test::serial, solana_client::{ pubsub_client::PubsubClient, rpc_client::RpcClient, rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig}, rpc_response::SlotInfo, }, solana_rpc::{ optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, rpc_pubsub_service::{PubSubConfig, PubSubService}, rpc_subscriptions::RpcSubscriptions, }, solana_runtime::{ bank::Bank, bank_forks::BankForks, commitment::{BlockCommitmentCache, CommitmentSlots}, genesis_utils::{create_genesis_config, GenesisConfigInfo}, }, solana_sdk::{ clock::Slot, commitment_config::CommitmentConfig, native_token::sol_to_lamports, pubkey::Pubkey, rpc_port, signature::{Keypair, Signer}, system_program, system_transaction, }, solana_streamer::socket::SocketAddrSpace, solana_test_validator::TestValidator, std::{ collections::HashSet, net::{IpAddr, SocketAddr}, sync::{ atomic::{AtomicBool, Ordering}, Arc, RwLock, }, thread::sleep, time::{Duration, Instant}, }, systemstat::Ipv4Addr, }; #[test] fn test_rpc_client() { solana_logger::setup(); let alice = Keypair::new(); let test_validator = TestValidator::with_no_fees(alice.pubkey(), None, SocketAddrSpace::Unspecified); let bob_pubkey = solana_sdk::pubkey::new_rand(); let client = RpcClient::new(test_validator.rpc_url()); assert_eq!( client.get_version().unwrap().solana_core, solana_version::semver!() ); assert!(client.get_account(&bob_pubkey).is_err()); assert_eq!(client.get_balance(&bob_pubkey).unwrap(), 0); let original_alice_balance = client.get_balance(&alice.pubkey()).unwrap(); let blockhash = client.get_latest_blockhash().unwrap(); let tx = system_transaction::transfer(&alice, &bob_pubkey, sol_to_lamports(20.0), blockhash); let signature = client.send_transaction(&tx).unwrap(); let mut confirmed_tx = false; let now = Instant::now(); while now.elapsed().as_secs() <= 20 { let response = client .confirm_transaction_with_commitment(&signature, CommitmentConfig::default()) .unwrap(); if response.value { confirmed_tx = true; break; } sleep(Duration::from_millis(500)); } assert!(confirmed_tx); assert_eq!( client.get_balance(&bob_pubkey).unwrap(), sol_to_lamports(20.0) ); assert_eq!( client.get_balance(&alice.pubkey()).unwrap(), original_alice_balance - sol_to_lamports(20.0) ); } #[test] #[serial] fn test_account_subscription() { let pubsub_addr = SocketAddr::new( IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rpc_port::DEFAULT_RPC_PUBSUB_PORT, ); let exit = Arc::new(AtomicBool::new(false)); let GenesisConfigInfo { genesis_config, mint_keypair: alice, .. } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let blockhash = bank.last_blockhash(); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone(); let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1); bank_forks.write().unwrap().insert(bank1); let bob = Keypair::new(); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::default())), OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), )); let (trigger, pubsub_service) = PubSubService::new(PubSubConfig::default(), &subscriptions, pubsub_addr); std::thread::sleep(Duration::from_millis(400)); let config = Some(RpcAccountInfoConfig { commitment: Some(CommitmentConfig::finalized()), encoding: None, data_slice: None, }); let (mut client, receiver) = PubsubClient::account_subscribe( &format!("ws://0.0.0.0:{}/", pubsub_addr.port()), &bob.pubkey(), config, ) .unwrap(); // Transfer 100 lamports from alice to bob let tx = system_transaction::transfer(&alice, &bob.pubkey(), 100, blockhash); bank_forks .write() .unwrap() .get(1) .unwrap() .process_transaction(&tx) .unwrap(); let commitment_slots = CommitmentSlots { slot: 1, ..CommitmentSlots::default() }; subscriptions.notify_subscribers(commitment_slots); let commitment_slots = CommitmentSlots { slot: 2, root: 1, highest_confirmed_slot: 1, highest_confirmed_root: 1, }; subscriptions.notify_subscribers(commitment_slots); let expected = json!({ "context": { "slot": 1 }, "value": { "owner": system_program::id().to_string(), "lamports": 100, "data": "", "executable": false, "rentEpoch": 0, }, }); // Read notification let mut errors: Vec<(Value, Value)> = Vec::new(); let response = receiver.recv(); match response { Ok(response) => { let actual = serde_json::to_value(response).unwrap();
errors.push((expected, actual)); } } Err(_) => eprintln!("unexpected websocket receive timeout"), } exit.store(true, Ordering::Relaxed); trigger.cancel(); client.shutdown().unwrap(); pubsub_service.close().unwrap(); assert_eq!(errors, [].to_vec()); } #[test] #[serial] fn test_program_subscription() { let pubsub_addr = SocketAddr::new( IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rpc_port::DEFAULT_RPC_PUBSUB_PORT, ); let exit = Arc::new(AtomicBool::new(false)); let GenesisConfigInfo { genesis_config, mint_keypair: alice, .. } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let blockhash = bank.last_blockhash(); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone(); let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1); bank_forks.write().unwrap().insert(bank1); let bob = Keypair::new(); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::default())), OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), )); let (trigger, pubsub_service) = PubSubService::new(PubSubConfig::default(), &subscriptions, pubsub_addr); std::thread::sleep(Duration::from_millis(400)); let config = Some(RpcProgramAccountsConfig { ..RpcProgramAccountsConfig::default() }); let program_id = Pubkey::new_unique(); let (mut client, receiver) = PubsubClient::program_subscribe( &format!("ws://0.0.0.0:{}/", pubsub_addr.port()), &program_id, config, ) .unwrap(); // Create new program account at bob's address let tx = system_transaction::create_account(&alice, &bob, blockhash, 100, 0, &program_id); bank_forks .write() .unwrap() .get(1) .unwrap() .process_transaction(&tx) .unwrap(); let commitment_slots = CommitmentSlots { slot: 1, ..CommitmentSlots::default() }; subscriptions.notify_subscribers(commitment_slots); let commitment_slots = CommitmentSlots { slot: 2, root: 1, highest_confirmed_slot: 1, highest_confirmed_root: 1, }; subscriptions.notify_subscribers(commitment_slots); // Poll notifications generated by the transfer let mut notifications = Vec::new(); let mut pubkeys = HashSet::new(); loop { let response = receiver.recv_timeout(Duration::from_millis(100)); match response { Ok(response) => { notifications.push(response.clone()); pubkeys.insert(response.value.pubkey); } Err(_) => { break; } } } // Shutdown exit.store(true, Ordering::Relaxed); trigger.cancel(); client.shutdown().unwrap(); pubsub_service.close().unwrap(); assert_eq!(notifications.len(), 1); assert!(pubkeys.contains(&bob.pubkey().to_string())); } #[test] #[serial] fn test_root_subscription() { let pubsub_addr = SocketAddr::new( IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rpc_port::DEFAULT_RPC_PUBSUB_PORT, ); let exit = Arc::new(AtomicBool::new(false)); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone(); let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1); bank_forks.write().unwrap().insert(bank1); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::default())), OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), )); let (trigger, pubsub_service) = PubSubService::new(PubSubConfig::default(), &subscriptions, pubsub_addr); std::thread::sleep(Duration::from_millis(400)); let (mut client, receiver) = PubsubClient::root_subscribe(&format!("ws://0.0.0.0:{}/", pubsub_addr.port())).unwrap(); let roots = vec![1, 2, 3]; subscriptions.notify_roots(roots.clone()); // Read notifications let mut errors: Vec<(Slot, Slot)> = Vec::new(); for expected in roots { let response = receiver.recv(); match response { Ok(response) => { if expected != response { errors.push((expected, response)); } } Err(_) => eprintln!("unexpected websocket receive timeout"), } } exit.store(true, Ordering::Relaxed); trigger.cancel(); client.shutdown().unwrap(); pubsub_service.close().unwrap(); assert_eq!(errors, [].to_vec()); } #[test] #[serial] fn test_slot_subscription() { let pubsub_addr = SocketAddr::new( IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rpc_port::DEFAULT_RPC_PUBSUB_PORT, ); let exit = Arc::new(AtomicBool::new(false)); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, bank_forks, Arc::new(RwLock::new(BlockCommitmentCache::default())), optimistically_confirmed_bank, )); let (trigger, pubsub_service) = PubSubService::new(PubSubConfig::default(), &subscriptions, pubsub_addr); std::thread::sleep(Duration::from_millis(400)); let (mut client, receiver) = PubsubClient::slot_subscribe(&format!("ws://0.0.0.0:{}/", pubsub_addr.port())).unwrap(); let mut errors: Vec<(SlotInfo, SlotInfo)> = Vec::new(); for i in 0..3 { subscriptions.notify_slot(i + 1, i, i); let maybe_actual = receiver.recv_timeout(Duration::from_millis(400)); match maybe_actual { Ok(actual) => { let expected = SlotInfo { slot: i + 1, parent: i, root: i, }; if actual != expected { errors.push((actual, expected)); } } Err(_err) => { eprintln!("unexpected websocket receive timeout"); break; } } } exit.store(true, Ordering::Relaxed); trigger.cancel(); client.shutdown().unwrap(); pubsub_service.close().unwrap(); assert_eq!(errors, [].to_vec()); }
if expected != actual {
main.go
// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated by cloud.google.com/go/internal/gapicgen/gensnippets. DO NOT EDIT. // [START analyticsadmin_v1alpha_generated_AnalyticsAdminService_DeleteProperty_sync] package main import ( "context" admin "cloud.google.com/go/analytics/admin/apiv1alpha" adminpb "google.golang.org/genproto/googleapis/analytics/admin/v1alpha" ) func main() { ctx := context.Background() c, err := admin.NewAnalyticsAdminClient(ctx) if err != nil { // TODO: Handle error. } defer c.Close() req := &adminpb.DeletePropertyRequest{ // TODO: Fill request struct fields. // See https://pkg.go.dev/google.golang.org/genproto/googleapis/analytics/admin/v1alpha#DeletePropertyRequest. } resp, err := c.DeleteProperty(ctx, req) if err != nil
// TODO: Use resp. _ = resp } // [END analyticsadmin_v1alpha_generated_AnalyticsAdminService_DeleteProperty_sync]
{ // TODO: Handle error. }
test.py
from django.test import TestCase, override_settings from model_bakery import baker from rest_framework.test import APIClient from accounts.models import User from core.models import CoreSettings from rest_framework.authtoken.models import Token class
(TestCase): def authenticate(self): self.john = User(username="john") self.john.set_password("hunter2") self.john.save() self.alice = User(username="alice") self.alice.set_password("hunter2") self.alice.save() self.client_setup() self.client.force_authenticate(user=self.john) def setup_agent_auth(self, agent): agent_user = User.objects.create_user( username=agent.agent_id, password=User.objects.make_random_password(60) ) Token.objects.create(user=agent_user) def client_setup(self): self.client = APIClient() # fixes tests waiting 2 minutes for mesh token to appear @override_settings( MESH_TOKEN_KEY="41410834b8bb4481446027f87d88ec6f119eb9aa97860366440b778540c7399613f7cabfef4f1aa5c0bd9beae03757e17b2e990e5876b0d9924da59bdf24d3437b3ed1a8593b78d65a72a76c794160d9" ) def setup_coresettings(self): self.coresettings = CoreSettings.objects.create() def check_not_authenticated(self, method, url): self.client.logout() switch = { "get": self.client.get(url), "post": self.client.post(url), "put": self.client.put(url), "patch": self.client.patch(url), "delete": self.client.delete(url), } r = switch.get(method) self.assertEqual(r.status_code, 401) def create_checks(self, policy=None, agent=None, script=None): if not policy and not agent: return # will create 1 of every check and associate it with the policy object passed check_recipes = [ "checks.diskspace_check", "checks.ping_check", "checks.cpuload_check", "checks.memory_check", "checks.winsvc_check", "checks.script_check", "checks.eventlog_check", ] checks = list() for recipe in check_recipes: if not script: checks.append(baker.make_recipe(recipe, policy=policy, agent=agent)) else: checks.append( baker.make_recipe(recipe, policy=policy, agent=agent, script=script) ) return checks
TacticalTestCase
data_test.go
package data import ( "github.com/stretchr/testify/assert" "testing" ) func TestNew(t *testing.T) { p := New() assert.NotNil(t, p.data) } func TestKeys(t *testing.T) { p := New() p.Set("foo", "") p.Set("bar", "") n := p.Keys() assert.Contains(t, n, "foo") assert.Contains(t, n, "bar") assert.Len(t, n, 2) } func TestExists(t *testing.T) { p := New() p.Set("foo", "") assert.True(t, p.Exists("foo")) assert.False(t, p.Exists("bar")) } func TestGetAll(t *testing.T) { p := New() p.Set("foo", "b", "a", "r") assert.Equal(t, []string{"b", "a", "r"}, p.GetAll("foo")) assert.Equal(t, []string{}, p.GetAll("non-existing")) } func TestGet(t *testing.T) { p := New() p.Set("foo", "bar") assert.Equal(t, "bar", p.Get("foo")) p.Set("foo2", "b", "a", "r") assert.Equal(t, "", p.Get("foo2")) } func TestSet(t *testing.T) { p := New() p.Set("foo", "bar") assert.Equal(t, "bar", p.Get("foo")) // test overwrite p.Set("foo", "rab") assert.Equal(t, "rab", p.Get("foo")) } func TestAdd(t *testing.T) { p := New() p.Add("foo", "bar") p.Add("foo", "rab") p.Add("foo", "foo", "bar") assert.Equal(t, []string{"bar", "rab", "foo", "bar"}, p.GetAll("foo")) } func TestDelete(t *testing.T) { p := New() p.Set("foo", "bar") p.Delete("foo") assert.Equal(t, []string{}, p.GetAll("foo")) p.Delete("non-existing") } func TestPickAll(t *testing.T) { p := New() p.Set("foo", "b", "a", "r") assert.Equal(t, []string{"b", "a", "r"}, p.PickAll("foo")) assert.False(t, p.Exists("foo")) } func TestPick(t *testing.T) { p := New() p.Set("foo", "bar") assert.Equal(t, "bar", p.Pick("foo")) assert.False(t, p.Exists("foo")) } func TestMerge(t *testing.T) { p1 := New() p1.Set("1", "1") p1.Set("2", "2") p1.Set("3", "3") p2 := New() p2.Set("4", "4") p2.Set("2", "b") r := New() r.Set("1", "1") r.Set("2", "b") r.Set("3", "3") r.Set("4", "4") n := Merge(p1, p2) assert.Equal(t, r, n) p1.Merge(p2) assert.Equal(t, r, p1) } func TestFilter(t *testing.T) { p1 := New() p1.Set("1", "1") p1.Set("2", "2") p1.Set("3", "3") p1.Set("4", "4") p2 := New() p2.Set("2", "2") p2.Set("4", "4") p1.Filter(p2) assert.Equal(t, p2, p1) } func TestFilterWithStringsSlice(t *testing.T) { p1 := New() p1.Set("1", "1") p1.Set("2", "2") p1.Set("3", "3") p1.Set("4", "4") p2 := New() p2.Set("2", "2") p2.Set("4", "4") p1.Filter([]string{"4", "2"}) assert.Equal(t, p2, p1) } func TestFilterWithString(t *testing.T) { p1 := New() p1.Set("1", "1") p1.Set("2", "2") p1.Set("3", "3") p1.Set("4", "4") p2 := New() p2.Set("2", "2") p1.Filter("2") assert.Equal(t, p2, p1) } func TestToData(t *testing.T)
func TestRaw(t *testing.T) { // TODO } func TestRawEnhanced(t *testing.T) { // TODO } func TestIsTrue(t *testing.T) { // TODO } func TestIsFalse(t *testing.T) { // TODO }
{ p := New() p.Set("foo", "bar") tp := ToData(map[string][]string{"foo": []string{"bar"}}) assert.Equal(t, p, tp) }
SkyCoverComponent.py
from ..Distance import Distance from ..CloudCoverage import CloudCoverage from ..Constant import Constant from .BaseComponent import BaseComponent class SkyCoverComponent(BaseComponent): ''' handle GA1..GA8 sky component types ''' CLOUD_TYPES = { "00": "Cirrus (Ci)", "01": "Cirrocumulus (Cc)", "02": "Cirrostratus (Cs)", "03": "Altocumulus (Ac)", "04": "Altostratus (As)", "05": "Nimbostratus (Ns)", "06": "Stratocumulus (Sc)", "07": "Stratus (St)", "08": "Cumulus (Cu)", "09": "Cumulonimbus (Cb)", "10": """Cloud not visible owing to darkness, fog, duststorm, sandstorm, or other analogous phenomena / sky obscured""", "11": "Not used", "12": "Towering Cumulus (Tcu)", "13": "Stratus fractus (Stfra)", "14": "Stratocumulus Lenticular (Scsl)", "15": "Cumulus Fractus (Cufra)", "16": "Cumulonimbus Mammatus (Cbmam)", "17": "Altocumulus Lenticular (Acsl)", "18": "Altocumulus Castellanus (Accas)", "19": "Altocumulus Mammatus (Acmam)", "20": "Cirrocumulus Lenticular (Ccsl)", "21": "Cirrus and/or Cirrocumulus", "22": "Stratus and/or Fracto-stratus", "23": "Cumulus and/or Fracto-cumulus"} def loads(self, string):
def __repr__(self): return str(self.sky_cover) def __str__(self): return str(self.sky_cover)
self.sky_cover = { 'coverage': CloudCoverage(string[0:2], CloudCoverage.OKTA, string[2:3]), 'base_height': Distance(int(string[4:9]), Distance.METERS, string[9:10]), 'cloud_type': Constant(string[9:11], None, string[11:12], self.CLOUD_TYPES)}
dump.py
""" Dump/export our own data to a local file. Script is installed as `location_dump`. """ import argparse import os import os.path import sys from sqlalchemy import text from ichnaea.db import ( configure_db, db_worker_session, ) from ichnaea.geocalc import bbox from ichnaea.log import ( configure_logging, LOGGER, ) from ichnaea.models import ( BlueShard, CellShard, WifiShard, ) from ichnaea import util def where_area(lat, lon, radius): # Construct a where clause based on a bounding box around the given # center point. if lat is None or lon is None or radius is None: return None max_lat, min_lat, max_lon, min_lon = bbox(lat, lon, radius) return '`lat` <= %s and `lat` >= %s and `lon` <= %s and `lon` >= %s' % ( round(max_lat, 5), round(min_lat, 5), round(max_lon, 5), round(min_lon, 5)) def dump_model(shard_model, session, fd, where=None):
def dump_file(datatype, session, filename, lat=None, lon=None, radius=None): model = { 'blue': BlueShard, 'cell': CellShard, 'wifi': WifiShard, } where = where_area(lat, lon, radius) with util.gzip_open(filename, 'w') as fd: dump_model(model[datatype], session, fd, where=where) return 0 def main(argv, _db=None, _dump_file=dump_file): parser = argparse.ArgumentParser( prog=argv[0], description='Dump/export data.') parser.add_argument('--datatype', required=True, help='Type of the data file, blue, cell or wifi') parser.add_argument('--filename', required=True, help='Path to the csv.gz export file.') parser.add_argument('--lat', default=None, help='The center latitude of the desired area.') parser.add_argument('--lon', default=None, help='The center longitude of the desired area.') parser.add_argument('--radius', default=None, help='The radius of the desired area.') args = parser.parse_args(argv[1:]) if not args.filename: # pragma: no cover parser.print_help() return 1 filename = os.path.abspath(os.path.expanduser(args.filename)) if os.path.isfile(filename): # pragma: no cover print('File already exists.') return 1 datatype = args.datatype if datatype not in ('blue', 'cell', 'wifi'): # pragma: no cover print('Unknown data type.') return 1 lat, lon, radius = (None, None, None) if (args.lat is not None and args.lon is not None and args.radius is not None): lat = float(args.lat) lon = float(args.lon) radius = int(args.radius) configure_logging() db = configure_db('ro', transport='sync', _db=_db) with db_worker_session(db, commit=False) as session: exit_code = _dump_file( datatype, session, filename, lat=lat, lon=lon, radius=radius) return exit_code def console_entry(): # pragma: no cover sys.exit(main(sys.argv))
fd.write(shard_model.export_header() + '\n') for model in shard_model.shards().values(): LOGGER.info('Exporting table: %s', model.__tablename__) stmt = model.export_stmt() if where: stmt = stmt.replace(' WHERE ', ' WHERE %s AND ' % where) stmt = text(stmt) min_key = '' limit = 25000 while True: rows = session.execute( stmt.bindparams( export_key=min_key, limit=limit )).fetchall() if rows: buf = '\n'.join([row.export_value for row in rows]) if buf: buf += '\n' fd.write(buf) min_key = rows[-1].export_key else: break
rpc.rs
//! The `rpc` module implements the Solana RPC interface. use crate::{ cluster_info::ClusterInfo, contact_info::ContactInfo, max_slots::MaxSlots, non_circulating_supply::calculate_non_circulating_supply, optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, rpc_health::*, send_transaction_service::{SendTransactionService, TransactionInfo}, validator::ValidatorExit, }; use bincode::{config::Options, serialize}; use jsonrpc_core::{types::error, Error, Metadata, Result}; use jsonrpc_derive::rpc; use solana_account_decoder::{ parse_account_data::AccountAdditionalData, parse_token::{ get_token_account_mint, spl_token_id_v2_0, spl_token_v2_0_native_mint, token_amount_to_ui_amount, UiTokenAmount, }, UiAccount, UiAccountData, UiAccountEncoding, UiDataSliceConfig, }; use solana_client::{ rpc_cache::LargestAccountsCache, rpc_config::*, rpc_custom_error::RpcCustomError, rpc_deprecated_config::*, rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType}, rpc_request::{ TokenAccountsFilter, DELINQUENT_VALIDATOR_SLOT_DISTANCE, MAX_GET_CONFIRMED_BLOCKS_RANGE, MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT, MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE, MAX_GET_PROGRAM_ACCOUNT_FILTERS, MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, MAX_GET_SLOT_LEADERS, MAX_MULTIPLE_ACCOUNTS, NUM_LARGEST_ACCOUNTS, }, rpc_response::Response as RpcResponse, rpc_response::*, }; use solana_faucet::faucet::request_airdrop_transaction; use solana_ledger::{ blockstore::Blockstore, blockstore_db::BlockstoreError, get_tmp_ledger_path, leader_schedule_cache::LeaderScheduleCache, }; use solana_metrics::inc_new_counter_info; use solana_perf::packet::PACKET_DATA_SIZE; use solana_runtime::{ accounts::AccountAddressFilter, accounts_index::{AccountIndex, IndexKey}, bank::Bank, bank_forks::{BankForks, SnapshotConfig}, commitment::{BlockCommitmentArray, BlockCommitmentCache, CommitmentSlots}, inline_spl_token_v2_0::{SPL_TOKEN_ACCOUNT_MINT_OFFSET, SPL_TOKEN_ACCOUNT_OWNER_OFFSET}, snapshot_utils::get_highest_snapshot_archive_path, }; use solana_sdk::{ account::{AccountSharedData, ReadableAccount}, account_utils::StateMut, clock::{Slot, UnixTimestamp, MAX_RECENT_BLOCKHASHES}, commitment_config::{CommitmentConfig, CommitmentLevel}, epoch_info::EpochInfo, epoch_schedule::EpochSchedule, hash::Hash, pubkey::Pubkey, sanitize::Sanitize, signature::Signature, stake_history::StakeHistory, system_instruction, sysvar::stake_history, transaction::{self, Transaction}, }; use solana_stake_program::stake_state::StakeState; use solana_transaction_status::{ EncodedConfirmedTransaction, Reward, RewardType, TransactionConfirmationStatus, TransactionStatus, UiConfirmedBlock, UiTransactionEncoding, }; use solana_vote_program::vote_state::{VoteState, MAX_LOCKOUT_HISTORY}; use spl_token_v2_0::{ solana_program::program_pack::Pack, state::{Account as TokenAccount, Mint}, }; use std::{ cmp::{max, min}, collections::{HashMap, HashSet}, net::SocketAddr, str::FromStr, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, mpsc::{channel, Receiver, Sender}, Arc, Mutex, RwLock, }, time::Duration, }; use tokio::runtime::Runtime; pub const MAX_REQUEST_PAYLOAD_SIZE: usize = 50 * (1 << 10); // 50kB pub const PERFORMANCE_SAMPLES_LIMIT: usize = 720; // Limit the length of the `epoch_credits` array for each validator in a `get_vote_accounts` // response const MAX_RPC_EPOCH_CREDITS_HISTORY: usize = 5; fn new_response<T>(bank: &Bank, value: T) -> RpcResponse<T> { let context = RpcResponseContext { slot: bank.slot() }; Response { context, value } } fn is_finalized( block_commitment_cache: &BlockCommitmentCache, bank: &Bank, blockstore: &Blockstore, slot: Slot, ) -> bool { slot <= block_commitment_cache.highest_confirmed_root() && (blockstore.is_root(slot) || bank.status_cache_ancestors().contains(&slot)) } #[derive(Debug, Default, Clone)] pub struct JsonRpcConfig { pub enable_rpc_transaction_history: bool, pub enable_cpi_and_log_storage: bool, pub identity_pubkey: Pubkey, pub faucet_addr: Option<SocketAddr>, pub health_check_slot_distance: u64, pub enable_bigtable_ledger_storage: bool, pub enable_bigtable_ledger_upload: bool, pub max_multiple_accounts: Option<usize>, pub account_indexes: HashSet<AccountIndex>, pub rpc_threads: usize, pub rpc_bigtable_timeout: Option<Duration>, pub minimal_api: bool, pub obsolete_v1_7_api: bool, } #[derive(Clone)] pub struct JsonRpcRequestProcessor { bank_forks: Arc<RwLock<BankForks>>, block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, blockstore: Arc<Blockstore>, config: JsonRpcConfig, snapshot_config: Option<SnapshotConfig>, validator_exit: Arc<RwLock<ValidatorExit>>, health: Arc<RpcHealth>, cluster_info: Arc<ClusterInfo>, genesis_hash: Hash, transaction_sender: Arc<Mutex<Sender<TransactionInfo>>>, runtime: Arc<Runtime>, bigtable_ledger_storage: Option<solana_storage_bigtable::LedgerStorage>, optimistically_confirmed_bank: Arc<RwLock<OptimisticallyConfirmedBank>>, largest_accounts_cache: Arc<RwLock<LargestAccountsCache>>, max_slots: Arc<MaxSlots>, leader_schedule_cache: Arc<LeaderScheduleCache>, max_complete_transaction_status_slot: Arc<AtomicU64>, } impl Metadata for JsonRpcRequestProcessor {} impl JsonRpcRequestProcessor { #[allow(deprecated)] fn bank(&self, commitment: Option<CommitmentConfig>) -> Arc<Bank> { debug!("RPC commitment_config: {:?}", commitment); let r_bank_forks = self.bank_forks.read().unwrap(); let commitment = commitment.unwrap_or_default(); if commitment.is_confirmed() { let bank = self .optimistically_confirmed_bank .read() .unwrap() .bank .clone(); debug!("RPC using optimistically confirmed slot: {:?}", bank.slot()); return bank; } let slot = self .block_commitment_cache .read() .unwrap() .slot_with_commitment(commitment.commitment); match commitment.commitment { // Recent variant is deprecated CommitmentLevel::Recent | CommitmentLevel::Processed => { debug!("RPC using the heaviest slot: {:?}", slot); } // Root variant is deprecated CommitmentLevel::Root => { debug!("RPC using node root: {:?}", slot); } // Single variant is deprecated CommitmentLevel::Single => { debug!("RPC using confirmed slot: {:?}", slot); } // Max variant is deprecated CommitmentLevel::Max | CommitmentLevel::Finalized => { debug!("RPC using block: {:?}", slot); } CommitmentLevel::SingleGossip | CommitmentLevel::Confirmed => unreachable!(), // SingleGossip variant is deprecated }; r_bank_forks.get(slot).cloned().unwrap_or_else(|| { // We log a warning instead of returning an error, because all known error cases // are due to known bugs that should be fixed instead. // // The slot may not be found as a result of a known bug in snapshot creation, where // the bank at the given slot was not included in the snapshot. // Also, it may occur after an old bank has been purged from BankForks and a new // BlockCommitmentCache has not yet arrived. To make this case impossible, // BlockCommitmentCache should hold an `Arc<Bank>` everywhere it currently holds // a slot. // // For more information, see https://github.com/solana-labs/solana/issues/11078 warn!( "Bank with {:?} not found at slot: {:?}", commitment.commitment, slot ); r_bank_forks.root_bank() }) } #[allow(clippy::too_many_arguments)] pub fn new( config: JsonRpcConfig, snapshot_config: Option<SnapshotConfig>, bank_forks: Arc<RwLock<BankForks>>, block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, blockstore: Arc<Blockstore>, validator_exit: Arc<RwLock<ValidatorExit>>, health: Arc<RpcHealth>, cluster_info: Arc<ClusterInfo>, genesis_hash: Hash, runtime: Arc<Runtime>, bigtable_ledger_storage: Option<solana_storage_bigtable::LedgerStorage>, optimistically_confirmed_bank: Arc<RwLock<OptimisticallyConfirmedBank>>, largest_accounts_cache: Arc<RwLock<LargestAccountsCache>>, max_slots: Arc<MaxSlots>, leader_schedule_cache: Arc<LeaderScheduleCache>, max_complete_transaction_status_slot: Arc<AtomicU64>, ) -> (Self, Receiver<TransactionInfo>) { let (sender, receiver) = channel(); ( Self { config, snapshot_config, bank_forks, block_commitment_cache, blockstore, validator_exit, health, cluster_info, genesis_hash, transaction_sender: Arc::new(Mutex::new(sender)), runtime, bigtable_ledger_storage, optimistically_confirmed_bank, largest_accounts_cache, max_slots, leader_schedule_cache, max_complete_transaction_status_slot, }, receiver, ) } // Useful for unit testing pub fn new_from_bank(bank: &Arc<Bank>) -> Self { let genesis_hash = bank.hash(); let bank_forks = Arc::new(RwLock::new(BankForks::new_from_banks( &[bank.clone()], bank.slot(), ))); let blockstore = Arc::new(Blockstore::open(&get_tmp_ledger_path!()).unwrap()); let exit = Arc::new(AtomicBool::new(false)); let cluster_info = Arc::new(ClusterInfo::default()); let tpu_address = cluster_info.my_contact_info().tpu; let (sender, receiver) = channel(); SendTransactionService::new(tpu_address, &bank_forks, None, receiver, 1000, 1); Self { config: JsonRpcConfig::default(), snapshot_config: None, bank_forks, block_commitment_cache: Arc::new(RwLock::new(BlockCommitmentCache::new( HashMap::new(), 0, CommitmentSlots::new_from_slot(bank.slot()), ))), blockstore, validator_exit: create_validator_exit(&exit), health: Arc::new(RpcHealth::new(cluster_info.clone(), None, 0, exit.clone())), cluster_info, genesis_hash, transaction_sender: Arc::new(Mutex::new(sender)), runtime: Arc::new(Runtime::new().expect("Runtime")), bigtable_ledger_storage: None, optimistically_confirmed_bank: Arc::new(RwLock::new(OptimisticallyConfirmedBank { bank: bank.clone(), })), largest_accounts_cache: Arc::new(RwLock::new(LargestAccountsCache::new(30))), max_slots: Arc::new(MaxSlots::default()), leader_schedule_cache: Arc::new(LeaderScheduleCache::new_from_bank(bank)), max_complete_transaction_status_slot: Arc::new(AtomicU64::default()), } } pub fn get_account_info( &self, pubkey: &Pubkey, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Option<UiAccount>>> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let encoding = config.encoding.unwrap_or(UiAccountEncoding::Binary); check_slice_and_encoding(&encoding, config.data_slice.is_some())?; let response = get_encoded_account(&bank, pubkey, encoding, config.data_slice)?; Ok(new_response(&bank, response)) } pub fn get_multiple_accounts( &self, pubkeys: Vec<Pubkey>, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<Option<UiAccount>>>> { let mut accounts: Vec<Option<UiAccount>> = vec![]; let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let encoding = config.encoding.unwrap_or(UiAccountEncoding::Base64); check_slice_and_encoding(&encoding, config.data_slice.is_some())?; for pubkey in pubkeys { let response_account = get_encoded_account(&bank, &pubkey, encoding.clone(), config.data_slice)?; accounts.push(response_account) } Ok(new_response(&bank, accounts)) } pub fn get_minimum_balance_for_rent_exemption( &self, data_len: usize, commitment: Option<CommitmentConfig>, ) -> u64 { self.bank(commitment) .get_minimum_balance_for_rent_exemption(data_len) } pub fn get_program_accounts( &self, program_id: &Pubkey, config: Option<RpcAccountInfoConfig>, filters: Vec<RpcFilterType>, ) -> Result<Vec<RpcKeyedAccount>> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let encoding = config.encoding.unwrap_or(UiAccountEncoding::Binary); let data_slice_config = config.data_slice; check_slice_and_encoding(&encoding, data_slice_config.is_some())?; let keyed_accounts = { if let Some(owner) = get_spl_token_owner_filter(program_id, &filters) { self.get_filtered_spl_token_accounts_by_owner(&bank, &owner, filters) } else if let Some(mint) = get_spl_token_mint_filter(program_id, &filters) { self.get_filtered_spl_token_accounts_by_mint(&bank, &mint, filters) } else { self.get_filtered_program_accounts(&bank, program_id, filters) } }; let result = if program_id == &spl_token_id_v2_0() && encoding == UiAccountEncoding::JsonParsed { get_parsed_token_accounts(bank, keyed_accounts.into_iter()).collect() } else { keyed_accounts .into_iter() .map(|(pubkey, account)| RpcKeyedAccount { pubkey: pubkey.to_string(), account: UiAccount::encode( &pubkey, account, encoding.clone(), None, data_slice_config, ), }) .collect() }; Ok(result) } pub fn get_inflation_reward( &self, addresses: Vec<Pubkey>, config: Option<RpcEpochConfig>, ) -> Result<Vec<Option<RpcInflationReward>>> { let config = config.unwrap_or_default(); let epoch_schedule = self.get_epoch_schedule(); let first_available_block = self.get_first_available_block(); let epoch = config.epoch.unwrap_or_else(|| { epoch_schedule .get_epoch(self.get_slot(config.commitment)) .saturating_sub(1) }); // Rewards for this epoch are found in the first confirmed block of the next epoch let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch.saturating_add(1)); if first_slot_in_epoch < first_available_block { if self.bigtable_ledger_storage.is_some() { return Err(RpcCustomError::LongTermStorageSlotSkipped { slot: first_slot_in_epoch, } .into()); } else { return Err(RpcCustomError::BlockCleanedUp { slot: first_slot_in_epoch, first_available_block, } .into()); } } let first_confirmed_block_in_epoch = *self .get_blocks_with_limit(first_slot_in_epoch, 1, config.commitment)? .get(0) .ok_or(RpcCustomError::BlockNotAvailable { slot: first_slot_in_epoch, })?; let first_confirmed_block = if let Ok(Some(first_confirmed_block)) = self.get_block( first_confirmed_block_in_epoch, Some(RpcBlockConfig::rewards_with_commitment(config.commitment).into()), ) { first_confirmed_block } else { return Err(RpcCustomError::BlockNotAvailable { slot: first_confirmed_block_in_epoch, } .into()); }; let addresses: Vec<String> = addresses .into_iter() .map(|pubkey| pubkey.to_string()) .collect(); let reward_hash: HashMap<String, Reward> = first_confirmed_block .rewards .unwrap_or_default() .into_iter() .filter_map(|reward| match reward.reward_type? { RewardType::Staking | RewardType::Voting => addresses .contains(&reward.pubkey) .then(|| (reward.clone().pubkey, reward)), _ => None, }) .collect(); let rewards = addresses .iter() .map(|address| { if let Some(reward) = reward_hash.get(address) { return Some(RpcInflationReward { epoch, effective_slot: first_confirmed_block_in_epoch, amount: reward.lamports.abs() as u64, post_balance: reward.post_balance, }); } None }) .collect(); Ok(rewards) } pub fn get_inflation_governor( &self, commitment: Option<CommitmentConfig>, ) -> RpcInflationGovernor { self.bank(commitment).inflation().into() } pub fn get_inflation_rate(&self) -> RpcInflationRate { let bank = self.bank(None); let epoch = bank.epoch(); let inflation = bank.inflation(); let slot_in_year = bank.slot_in_year_for_inflation(); RpcInflationRate { total: inflation.total(slot_in_year), validator: inflation.validator(slot_in_year), foundation: inflation.foundation(slot_in_year), epoch, } } pub fn get_epoch_schedule(&self) -> EpochSchedule { // Since epoch schedule data comes from the genesis config, any commitment level should be // fine let bank = self.bank(Some(CommitmentConfig::finalized())); *bank.epoch_schedule() } pub fn get_balance( &self, pubkey: &Pubkey, commitment: Option<CommitmentConfig>, ) -> RpcResponse<u64> { let bank = self.bank(commitment); new_response(&bank, bank.get_balance(pubkey)) } fn get_recent_blockhash( &self, commitment: Option<CommitmentConfig>, ) -> RpcResponse<RpcBlockhashFeeCalculator> { let bank = self.bank(commitment); let (blockhash, fee_calculator) = bank.confirmed_last_blockhash(); new_response( &bank, RpcBlockhashFeeCalculator { blockhash: blockhash.to_string(), fee_calculator, }, ) } fn get_fees(&self, commitment: Option<CommitmentConfig>) -> RpcResponse<RpcFees> { let bank = self.bank(commitment); let (blockhash, fee_calculator) = bank.confirmed_last_blockhash(); let last_valid_slot = bank .get_blockhash_last_valid_slot(&blockhash) .expect("bank blockhash queue should contain blockhash"); new_response( &bank, RpcFees { blockhash: blockhash.to_string(), fee_calculator, last_valid_slot, }, ) } fn get_fee_calculator_for_blockhash( &self, blockhash: &Hash, commitment: Option<CommitmentConfig>, ) -> RpcResponse<Option<RpcFeeCalculator>> { let bank = self.bank(commitment); let fee_calculator = bank.get_fee_calculator(blockhash); new_response( &bank, fee_calculator.map(|fee_calculator| RpcFeeCalculator { fee_calculator }), ) } fn get_fee_rate_governor(&self) -> RpcResponse<RpcFeeRateGovernor> { let bank = self.bank(None); let fee_rate_governor = bank.get_fee_rate_governor(); new_response( &bank, RpcFeeRateGovernor { fee_rate_governor: fee_rate_governor.clone(), }, ) } pub fn confirm_transaction( &self, signature: &Signature, commitment: Option<CommitmentConfig>, ) -> RpcResponse<bool> { let bank = self.bank(commitment); let status = bank.get_signature_status(signature); match status { Some(status) => new_response(&bank, status.is_ok()), None => new_response(&bank, false), } } fn get_block_commitment(&self, block: Slot) -> RpcBlockCommitment<BlockCommitmentArray> { let r_block_commitment = self.block_commitment_cache.read().unwrap(); RpcBlockCommitment { commitment: r_block_commitment .get_block_commitment(block) .map(|block_commitment| block_commitment.commitment), total_stake: r_block_commitment.total_stake(), } } fn get_slot(&self, commitment: Option<CommitmentConfig>) -> Slot { self.bank(commitment).slot() } fn get_max_retransmit_slot(&self) -> Slot { self.max_slots.retransmit.load(Ordering::Relaxed) } fn get_max_shred_insert_slot(&self) -> Slot { self.max_slots.shred_insert.load(Ordering::Relaxed) } fn get_slot_leader(&self, commitment: Option<CommitmentConfig>) -> String { self.bank(commitment).collector_id().to_string() } fn minimum_ledger_slot(&self) -> Result<Slot> { match self.blockstore.slot_meta_iterator(0) { Ok(mut metas) => match metas.next() { Some((slot, _meta)) => Ok(slot), None => Err(Error::invalid_request()), }, Err(err) => { warn!("slot_meta_iterator failed: {:?}", err); Err(Error::invalid_request()) } } } fn get_transaction_count(&self, commitment: Option<CommitmentConfig>) -> u64 { self.bank(commitment).transaction_count() as u64 } fn get_total_supply(&self, commitment: Option<CommitmentConfig>) -> u64 { self.bank(commitment).capitalization() } fn get_cached_largest_accounts( &self, filter: &Option<RpcLargestAccountsFilter>, ) -> Option<(u64, Vec<RpcAccountBalance>)> { let largest_accounts_cache = self.largest_accounts_cache.read().unwrap(); largest_accounts_cache.get_largest_accounts(filter) } fn set_cached_largest_accounts( &self, filter: &Option<RpcLargestAccountsFilter>, slot: u64, accounts: &[RpcAccountBalance], ) { let mut largest_accounts_cache = self.largest_accounts_cache.write().unwrap(); largest_accounts_cache.set_largest_accounts(filter, slot, accounts) } fn get_largest_accounts( &self, config: Option<RpcLargestAccountsConfig>, ) -> RpcResponse<Vec<RpcAccountBalance>> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); if let Some((slot, accounts)) = self.get_cached_largest_accounts(&config.filter) { Response { context: RpcResponseContext { slot }, value: accounts, } } else { let (addresses, address_filter) = if let Some(filter) = config.clone().filter { let non_circulating_supply = calculate_non_circulating_supply(&bank); let addresses = non_circulating_supply.accounts.into_iter().collect(); let address_filter = match filter { RpcLargestAccountsFilter::Circulating => AccountAddressFilter::Exclude, RpcLargestAccountsFilter::NonCirculating => AccountAddressFilter::Include, }; (addresses, address_filter) } else { (HashSet::new(), AccountAddressFilter::Exclude) }; let accounts = bank .get_largest_accounts(NUM_LARGEST_ACCOUNTS, &addresses, address_filter) .into_iter() .map(|(address, lamports)| RpcAccountBalance { address: address.to_string(), lamports, }) .collect::<Vec<RpcAccountBalance>>(); self.set_cached_largest_accounts(&config.filter, bank.slot(), &accounts); new_response(&bank, accounts) } } fn get_supply(&self, commitment: Option<CommitmentConfig>) -> RpcResponse<RpcSupply> { let bank = self.bank(commitment); let non_circulating_supply = calculate_non_circulating_supply(&bank); let total_supply = bank.capitalization(); new_response( &bank, RpcSupply { total: total_supply, circulating: total_supply - non_circulating_supply.lamports, non_circulating: non_circulating_supply.lamports, non_circulating_accounts: non_circulating_supply .accounts .iter() .map(|pubkey| pubkey.to_string()) .collect(), }, ) } fn get_vote_accounts( &self, config: Option<RpcGetVoteAccountsConfig>, ) -> Result<RpcVoteAccountStatus> { let config = config.unwrap_or_default(); let filter_by_vote_pubkey = if let Some(ref vote_pubkey) = config.vote_pubkey { Some(verify_pubkey(vote_pubkey)?) } else { None }; let bank = self.bank(config.commitment); let vote_accounts = bank.vote_accounts(); let epoch_vote_accounts = bank .epoch_vote_accounts(bank.get_epoch_and_slot_index(bank.slot()).0) .ok_or_else(Error::invalid_request)?; let default_vote_state = VoteState::default(); let (current_vote_accounts, delinquent_vote_accounts): ( Vec<RpcVoteAccountInfo>, Vec<RpcVoteAccountInfo>, ) = vote_accounts .iter() .filter_map(|(vote_pubkey, (activated_stake, account))| { if let Some(filter_by_vote_pubkey) = filter_by_vote_pubkey { if *vote_pubkey != filter_by_vote_pubkey { return None; } } let vote_state = account.vote_state(); let vote_state = vote_state.as_ref().unwrap_or(&default_vote_state); let last_vote = if let Some(vote) = vote_state.votes.iter().last() { vote.slot } else { 0 }; let epoch_credits = vote_state.epoch_credits(); let epoch_credits = if epoch_credits.len() > MAX_RPC_EPOCH_CREDITS_HISTORY { epoch_credits .iter() .skip(epoch_credits.len() - MAX_RPC_EPOCH_CREDITS_HISTORY) .cloned() .collect() } else { epoch_credits.clone() }; Some(RpcVoteAccountInfo { vote_pubkey: vote_pubkey.to_string(), node_pubkey: vote_state.node_pubkey.to_string(), activated_stake: *activated_stake, commission: vote_state.commission, root_slot: vote_state.root_slot.unwrap_or(0), epoch_credits, epoch_vote_account: epoch_vote_accounts.contains_key(vote_pubkey), last_vote, }) }) .partition(|vote_account_info| { if bank.slot() >= DELINQUENT_VALIDATOR_SLOT_DISTANCE as u64 { vote_account_info.last_vote > bank.slot() - DELINQUENT_VALIDATOR_SLOT_DISTANCE as u64 } else { vote_account_info.last_vote > 0 } }); let delinquent_staked_vote_accounts = delinquent_vote_accounts .into_iter() .filter(|vote_account_info| vote_account_info.activated_stake > 0) .collect::<Vec<_>>(); Ok(RpcVoteAccountStatus { current: current_vote_accounts, delinquent: delinquent_staked_vote_accounts, }) } fn check_blockstore_root<T>( &self, result: &std::result::Result<T, BlockstoreError>, slot: Slot, ) -> Result<()> where T: std::fmt::Debug, { if result.is_err() { let err = result.as_ref().unwrap_err(); debug!( "check_blockstore_root, slot: {:?}, max root: {:?}, err: {:?}", slot, self.blockstore.max_root(), err ); if slot >= self.blockstore.max_root() { return Err(RpcCustomError::BlockNotAvailable { slot }.into()); } if self.blockstore.is_skipped(slot) { return Err(RpcCustomError::SlotSkipped { slot }.into()); } } Ok(()) } fn check_slot_cleaned_up<T>( &self, result: &std::result::Result<T, BlockstoreError>, slot: Slot, ) -> Result<()> where T: std::fmt::Debug, { if result.is_err() { if let BlockstoreError::SlotCleanedUp = result.as_ref().unwrap_err() { return Err(RpcCustomError::BlockCleanedUp { slot, first_available_block: self .blockstore .get_first_available_block() .unwrap_or_default(), } .into()); } } Ok(()) } fn check_bigtable_result<T>( &self, result: &std::result::Result<T, solana_storage_bigtable::Error>, ) -> Result<()> where T: std::fmt::Debug, { if result.is_err() { let err = result.as_ref().unwrap_err(); if let solana_storage_bigtable::Error::BlockNotFound(slot) = err { return Err(RpcCustomError::LongTermStorageSlotSkipped { slot: *slot }.into()); } } Ok(()) } pub fn get_block( &self, slot: Slot, config: Option<RpcEncodingConfigWrapper<RpcBlockConfig>>, ) -> Result<Option<UiConfirmedBlock>> { if self.config.enable_rpc_transaction_history { let config = config .map(|config| config.convert_to_current()) .unwrap_or_default(); let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Json); let transaction_details = config.transaction_details.unwrap_or_default(); let show_rewards = config.rewards.unwrap_or(true); let commitment = config.commitment.unwrap_or_default(); check_is_at_least_confirmed(commitment)?; // Block is old enough to be finalized if slot <= self .block_commitment_cache .read() .unwrap() .highest_confirmed_root() { let result = self.blockstore.get_rooted_block(slot, true); self.check_blockstore_root(&result, slot)?; if result.is_err() { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_result = self .runtime .block_on(bigtable_ledger_storage.get_confirmed_block(slot)); self.check_bigtable_result(&bigtable_result)?; return Ok(bigtable_result.ok().map(|confirmed_block| { confirmed_block.configure(encoding, transaction_details, show_rewards) })); } } self.check_slot_cleaned_up(&result, slot)?; return Ok(result.ok().map(|confirmed_block| { confirmed_block.configure(encoding, transaction_details, show_rewards) })); } else if commitment.is_confirmed() { // Check if block is confirmed let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); if confirmed_bank.status_cache_ancestors().contains(&slot) && slot <= self .max_complete_transaction_status_slot .load(Ordering::SeqCst) { let result = self.blockstore.get_complete_block(slot, true); return Ok(result.ok().map(|confirmed_block| { confirmed_block.configure(encoding, transaction_details, show_rewards) })); } } } Err(RpcCustomError::BlockNotAvailable { slot }.into()) } pub fn get_blocks( &self, start_slot: Slot, end_slot: Option<Slot>, commitment: Option<CommitmentConfig>, ) -> Result<Vec<Slot>> { let commitment = commitment.unwrap_or_default(); check_is_at_least_confirmed(commitment)?; let highest_confirmed_root = self .block_commitment_cache .read() .unwrap() .highest_confirmed_root(); let end_slot = min( end_slot.unwrap_or_else(|| start_slot.saturating_add(MAX_GET_CONFIRMED_BLOCKS_RANGE)), if commitment.is_finalized() { highest_confirmed_root } else { self.bank(Some(CommitmentConfig::confirmed())).slot() }, ); if end_slot < start_slot { return Ok(vec![]); } if end_slot - start_slot > MAX_GET_CONFIRMED_BLOCKS_RANGE { return Err(Error::invalid_params(format!( "Slot range too large; max {}", MAX_GET_CONFIRMED_BLOCKS_RANGE ))); } let lowest_blockstore_slot = self.blockstore.lowest_slot(); if start_slot < lowest_blockstore_slot { // If the starting slot is lower than what's available in blockstore assume the entire // [start_slot..end_slot] can be fetched from BigTable. This range should not ever run // into unfinalized confirmed blocks due to MAX_GET_CONFIRMED_BLOCKS_RANGE if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { return self .runtime .block_on( bigtable_ledger_storage .get_confirmed_blocks(start_slot, (end_slot - start_slot) as usize + 1), // increment limit by 1 to ensure returned range is inclusive of both start_slot and end_slot ) .map(|mut bigtable_blocks| { bigtable_blocks.retain(|&slot| slot <= end_slot); bigtable_blocks }) .map_err(|_| { Error::invalid_params( "BigTable query failed (maybe timeout due to too large range?)" .to_string(), ) }); } } // Finalized blocks let mut blocks: Vec<_> = self .blockstore .rooted_slot_iterator(max(start_slot, lowest_blockstore_slot)) .map_err(|_| Error::internal_error())? .filter(|&slot| slot <= end_slot && slot <= highest_confirmed_root) .collect(); let last_element = blocks.last().cloned().unwrap_or_default(); // Maybe add confirmed blocks if commitment.is_confirmed() && last_element < end_slot { let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); let mut confirmed_blocks = confirmed_bank .status_cache_ancestors() .into_iter() .filter(|&slot| slot <= end_slot && slot > last_element) .collect(); blocks.append(&mut confirmed_blocks); } Ok(blocks) } pub fn get_blocks_with_limit( &self, start_slot: Slot, limit: usize, commitment: Option<CommitmentConfig>, ) -> Result<Vec<Slot>> { let commitment = commitment.unwrap_or_default(); check_is_at_least_confirmed(commitment)?; if limit > MAX_GET_CONFIRMED_BLOCKS_RANGE as usize { return Err(Error::invalid_params(format!( "Limit too large; max {}", MAX_GET_CONFIRMED_BLOCKS_RANGE ))); } let lowest_blockstore_slot = self.blockstore.lowest_slot(); if start_slot < lowest_blockstore_slot { // If the starting slot is lower than what's available in blockstore assume the entire // range can be fetched from BigTable. This range should not ever run into unfinalized // confirmed blocks due to MAX_GET_CONFIRMED_BLOCKS_RANGE if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { return Ok(self .runtime .block_on(bigtable_ledger_storage.get_confirmed_blocks(start_slot, limit)) .unwrap_or_default()); } } let highest_confirmed_root = self .block_commitment_cache .read() .unwrap() .highest_confirmed_root(); // Finalized blocks let mut blocks: Vec<_> = self .blockstore .rooted_slot_iterator(max(start_slot, lowest_blockstore_slot)) .map_err(|_| Error::internal_error())? .take(limit) .filter(|&slot| slot <= highest_confirmed_root) .collect(); // Maybe add confirmed blocks if commitment.is_confirmed() && blocks.len() < limit { let last_element = blocks .last() .cloned() .unwrap_or_else(|| start_slot.saturating_sub(1)); let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); let mut confirmed_blocks = confirmed_bank .status_cache_ancestors() .into_iter() .filter(|&slot| slot > last_element) .collect(); blocks.append(&mut confirmed_blocks); blocks.truncate(limit); } Ok(blocks) } pub fn get_block_time(&self, slot: Slot) -> Result<Option<UnixTimestamp>> { if slot <= self .block_commitment_cache .read() .unwrap() .highest_confirmed_root() { let result = self.blockstore.get_block_time(slot); self.check_blockstore_root(&result, slot)?; if result.is_err() || matches!(result, Ok(None)) { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_result = self .runtime .block_on(bigtable_ledger_storage.get_confirmed_block(slot)); self.check_bigtable_result(&bigtable_result)?; return Ok(bigtable_result .ok() .and_then(|confirmed_block| confirmed_block.block_time)); } } self.check_slot_cleaned_up(&result, slot)?; Ok(result.ok().unwrap_or(None)) } else { let r_bank_forks = self.bank_forks.read().unwrap(); if let Some(bank) = r_bank_forks.get(slot) { Ok(Some(bank.clock().unix_timestamp)) } else { Err(RpcCustomError::BlockNotAvailable { slot }.into()) } } } pub fn get_signature_confirmation_status( &self, signature: Signature, commitment: Option<CommitmentConfig>, ) -> Option<RpcSignatureConfirmation> { let bank = self.bank(commitment); let transaction_status = self.get_transaction_status(signature, &bank)?; let confirmations = transaction_status .confirmations .unwrap_or(MAX_LOCKOUT_HISTORY + 1); Some(RpcSignatureConfirmation { confirmations, status: transaction_status.status, }) } pub fn get_signature_status( &self, signature: Signature, commitment: Option<CommitmentConfig>, ) -> Option<transaction::Result<()>> { let bank = self.bank(commitment); let (_, status) = bank.get_signature_status_slot(&signature)?; Some(status) } pub fn get_signature_statuses( &self, signatures: Vec<Signature>, config: Option<RpcSignatureStatusConfig>, ) -> Result<RpcResponse<Vec<Option<TransactionStatus>>>> { let mut statuses: Vec<Option<TransactionStatus>> = vec![]; let search_transaction_history = config .map(|x| x.search_transaction_history) .unwrap_or(false); let bank = self.bank(Some(CommitmentConfig::processed())); for signature in signatures { let status = if let Some(status) = self.get_transaction_status(signature, &bank) { Some(status) } else if self.config.enable_rpc_transaction_history && search_transaction_history { self.blockstore .get_rooted_transaction_status(signature) .map_err(|_| Error::internal_error())? .filter(|(slot, _status_meta)| { slot <= &self .block_commitment_cache .read() .unwrap() .highest_confirmed_root() }) .map(|(slot, status_meta)| { let err = status_meta.status.clone().err(); TransactionStatus { slot, status: status_meta.status, confirmations: None, err, confirmation_status: Some(TransactionConfirmationStatus::Finalized), } }) .or_else(|| { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { self.runtime .block_on(bigtable_ledger_storage.get_signature_status(&signature)) .map(Some) .unwrap_or(None) } else { None } }) } else { None }; statuses.push(status); } Ok(new_response(&bank, statuses)) } fn get_transaction_status( &self, signature: Signature, bank: &Arc<Bank>, ) -> Option<TransactionStatus> { let (slot, status) = bank.get_signature_status_slot(&signature)?; let r_block_commitment_cache = self.block_commitment_cache.read().unwrap(); let optimistically_confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); let optimistically_confirmed = optimistically_confirmed_bank.get_signature_status_slot(&signature); let confirmations = if r_block_commitment_cache.root() >= slot && is_finalized(&r_block_commitment_cache, bank, &self.blockstore, slot) { None } else { r_block_commitment_cache .get_confirmation_count(slot) .or(Some(0)) }; let err = status.clone().err(); Some(TransactionStatus { slot, status, confirmations, err, confirmation_status: if confirmations.is_none() { Some(TransactionConfirmationStatus::Finalized) } else if optimistically_confirmed.is_some() { Some(TransactionConfirmationStatus::Confirmed) } else { Some(TransactionConfirmationStatus::Processed) }, }) } pub fn get_transaction( &self, signature: Signature, config: Option<RpcEncodingConfigWrapper<RpcTransactionConfig>>, ) -> Result<Option<EncodedConfirmedTransaction>> { let config = config .map(|config| config.convert_to_current()) .unwrap_or_default(); let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Json); let commitment = config.commitment.unwrap_or_default(); check_is_at_least_confirmed(commitment)?; if self.config.enable_rpc_transaction_history { let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); let transaction = if commitment.is_confirmed() { let highest_confirmed_slot = confirmed_bank.slot(); self.blockstore .get_complete_transaction(signature, highest_confirmed_slot) } else { self.blockstore.get_rooted_transaction(signature) }; match transaction.unwrap_or(None) { Some(mut confirmed_transaction) => { if commitment.is_confirmed() && confirmed_bank // should be redundant .status_cache_ancestors() .contains(&confirmed_transaction.slot) { if confirmed_transaction.block_time.is_none() { let r_bank_forks = self.bank_forks.read().unwrap(); confirmed_transaction.block_time = r_bank_forks .get(confirmed_transaction.slot) .map(|bank| bank.clock().unix_timestamp); } return Ok(Some(confirmed_transaction.encode(encoding))); } if confirmed_transaction.slot <= self .block_commitment_cache .read() .unwrap() .highest_confirmed_root() { return Ok(Some(confirmed_transaction.encode(encoding))); } } None => { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { return Ok(self .runtime .block_on(bigtable_ledger_storage.get_confirmed_transaction(&signature)) .unwrap_or(None) .map(|confirmed| confirmed.encode(encoding))); } } } } Ok(None) } pub fn get_confirmed_signatures_for_address( &self, pubkey: Pubkey, start_slot: Slot, end_slot: Slot, ) -> Vec<Signature> { if self.config.enable_rpc_transaction_history { // TODO: Add bigtable_ledger_storage support as a part of // https://github.com/solana-labs/solana/pull/10928 let end_slot = min( end_slot, self.block_commitment_cache .read() .unwrap() .highest_confirmed_root(), ); self.blockstore .get_confirmed_signatures_for_address(pubkey, start_slot, end_slot) .unwrap_or_default() } else { vec![] } } pub fn get_signatures_for_address( &self, address: Pubkey, mut before: Option<Signature>, until: Option<Signature>, mut limit: usize, commitment: Option<CommitmentConfig>, ) -> Result<Vec<RpcConfirmedTransactionStatusWithSignature>> { let commitment = commitment.unwrap_or_default(); check_is_at_least_confirmed(commitment)?; if self.config.enable_rpc_transaction_history { let highest_confirmed_root = self .block_commitment_cache .read() .unwrap() .highest_confirmed_root(); let highest_slot = if commitment.is_confirmed() { let confirmed_bank = self.bank(Some(CommitmentConfig::confirmed())); confirmed_bank.slot() } else { highest_confirmed_root }; let mut results = self .blockstore .get_confirmed_signatures_for_address2(address, highest_slot, before, until, limit) .map_err(|err| Error::invalid_params(format!("{}", err)))?; if results.len() < limit { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { if !results.is_empty() { limit -= results.len(); before = results.last().map(|x| x.signature); } let bigtable_results = self.runtime.block_on( bigtable_ledger_storage.get_confirmed_signatures_for_address( &address, before.as_ref(), until.as_ref(), limit, ), ); match bigtable_results { Ok(bigtable_results) => { results.extend(bigtable_results.into_iter().map(|x| x.0)); } Err(err) => { warn!("{:?}", err); } } } } Ok(results .into_iter() .map(|x| { let mut item: RpcConfirmedTransactionStatusWithSignature = x.into(); if item.slot <= highest_confirmed_root { item.confirmation_status = Some(TransactionConfirmationStatus::Finalized); } else { item.confirmation_status = Some(TransactionConfirmationStatus::Confirmed); if item.block_time.is_none() { let r_bank_forks = self.bank_forks.read().unwrap(); item.block_time = r_bank_forks .get(item.slot) .map(|bank| bank.clock().unix_timestamp); } } item }) .collect()) } else { Ok(vec![]) } } pub fn get_first_available_block(&self) -> Slot { let slot = self .blockstore .get_first_available_block() .unwrap_or_default(); if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { let bigtable_slot = self .runtime .block_on(bigtable_ledger_storage.get_first_available_block()) .unwrap_or(None) .unwrap_or(slot); if bigtable_slot < slot { return bigtable_slot; } } slot } pub fn get_stake_activation( &self, pubkey: &Pubkey, config: Option<RpcEpochConfig>, ) -> Result<RpcStakeActivation> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let epoch = config.epoch.unwrap_or_else(|| bank.epoch()); if bank.epoch().saturating_sub(epoch) > solana_sdk::stake_history::MAX_ENTRIES as u64 { return Err(Error::invalid_params(format!( "Invalid param: epoch {:?} is too far in the past", epoch ))); } if epoch > bank.epoch() { return Err(Error::invalid_params(format!( "Invalid param: epoch {:?} has not yet started", epoch ))); } let stake_account = bank .get_account(pubkey) .ok_or_else(|| Error::invalid_params("Invalid param: account not found".to_string()))?; let stake_state: StakeState = stake_account .state() .map_err(|_| Error::invalid_params("Invalid param: not a stake account".to_string()))?; let delegation = stake_state.delegation(); if delegation.is_none() { match stake_state.meta() { None => { return Err(Error::invalid_params( "Invalid param: stake account not initialized".to_string(), )); } Some(meta) => { let rent_exempt_reserve = meta.rent_exempt_reserve; return Ok(RpcStakeActivation { state: StakeActivationState::Inactive, active: 0, inactive: stake_account.lamports().saturating_sub(rent_exempt_reserve), }); } } } let delegation = delegation.unwrap(); let stake_history_account = bank .get_account(&stake_history::id()) .ok_or_else(Error::internal_error)?; let stake_history = solana_sdk::account::from_account::<StakeHistory, _>(&stake_history_account) .ok_or_else(Error::internal_error)?; let (active, activating, deactivating) = delegation.stake_activating_and_deactivating( epoch, Some(&stake_history), bank.stake_program_v2_enabled(), ); let stake_activation_state = if deactivating > 0 { StakeActivationState::Deactivating } else if activating > 0 { StakeActivationState::Activating } else if active > 0 { StakeActivationState::Active } else { StakeActivationState::Inactive }; let inactive_stake = match stake_activation_state { StakeActivationState::Activating => activating, StakeActivationState::Active => 0, StakeActivationState::Deactivating => delegation.stake.saturating_sub(active), StakeActivationState::Inactive => delegation.stake, }; Ok(RpcStakeActivation { state: stake_activation_state, active, inactive: inactive_stake, }) } pub fn get_token_account_balance( &self, pubkey: &Pubkey, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<UiTokenAmount>> { let bank = self.bank(commitment); let account = bank.get_account(pubkey).ok_or_else(|| { Error::invalid_params("Invalid param: could not find account".to_string()) })?; if account.owner() != &spl_token_id_v2_0() { return Err(Error::invalid_params( "Invalid param: not a v2.0 Token account".to_string(), )); } let token_account = TokenAccount::unpack(&account.data()).map_err(|_| { Error::invalid_params("Invalid param: not a v2.0 Token account".to_string()) })?; let mint = &Pubkey::from_str(&token_account.mint.to_string()) .expect("Token account mint should be convertible to Pubkey"); let (_, decimals) = get_mint_owner_and_decimals(&bank, &mint)?; let balance = token_amount_to_ui_amount(token_account.amount, decimals); Ok(new_response(&bank, balance)) } pub fn get_token_supply( &self, mint: &Pubkey, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<UiTokenAmount>> { let bank = self.bank(commitment); let mint_account = bank.get_account(mint).ok_or_else(|| { Error::invalid_params("Invalid param: could not find account".to_string()) })?; if mint_account.owner() != &spl_token_id_v2_0() { return Err(Error::invalid_params( "Invalid param: not a v2.0 Token mint".to_string(), )); } let mint = Mint::unpack(&mint_account.data()).map_err(|_| { Error::invalid_params("Invalid param: mint could not be unpacked".to_string()) })?; let supply = token_amount_to_ui_amount(mint.supply, mint.decimals); Ok(new_response(&bank, supply)) } pub fn get_token_largest_accounts( &self, mint: &Pubkey, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<Vec<RpcTokenAccountBalance>>> { let bank = self.bank(commitment); let (mint_owner, decimals) = get_mint_owner_and_decimals(&bank, mint)?; if mint_owner != spl_token_id_v2_0() { return Err(Error::invalid_params( "Invalid param: not a v2.0 Token mint".to_string(), )); } let mut token_balances: Vec<RpcTokenAccountBalance> = self .get_filtered_spl_token_accounts_by_mint(&bank, &mint, vec![]) .into_iter() .map(|(address, account)| { let amount = TokenAccount::unpack(&account.data()) .map(|account| account.amount) .unwrap_or(0); let amount = token_amount_to_ui_amount(amount, decimals); RpcTokenAccountBalance { address: address.to_string(), amount, } }) .collect(); token_balances.sort_by(|a, b| { a.amount .amount .parse::<u64>() .unwrap() .cmp(&b.amount.amount.parse::<u64>().unwrap()) .reverse() }); token_balances.truncate(NUM_LARGEST_ACCOUNTS); Ok(new_response(&bank, token_balances)) } pub fn get_token_accounts_by_owner( &self, owner: &Pubkey, token_account_filter: TokenAccountsFilter, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<RpcKeyedAccount>>> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let encoding = config.encoding.unwrap_or(UiAccountEncoding::Binary); let data_slice_config = config.data_slice; check_slice_and_encoding(&encoding, data_slice_config.is_some())?; let (_, mint) = get_token_program_id_and_mint(&bank, token_account_filter)?; let mut filters = vec![]; if let Some(mint) = mint { // Optional filter on Mint address filters.push(RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Binary(mint.to_string()), encoding: None, })); } let keyed_accounts = self.get_filtered_spl_token_accounts_by_owner(&bank, owner, filters); let accounts = if encoding == UiAccountEncoding::JsonParsed { get_parsed_token_accounts(bank.clone(), keyed_accounts.into_iter()).collect() } else { keyed_accounts .into_iter() .map(|(pubkey, account)| RpcKeyedAccount { pubkey: pubkey.to_string(), account: UiAccount::encode( &pubkey, account, encoding.clone(), None, data_slice_config, ), }) .collect() }; Ok(new_response(&bank, accounts)) } pub fn get_token_accounts_by_delegate( &self, delegate: &Pubkey, token_account_filter: TokenAccountsFilter, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<RpcKeyedAccount>>> { let config = config.unwrap_or_default(); let bank = self.bank(config.commitment); let encoding = config.encoding.unwrap_or(UiAccountEncoding::Binary); let data_slice_config = config.data_slice; check_slice_and_encoding(&encoding, data_slice_config.is_some())?; let (token_program_id, mint) = get_token_program_id_and_mint(&bank, token_account_filter)?; let mut filters = vec![ // Filter on Delegate is_some() RpcFilterType::Memcmp(Memcmp { offset: 72, bytes: MemcmpEncodedBytes::Binary( bs58::encode(bincode::serialize(&1u32).unwrap()).into_string(), ), encoding: None, }), // Filter on Delegate address RpcFilterType::Memcmp(Memcmp { offset: 76, bytes: MemcmpEncodedBytes::Binary(delegate.to_string()), encoding: None, }), ]; // Optional filter on Mint address, uses mint account index for scan let keyed_accounts = if let Some(mint) = mint { self.get_filtered_spl_token_accounts_by_mint(&bank, &mint, filters) } else { // Filter on Token Account state filters.push(RpcFilterType::DataSize( TokenAccount::get_packed_len() as u64 )); self.get_filtered_program_accounts(&bank, &token_program_id, filters) }; let accounts = if encoding == UiAccountEncoding::JsonParsed { get_parsed_token_accounts(bank.clone(), keyed_accounts.into_iter()).collect() } else { keyed_accounts .into_iter() .map(|(pubkey, account)| RpcKeyedAccount { pubkey: pubkey.to_string(), account: UiAccount::encode( &pubkey, account, encoding.clone(), None, data_slice_config, ), }) .collect() }; Ok(new_response(&bank, accounts)) } /// Use a set of filters to get an iterator of keyed program accounts from a bank fn get_filtered_program_accounts( &self, bank: &Arc<Bank>, program_id: &Pubkey, filters: Vec<RpcFilterType>, ) -> Vec<(Pubkey, AccountSharedData)> { let filter_closure = |account: &AccountSharedData| { filters.iter().all(|filter_type| match filter_type { RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), }) }; if self .config .account_indexes .contains(&AccountIndex::ProgramId) { bank.get_filtered_indexed_accounts(&IndexKey::ProgramId(*program_id), |account| { // The program-id account index checks for Account owner on inclusion. However, due // to the current AccountsDb implementation, an account may remain in storage as a // zero-lamport AccountSharedData::Default() after being wiped and reinitialized in later // updates. We include the redundant filters here to avoid returning these // accounts. account.owner() == program_id && filter_closure(account) }) } else { bank.get_filtered_program_accounts(program_id, filter_closure) } } /// Get an iterator of spl-token accounts by owner address fn get_filtered_spl_token_accounts_by_owner( &self, bank: &Arc<Bank>, owner_key: &Pubkey, mut filters: Vec<RpcFilterType>, ) -> Vec<(Pubkey, AccountSharedData)> { // The by-owner accounts index checks for Token Account state and Owner address on // inclusion. However, due to the current AccountsDb implementation, an account may remain // in storage as a zero-lamport AccountSharedData::Default() after being wiped and reinitialized in // later updates. We include the redundant filters here to avoid returning these accounts. // // Filter on Token Account state filters.push(RpcFilterType::DataSize( TokenAccount::get_packed_len() as u64 )); // Filter on Owner address filters.push(RpcFilterType::Memcmp(Memcmp { offset: SPL_TOKEN_ACCOUNT_OWNER_OFFSET, bytes: MemcmpEncodedBytes::Binary(owner_key.to_string()), encoding: None, })); if self .config .account_indexes .contains(&AccountIndex::SplTokenOwner) { bank.get_filtered_indexed_accounts(&IndexKey::SplTokenOwner(*owner_key), |account| { account.owner() == &spl_token_id_v2_0() && filters.iter().all(|filter_type| match filter_type { RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), }) }) } else { self.get_filtered_program_accounts(bank, &spl_token_id_v2_0(), filters) } } /// Get an iterator of spl-token accounts by mint address fn get_filtered_spl_token_accounts_by_mint( &self, bank: &Arc<Bank>, mint_key: &Pubkey, mut filters: Vec<RpcFilterType>, ) -> Vec<(Pubkey, AccountSharedData)> { // The by-mint accounts index checks for Token Account state and Mint address on inclusion. // However, due to the current AccountsDb implementation, an account may remain in storage // as be zero-lamport AccountSharedData::Default() after being wiped and reinitialized in later // updates. We include the redundant filters here to avoid returning these accounts. // // Filter on Token Account state filters.push(RpcFilterType::DataSize( TokenAccount::get_packed_len() as u64 )); // Filter on Mint address filters.push(RpcFilterType::Memcmp(Memcmp { offset: SPL_TOKEN_ACCOUNT_MINT_OFFSET, bytes: MemcmpEncodedBytes::Binary(mint_key.to_string()), encoding: None, })); if self .config .account_indexes .contains(&AccountIndex::SplTokenMint) { bank.get_filtered_indexed_accounts(&IndexKey::SplTokenMint(*mint_key), |account| { account.owner() == &spl_token_id_v2_0() && filters.iter().all(|filter_type| match filter_type { RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), }) }) } else { self.get_filtered_program_accounts(bank, &spl_token_id_v2_0(), filters) } } } fn verify_transaction(transaction: &Transaction) -> Result<()> { if transaction.verify().is_err() { return Err(RpcCustomError::TransactionSignatureVerificationFailure.into()); } if let Err(e) = transaction.verify_precompiles() { return Err(RpcCustomError::TransactionPrecompileVerificationFailure(e).into()); } Ok(()) } fn verify_filter(input: &RpcFilterType) -> Result<()> { input .verify() .map_err(|e| Error::invalid_params(format!("Invalid param: {:?}", e))) } fn verify_pubkey(input: &str) -> Result<Pubkey> { input .parse() .map_err(|e| Error::invalid_params(format!("Invalid param: {:?}", e))) } fn verify_hash(input: &str) -> Result<Hash> { input .parse() .map_err(|e| Error::invalid_params(format!("Invalid param: {:?}", e))) } fn verify_signature(input: &str) -> Result<Signature> { input .parse() .map_err(|e| Error::invalid_params(format!("Invalid param: {:?}", e))) } fn verify_token_account_filter( token_account_filter: RpcTokenAccountsFilter, ) -> Result<TokenAccountsFilter> { match token_account_filter { RpcTokenAccountsFilter::Mint(mint_str) => { let mint = verify_pubkey(&mint_str)?; Ok(TokenAccountsFilter::Mint(mint)) } RpcTokenAccountsFilter::ProgramId(program_id_str) => { let program_id = verify_pubkey(&program_id_str)?; Ok(TokenAccountsFilter::ProgramId(program_id)) } } } fn check_is_at_least_confirmed(commitment: CommitmentConfig) -> Result<()> { if !commitment.is_at_least_confirmed() { return Err(Error::invalid_params( "Method does not support commitment below `confirmed`", )); } Ok(()) } fn check_slice_and_encoding(encoding: &UiAccountEncoding, data_slice_is_some: bool) -> Result<()> { match encoding { UiAccountEncoding::JsonParsed => { if data_slice_is_some { let message = "Sliced account data can only be encoded using binary (base 58) or base64 encoding." .to_string(); Err(error::Error { code: error::ErrorCode::InvalidRequest, message, data: None, }) } else { Ok(()) } } UiAccountEncoding::Binary | UiAccountEncoding::Base58 | UiAccountEncoding::Base64 | UiAccountEncoding::Base64Zstd => Ok(()), } } fn get_encoded_account( bank: &Arc<Bank>, pubkey: &Pubkey, encoding: UiAccountEncoding, data_slice: Option<UiDataSliceConfig>, ) -> Result<Option<UiAccount>> { let mut response = None; if let Some(account) = bank.get_account(pubkey) { if account.owner() == &spl_token_id_v2_0() && encoding == UiAccountEncoding::JsonParsed { response = Some(get_parsed_token_account(bank.clone(), pubkey, account)); } else if (encoding == UiAccountEncoding::Binary || encoding == UiAccountEncoding::Base58) && account.data().len() > 128 { let message = "Encoded binary (base 58) data should be less than 128 bytes, please use Base64 encoding.".to_string(); return Err(error::Error { code: error::ErrorCode::InvalidRequest, message, data: None, }); } else { response = Some(UiAccount::encode( pubkey, account, encoding, None, data_slice, )); } } Ok(response) } fn get_spl_token_owner_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> Option<Pubkey> { if program_id != &spl_token_id_v2_0() { return None; } let mut data_size_filter: Option<u64> = None; let mut owner_key: Option<Pubkey> = None; for filter in filters { match filter { RpcFilterType::DataSize(size) => data_size_filter = Some(*size), RpcFilterType::Memcmp(Memcmp { offset: SPL_TOKEN_ACCOUNT_OWNER_OFFSET, bytes: MemcmpEncodedBytes::Binary(bytes), .. }) => { if let Ok(key) = Pubkey::from_str(bytes) { owner_key = Some(key) } } _ => {} } } if data_size_filter == Some(TokenAccount::get_packed_len() as u64) { owner_key } else { None } } fn get_spl_token_mint_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> Option<Pubkey> { if program_id != &spl_token_id_v2_0() { return None; } let mut data_size_filter: Option<u64> = None; let mut mint: Option<Pubkey> = None; for filter in filters { match filter { RpcFilterType::DataSize(size) => data_size_filter = Some(*size), RpcFilterType::Memcmp(Memcmp { offset: SPL_TOKEN_ACCOUNT_MINT_OFFSET, bytes: MemcmpEncodedBytes::Binary(bytes), .. }) => { if let Ok(key) = Pubkey::from_str(bytes) { mint = Some(key) } } _ => {} } } if data_size_filter == Some(TokenAccount::get_packed_len() as u64) { mint } else { None } } pub(crate) fn get_parsed_token_account( bank: Arc<Bank>, pubkey: &Pubkey, account: AccountSharedData, ) -> UiAccount { let additional_data = get_token_account_mint(&account.data()) .and_then(|mint_pubkey| get_mint_owner_and_decimals(&bank, &mint_pubkey).ok()) .map(|(_, decimals)| AccountAdditionalData { spl_token_decimals: Some(decimals), }); UiAccount::encode( pubkey, account, UiAccountEncoding::JsonParsed, additional_data, None, ) } pub(crate) fn get_parsed_token_accounts<I>( bank: Arc<Bank>, keyed_accounts: I, ) -> impl Iterator<Item = RpcKeyedAccount> where I: Iterator<Item = (Pubkey, AccountSharedData)>, { let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new(); keyed_accounts.filter_map(move |(pubkey, account)| { let additional_data = get_token_account_mint(&account.data()).map(|mint_pubkey| { let spl_token_decimals = mint_decimals.get(&mint_pubkey).cloned().or_else(|| { let (_, decimals) = get_mint_owner_and_decimals(&bank, &mint_pubkey).ok()?; mint_decimals.insert(mint_pubkey, decimals); Some(decimals) }); AccountAdditionalData { spl_token_decimals } }); let maybe_encoded_account = UiAccount::encode( &pubkey, account, UiAccountEncoding::JsonParsed, additional_data, None, ); if let UiAccountData::Json(_) = &maybe_encoded_account.data { Some(RpcKeyedAccount { pubkey: pubkey.to_string(), account: maybe_encoded_account, }) } else { None } }) } /// Analyze a passed Pubkey that may be a Token program id or Mint address to determine the program /// id and optional Mint fn get_token_program_id_and_mint( bank: &Arc<Bank>, token_account_filter: TokenAccountsFilter, ) -> Result<(Pubkey, Option<Pubkey>)> { match token_account_filter { TokenAccountsFilter::Mint(mint) => { let (mint_owner, _) = get_mint_owner_and_decimals(&bank, &mint)?; if mint_owner != spl_token_id_v2_0() { return Err(Error::invalid_params( "Invalid param: not a v2.0 Token mint".to_string(), )); } Ok((mint_owner, Some(mint))) } TokenAccountsFilter::ProgramId(program_id) => { if program_id == spl_token_id_v2_0() { Ok((program_id, None)) } else { Err(Error::invalid_params( "Invalid param: unrecognized Token program id".to_string(), )) } } } } /// Analyze a mint Pubkey that may be the native_mint and get the mint-account owner (token /// program_id) and decimals fn get_mint_owner_and_decimals(bank: &Arc<Bank>, mint: &Pubkey) -> Result<(Pubkey, u8)> { if mint == &spl_token_v2_0_native_mint() { Ok((spl_token_id_v2_0(), spl_token_v2_0::native_mint::DECIMALS)) } else { let mint_account = bank.get_account(mint).ok_or_else(|| { Error::invalid_params("Invalid param: could not find mint".to_string()) })?; let decimals = get_mint_decimals(&mint_account.data())?; Ok((*mint_account.owner(), decimals)) } } fn get_mint_decimals(data: &[u8]) -> Result<u8> { Mint::unpack(data) .map_err(|_| { Error::invalid_params("Invalid param: Token mint could not be unpacked".to_string()) }) .map(|mint| mint.decimals) } fn _send_transaction( meta: JsonRpcRequestProcessor, transaction: Transaction, wire_transaction: Vec<u8>, last_valid_slot: Slot, durable_nonce_info: Option<(Pubkey, Hash)>, ) -> Result<String> { if transaction.signatures.is_empty() { return Err(RpcCustomError::TransactionSignatureVerificationFailure.into()); } let signature = transaction.signatures[0]; let transaction_info = TransactionInfo::new( signature, wire_transaction, last_valid_slot, durable_nonce_info, ); meta.transaction_sender .lock() .unwrap() .send(transaction_info) .unwrap_or_else(|err| warn!("Failed to enqueue transaction: {}", err)); Ok(signature.to_string()) } // Minimal RPC interface that trusted validators are expected to provide pub mod rpc_minimal { use super::*; #[rpc] pub trait Minimal { type Metadata; #[rpc(meta, name = "getBalance")] fn get_balance( &self, meta: Self::Metadata, pubkey_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<u64>>; #[rpc(meta, name = "getEpochInfo")] fn get_epoch_info( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<EpochInfo>; #[rpc(meta, name = "getHealth")] fn get_health(&self, meta: Self::Metadata) -> Result<String>; #[rpc(meta, name = "getIdentity")] fn get_identity(&self, meta: Self::Metadata) -> Result<RpcIdentity>; #[rpc(meta, name = "getSlot")] fn get_slot( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<Slot>; #[rpc(meta, name = "getSnapshotSlot")] fn get_snapshot_slot(&self, meta: Self::Metadata) -> Result<Slot>; #[rpc(meta, name = "getTransactionCount")] fn get_transaction_count( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<u64>; #[rpc(meta, name = "getVersion")] fn get_version(&self, meta: Self::Metadata) -> Result<RpcVersionInfo>; // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal #[rpc(meta, name = "getVoteAccounts")] fn get_vote_accounts( &self, meta: Self::Metadata, config: Option<RpcGetVoteAccountsConfig>, ) -> Result<RpcVoteAccountStatus>; // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal #[rpc(meta, name = "getLeaderSchedule")] fn get_leader_schedule( &self, meta: Self::Metadata, options: Option<RpcLeaderScheduleConfigWrapper>, config: Option<RpcLeaderScheduleConfig>, ) -> Result<Option<RpcLeaderSchedule>>; } pub struct MinimalImpl; impl Minimal for MinimalImpl { type Metadata = JsonRpcRequestProcessor; fn get_balance( &self, meta: Self::Metadata, pubkey_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<u64>> { debug!("get_balance rpc request received: {:?}", pubkey_str); let pubkey = verify_pubkey(&pubkey_str)?; Ok(meta.get_balance(&pubkey, commitment)) } fn get_epoch_info( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<EpochInfo> { debug!("get_epoch_info rpc request received"); let bank = meta.bank(commitment); Ok(bank.get_epoch_info()) } fn get_health(&self, meta: Self::Metadata) -> Result<String> { match meta.health.check() { RpcHealthStatus::Ok => Ok("ok".to_string()), RpcHealthStatus::Unknown => Err(RpcCustomError::NodeUnhealthy { num_slots_behind: None, } .into()), RpcHealthStatus::Behind { num_slots } => Err(RpcCustomError::NodeUnhealthy { num_slots_behind: Some(num_slots), } .into()), } } fn get_identity(&self, meta: Self::Metadata) -> Result<RpcIdentity> { debug!("get_identity rpc request received"); Ok(RpcIdentity { identity: meta.config.identity_pubkey.to_string(), }) } fn get_slot( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<Slot> { debug!("get_slot rpc request received"); Ok(meta.get_slot(commitment)) } fn get_snapshot_slot(&self, meta: Self::Metadata) -> Result<Slot> { debug!("get_snapshot_slot rpc request received"); meta.snapshot_config .and_then(|snapshot_config| { get_highest_snapshot_archive_path(&snapshot_config.snapshot_package_output_path) .map(|(_, (slot, _, _))| slot) }) .ok_or_else(|| RpcCustomError::NoSnapshot.into()) } fn get_transaction_count( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<u64> { debug!("get_transaction_count rpc request received"); Ok(meta.get_transaction_count(commitment)) } fn get_version(&self, _: Self::Metadata) -> Result<RpcVersionInfo> { debug!("get_version rpc request received"); let version = solana_version::Version::default(); Ok(RpcVersionInfo { solana_core: version.to_string(), feature_set: Some(version.feature_set), }) } // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal fn get_vote_accounts( &self, meta: Self::Metadata, config: Option<RpcGetVoteAccountsConfig>, ) -> Result<RpcVoteAccountStatus> { debug!("get_vote_accounts rpc request received"); meta.get_vote_accounts(config) } // TODO: Refactor `solana-validator wait-for-restart-window` to not require this method, so // it can be removed from rpc_minimal fn get_leader_schedule( &self, meta: Self::Metadata, options: Option<RpcLeaderScheduleConfigWrapper>, config: Option<RpcLeaderScheduleConfig>, ) -> Result<Option<RpcLeaderSchedule>> { let (slot, maybe_config) = options.map(|options| options.unzip()).unwrap_or_default(); let config = maybe_config.or(config).unwrap_or_default(); if let Some(ref identity) = config.identity { let _ = verify_pubkey(identity)?; } let bank = meta.bank(config.commitment); let slot = slot.unwrap_or_else(|| bank.slot()); let epoch = bank.epoch_schedule().get_epoch(slot); debug!("get_leader_schedule rpc request received: {:?}", slot); Ok(meta .leader_schedule_cache .get_epoch_leader_schedule(epoch) .map(|leader_schedule| { let mut schedule_by_identity = solana_ledger::leader_schedule_utils::leader_schedule_by_identity( leader_schedule.get_slot_leaders().iter().enumerate(), ); if let Some(identity) = config.identity { schedule_by_identity.retain(|k, _| *k == identity); } schedule_by_identity })) } } } // Full RPC interface that an API node is expected to provide // (rpc_minimal should also be provided by an API node) pub mod rpc_full { use super::*; #[rpc] pub trait Full { type Metadata; #[rpc(meta, name = "getAccountInfo")] fn get_account_info( &self, meta: Self::Metadata, pubkey_str: String, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Option<UiAccount>>>; #[rpc(meta, name = "getMultipleAccounts")] fn get_multiple_accounts( &self, meta: Self::Metadata, pubkey_strs: Vec<String>, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<Option<UiAccount>>>>; #[rpc(meta, name = "getProgramAccounts")] fn get_program_accounts( &self, meta: Self::Metadata, program_id_str: String, config: Option<RpcProgramAccountsConfig>, ) -> Result<Vec<RpcKeyedAccount>>; #[rpc(meta, name = "getMinimumBalanceForRentExemption")] fn get_minimum_balance_for_rent_exemption( &self, meta: Self::Metadata, data_len: usize, commitment: Option<CommitmentConfig>, ) -> Result<u64>; #[rpc(meta, name = "getInflationReward")] fn get_inflation_reward( &self, meta: Self::Metadata, address_strs: Vec<String>, config: Option<RpcEpochConfig>, ) -> Result<Vec<Option<RpcInflationReward>>>; #[rpc(meta, name = "getInflationGovernor")] fn get_inflation_governor( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<RpcInflationGovernor>; #[rpc(meta, name = "getInflationRate")] fn get_inflation_rate(&self, meta: Self::Metadata) -> Result<RpcInflationRate>; #[rpc(meta, name = "getEpochSchedule")] fn get_epoch_schedule(&self, meta: Self::Metadata) -> Result<EpochSchedule>; #[rpc(meta, name = "getClusterNodes")] fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result<Vec<RpcContactInfo>>; #[rpc(meta, name = "getRecentPerformanceSamples")] fn get_recent_performance_samples( &self, meta: Self::Metadata, limit: Option<usize>, ) -> Result<Vec<RpcPerfSample>>; #[rpc(meta, name = "getBlockCommitment")] fn get_block_commitment( &self, meta: Self::Metadata, block: Slot, ) -> Result<RpcBlockCommitment<BlockCommitmentArray>>; #[rpc(meta, name = "getGenesisHash")] fn get_genesis_hash(&self, meta: Self::Metadata) -> Result<String>; #[rpc(meta, name = "getRecentBlockhash")] fn get_recent_blockhash( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<RpcBlockhashFeeCalculator>>; #[rpc(meta, name = "getFees")] fn get_fees( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<RpcFees>>; #[rpc(meta, name = "getFeeCalculatorForBlockhash")] fn get_fee_calculator_for_blockhash( &self, meta: Self::Metadata, blockhash: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<Option<RpcFeeCalculator>>>; #[rpc(meta, name = "getFeeRateGovernor")] fn get_fee_rate_governor( &self, meta: Self::Metadata, ) -> Result<RpcResponse<RpcFeeRateGovernor>>; #[rpc(meta, name = "getSignatureStatuses")] fn get_signature_statuses( &self, meta: Self::Metadata, signature_strs: Vec<String>, config: Option<RpcSignatureStatusConfig>, ) -> Result<RpcResponse<Vec<Option<TransactionStatus>>>>; #[rpc(meta, name = "getMaxRetransmitSlot")] fn get_max_retransmit_slot(&self, meta: Self::Metadata) -> Result<Slot>; #[rpc(meta, name = "getMaxShredInsertSlot")] fn get_max_shred_insert_slot(&self, meta: Self::Metadata) -> Result<Slot>; #[rpc(meta, name = "getLargestAccounts")] fn get_largest_accounts( &self, meta: Self::Metadata, config: Option<RpcLargestAccountsConfig>, ) -> Result<RpcResponse<Vec<RpcAccountBalance>>>; #[rpc(meta, name = "getSupply")] fn get_supply( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<RpcSupply>>; #[rpc(meta, name = "requestAirdrop")] fn request_airdrop( &self, meta: Self::Metadata, pubkey_str: String, lamports: u64, config: Option<RpcRequestAirdropConfig>, ) -> Result<String>; #[rpc(meta, name = "sendTransaction")] fn send_transaction( &self, meta: Self::Metadata, data: String, config: Option<RpcSendTransactionConfig>, ) -> Result<String>; #[rpc(meta, name = "simulateTransaction")] fn simulate_transaction( &self, meta: Self::Metadata, data: String, config: Option<RpcSimulateTransactionConfig>, ) -> Result<RpcResponse<RpcSimulateTransactionResult>>; #[rpc(meta, name = "getSlotLeader")] fn get_slot_leader( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<String>; #[rpc(meta, name = "getSlotLeaders")] fn get_slot_leaders( &self, meta: Self::Metadata, start_slot: Slot, limit: u64, ) -> Result<Vec<String>>; #[rpc(meta, name = "minimumLedgerSlot")] fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result<Slot>; #[rpc(meta, name = "getBlock")] fn get_block( &self, meta: Self::Metadata, slot: Slot, config: Option<RpcEncodingConfigWrapper<RpcBlockConfig>>, ) -> Result<Option<UiConfirmedBlock>>; #[rpc(meta, name = "getBlockTime")] fn get_block_time(&self, meta: Self::Metadata, slot: Slot) -> Result<Option<UnixTimestamp>>; #[rpc(meta, name = "getBlocks")] fn get_blocks( &self, meta: Self::Metadata, start_slot: Slot, config: Option<RpcBlocksConfigWrapper>, commitment: Option<CommitmentConfig>, ) -> Result<Vec<Slot>>; #[rpc(meta, name = "getBlocksWithLimit")] fn get_blocks_with_limit( &self, meta: Self::Metadata, start_slot: Slot, limit: usize, commitment: Option<CommitmentConfig>, ) -> Result<Vec<Slot>>; #[rpc(meta, name = "getTransaction")] fn get_transaction( &self, meta: Self::Metadata, signature_str: String, config: Option<RpcEncodingConfigWrapper<RpcTransactionConfig>>, ) -> Result<Option<EncodedConfirmedTransaction>>; #[rpc(meta, name = "getSignaturesForAddress")] fn get_signatures_for_address( &self, meta: Self::Metadata, address: String, config: Option<RpcSignaturesForAddressConfig>, ) -> Result<Vec<RpcConfirmedTransactionStatusWithSignature>>; #[rpc(meta, name = "getFirstAvailableBlock")] fn get_first_available_block(&self, meta: Self::Metadata) -> Result<Slot>; #[rpc(meta, name = "getStakeActivation")] fn get_stake_activation( &self, meta: Self::Metadata, pubkey_str: String, config: Option<RpcEpochConfig>, ) -> Result<RpcStakeActivation>; // SPL Token-specific RPC endpoints // See https://github.com/solana-labs/solana-program-library/releases/tag/token-v2.0.0 for // program details #[rpc(meta, name = "getTokenAccountBalance")] fn get_token_account_balance( &self, meta: Self::Metadata, pubkey_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<UiTokenAmount>>; #[rpc(meta, name = "getTokenSupply")] fn get_token_supply( &self, meta: Self::Metadata, mint_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<UiTokenAmount>>; #[rpc(meta, name = "getTokenLargestAccounts")] fn get_token_largest_accounts( &self, meta: Self::Metadata, mint_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<Vec<RpcTokenAccountBalance>>>; #[rpc(meta, name = "getTokenAccountsByOwner")] fn get_token_accounts_by_owner( &self, meta: Self::Metadata, owner_str: String, token_account_filter: RpcTokenAccountsFilter, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<RpcKeyedAccount>>>; #[rpc(meta, name = "getTokenAccountsByDelegate")] fn get_token_accounts_by_delegate( &self, meta: Self::Metadata, delegate_str: String, token_account_filter: RpcTokenAccountsFilter, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<RpcKeyedAccount>>>; } pub struct FullImpl; impl Full for FullImpl { type Metadata = JsonRpcRequestProcessor; fn get_account_info( &self, meta: Self::Metadata, pubkey_str: String, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Option<UiAccount>>> { debug!("get_account_info rpc request received: {:?}", pubkey_str); let pubkey = verify_pubkey(&pubkey_str)?; meta.get_account_info(&pubkey, config) } fn get_multiple_accounts( &self, meta: Self::Metadata, pubkey_strs: Vec<String>, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<Option<UiAccount>>>> { debug!( "get_multiple_accounts rpc request received: {:?}", pubkey_strs.len() ); let max_multiple_accounts = meta .config .max_multiple_accounts .unwrap_or(MAX_MULTIPLE_ACCOUNTS); if pubkey_strs.len() > max_multiple_accounts { return Err(Error::invalid_params(format!( "Too many inputs provided; max {}", max_multiple_accounts ))); } let mut pubkeys: Vec<Pubkey> = vec![]; for pubkey_str in pubkey_strs { pubkeys.push(verify_pubkey(&pubkey_str)?); } meta.get_multiple_accounts(pubkeys, config) } fn get_minimum_balance_for_rent_exemption( &self, meta: Self::Metadata, data_len: usize, commitment: Option<CommitmentConfig>, ) -> Result<u64> { debug!( "get_minimum_balance_for_rent_exemption rpc request received: {:?}", data_len ); if data_len as u64 > system_instruction::MAX_PERMITTED_DATA_LENGTH { return Err(Error::invalid_request()); } Ok(meta.get_minimum_balance_for_rent_exemption(data_len, commitment)) } fn get_program_accounts( &self, meta: Self::Metadata, program_id_str: String, config: Option<RpcProgramAccountsConfig>, ) -> Result<Vec<RpcKeyedAccount>> { debug!( "get_program_accounts rpc request received: {:?}", program_id_str ); let program_id = verify_pubkey(&program_id_str)?; let (config, filters) = if let Some(config) = config { ( Some(config.account_config), config.filters.unwrap_or_default(), ) } else { (None, vec![]) }; if filters.len() > MAX_GET_PROGRAM_ACCOUNT_FILTERS { return Err(Error::invalid_params(format!( "Too many filters provided; max {}", MAX_GET_PROGRAM_ACCOUNT_FILTERS ))); } for filter in &filters { verify_filter(filter)?; } meta.get_program_accounts(&program_id, config, filters) } fn get_inflation_governor( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<RpcInflationGovernor> { debug!("get_inflation_governor rpc request received"); Ok(meta.get_inflation_governor(commitment)) } fn get_inflation_rate(&self, meta: Self::Metadata) -> Result<RpcInflationRate> { debug!("get_inflation_rate rpc request received"); Ok(meta.get_inflation_rate()) } fn get_epoch_schedule(&self, meta: Self::Metadata) -> Result<EpochSchedule>
fn get_recent_performance_samples( &self, meta: Self::Metadata, limit: Option<usize>, ) -> Result<Vec<RpcPerfSample>> { debug!("get_recent_performance_samples request received"); let limit = limit.unwrap_or(PERFORMANCE_SAMPLES_LIMIT); if limit > PERFORMANCE_SAMPLES_LIMIT { return Err(Error::invalid_params(format!( "Invalid limit; max {}", PERFORMANCE_SAMPLES_LIMIT ))); } Ok(meta .blockstore .get_recent_perf_samples(limit) .map_err(|err| { warn!("get_recent_performance_samples failed: {:?}", err); Error::invalid_request() })? .iter() .map(|(slot, sample)| RpcPerfSample { slot: *slot, num_transactions: sample.num_transactions, num_slots: sample.num_slots, sample_period_secs: sample.sample_period_secs, }) .collect()) } fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result<Vec<RpcContactInfo>> { debug!("get_cluster_nodes rpc request received"); let cluster_info = &meta.cluster_info; fn valid_address_or_none(addr: &SocketAddr) -> Option<SocketAddr> { if ContactInfo::is_valid_address(addr) { Some(*addr) } else { None } } let my_shred_version = cluster_info.my_shred_version(); Ok(cluster_info .all_peers() .iter() .filter_map(|(contact_info, _)| { if my_shred_version == contact_info.shred_version && ContactInfo::is_valid_address(&contact_info.gossip) { let (version, feature_set) = if let Some(version) = cluster_info.get_node_version(&contact_info.id) { (Some(version.to_string()), Some(version.feature_set)) } else { (None, None) }; Some(RpcContactInfo { pubkey: contact_info.id.to_string(), gossip: Some(contact_info.gossip), tpu: valid_address_or_none(&contact_info.tpu), rpc: valid_address_or_none(&contact_info.rpc), version, feature_set, }) } else { None // Exclude spy nodes } }) .collect()) } fn get_block_commitment( &self, meta: Self::Metadata, block: Slot, ) -> Result<RpcBlockCommitment<BlockCommitmentArray>> { debug!("get_block_commitment rpc request received"); Ok(meta.get_block_commitment(block)) } fn get_genesis_hash(&self, meta: Self::Metadata) -> Result<String> { debug!("get_genesis_hash rpc request received"); Ok(meta.genesis_hash.to_string()) } fn get_recent_blockhash( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<RpcBlockhashFeeCalculator>> { debug!("get_recent_blockhash rpc request received"); Ok(meta.get_recent_blockhash(commitment)) } fn get_fees( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<RpcFees>> { debug!("get_fees rpc request received"); Ok(meta.get_fees(commitment)) } fn get_fee_calculator_for_blockhash( &self, meta: Self::Metadata, blockhash: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<Option<RpcFeeCalculator>>> { debug!("get_fee_calculator_for_blockhash rpc request received"); let blockhash = Hash::from_str(&blockhash) .map_err(|e| Error::invalid_params(format!("{:?}", e)))?; Ok(meta.get_fee_calculator_for_blockhash(&blockhash, commitment)) } fn get_fee_rate_governor( &self, meta: Self::Metadata, ) -> Result<RpcResponse<RpcFeeRateGovernor>> { debug!("get_fee_rate_governor rpc request received"); Ok(meta.get_fee_rate_governor()) } fn get_signature_statuses( &self, meta: Self::Metadata, signature_strs: Vec<String>, config: Option<RpcSignatureStatusConfig>, ) -> Result<RpcResponse<Vec<Option<TransactionStatus>>>> { debug!( "get_signature_statuses rpc request received: {:?}", signature_strs.len() ); if signature_strs.len() > MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS { return Err(Error::invalid_params(format!( "Too many inputs provided; max {}", MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS ))); } let mut signatures: Vec<Signature> = vec![]; for signature_str in signature_strs { signatures.push(verify_signature(&signature_str)?); } meta.get_signature_statuses(signatures, config) } fn get_max_retransmit_slot(&self, meta: Self::Metadata) -> Result<Slot> { debug!("get_max_retransmit_slot rpc request received"); Ok(meta.get_max_retransmit_slot()) } fn get_max_shred_insert_slot(&self, meta: Self::Metadata) -> Result<Slot> { debug!("get_max_shred_insert_slot rpc request received"); Ok(meta.get_max_shred_insert_slot()) } fn get_largest_accounts( &self, meta: Self::Metadata, config: Option<RpcLargestAccountsConfig>, ) -> Result<RpcResponse<Vec<RpcAccountBalance>>> { debug!("get_largest_accounts rpc request received"); Ok(meta.get_largest_accounts(config)) } fn get_supply( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<RpcSupply>> { debug!("get_supply rpc request received"); Ok(meta.get_supply(commitment)) } fn request_airdrop( &self, meta: Self::Metadata, pubkey_str: String, lamports: u64, config: Option<RpcRequestAirdropConfig>, ) -> Result<String> { debug!("request_airdrop rpc request received"); trace!( "request_airdrop id={} lamports={} config: {:?}", pubkey_str, lamports, &config ); let faucet_addr = meta.config.faucet_addr.ok_or_else(Error::invalid_request)?; let pubkey = verify_pubkey(&pubkey_str)?; let config = config.unwrap_or_default(); let bank = meta.bank(config.commitment); let blockhash = if let Some(blockhash) = config.recent_blockhash { verify_hash(&blockhash)? } else { bank.confirmed_last_blockhash().0 }; let last_valid_slot = bank.get_blockhash_last_valid_slot(&blockhash).unwrap_or(0); let transaction = request_airdrop_transaction(&faucet_addr, &pubkey, lamports, blockhash).map_err( |err| { info!("request_airdrop_transaction failed: {:?}", err); Error::internal_error() }, )?; let wire_transaction = serialize(&transaction).map_err(|err| { info!("request_airdrop: serialize error: {:?}", err); Error::internal_error() })?; _send_transaction(meta, transaction, wire_transaction, last_valid_slot, None) } fn send_transaction( &self, meta: Self::Metadata, data: String, config: Option<RpcSendTransactionConfig>, ) -> Result<String> { debug!("send_transaction rpc request received"); let config = config.unwrap_or_default(); let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base58); let (wire_transaction, transaction) = deserialize_transaction(data, encoding)?; let preflight_commitment = config .preflight_commitment .map(|commitment| CommitmentConfig { commitment }); let preflight_bank = &*meta.bank(preflight_commitment); let mut last_valid_slot = preflight_bank .get_blockhash_last_valid_slot(&transaction.message.recent_blockhash) .unwrap_or(0); let durable_nonce_info = solana_sdk::transaction::uses_durable_nonce(&transaction) .and_then(|nonce_ix| { solana_sdk::transaction::get_nonce_pubkey_from_instruction( &nonce_ix, &transaction, ) }) .map(|&pubkey| (pubkey, transaction.message.recent_blockhash)); if durable_nonce_info.is_some() { // While it uses a defined constant, this last_valid_slot value is chosen arbitrarily. // It provides a fallback timeout for durable-nonce transaction retries in case of // malicious packing of the retry queue. Durable-nonce transactions are otherwise // retried until the nonce is advanced. last_valid_slot = preflight_bank.slot() + MAX_RECENT_BLOCKHASHES as u64; } if !config.skip_preflight { if let Err(e) = verify_transaction(&transaction) { return Err(e); } match meta.health.check() { RpcHealthStatus::Ok => (), RpcHealthStatus::Unknown => { return Err(RpcCustomError::NodeUnhealthy { num_slots_behind: None, } .into()); } RpcHealthStatus::Behind { num_slots } => { return Err(RpcCustomError::NodeUnhealthy { num_slots_behind: Some(num_slots), } .into()); } } if let (Err(err), logs) = preflight_bank.simulate_transaction(transaction.clone()) { return Err(RpcCustomError::SendTransactionPreflightFailure { message: format!("Transaction simulation failed: {}", err), result: RpcSimulateTransactionResult { err: Some(err), logs: Some(logs), }, } .into()); } } _send_transaction( meta, transaction, wire_transaction, last_valid_slot, durable_nonce_info, ) } fn simulate_transaction( &self, meta: Self::Metadata, data: String, config: Option<RpcSimulateTransactionConfig>, ) -> Result<RpcResponse<RpcSimulateTransactionResult>> { debug!("simulate_transaction rpc request received"); let config = config.unwrap_or_default(); let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base58); let (_, transaction) = deserialize_transaction(data, encoding)?; if config.sig_verify { if let Err(e) = verify_transaction(&transaction) { return Err(e); } } let bank = &*meta.bank(config.commitment); let (result, logs) = bank.simulate_transaction(transaction); Ok(new_response( &bank, RpcSimulateTransactionResult { err: result.err(), logs: Some(logs), }, )) } fn get_slot_leader( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<String> { debug!("get_slot_leader rpc request received"); Ok(meta.get_slot_leader(commitment)) } fn get_slot_leaders( &self, meta: Self::Metadata, start_slot: Slot, limit: u64, ) -> Result<Vec<String>> { debug!( "get_slot_leaders rpc request received (start: {} limit: {})", start_slot, limit ); let limit = limit as usize; if limit > MAX_GET_SLOT_LEADERS { return Err(Error::invalid_params(format!( "Invalid limit; max {}", MAX_GET_SLOT_LEADERS ))); } let bank = meta.bank(None); let (mut epoch, mut slot_index) = bank.epoch_schedule().get_epoch_and_slot_index(start_slot); let mut slot_leaders = Vec::with_capacity(limit); while slot_leaders.len() < limit { if let Some(leader_schedule) = meta.leader_schedule_cache.get_epoch_leader_schedule(epoch) { slot_leaders.extend( leader_schedule .get_slot_leaders() .iter() .skip(slot_index as usize) .take(limit.saturating_sub(slot_leaders.len())) .map(|pubkey| pubkey.to_string()), ); } else { return Err(Error::invalid_params(format!( "Invalid slot range: leader schedule for epoch {} is unavailable", epoch ))); } epoch += 1; slot_index = 0; } Ok(slot_leaders) } fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result<Slot> { debug!("minimum_ledger_slot rpc request received"); meta.minimum_ledger_slot() } fn get_block( &self, meta: Self::Metadata, slot: Slot, config: Option<RpcEncodingConfigWrapper<RpcBlockConfig>>, ) -> Result<Option<UiConfirmedBlock>> { debug!("get_block rpc request received: {:?}", slot); meta.get_block(slot, config) } fn get_blocks( &self, meta: Self::Metadata, start_slot: Slot, config: Option<RpcBlocksConfigWrapper>, commitment: Option<CommitmentConfig>, ) -> Result<Vec<Slot>> { let (end_slot, maybe_commitment) = config.map(|config| config.unzip()).unwrap_or_default(); debug!( "get_blocks rpc request received: {}-{:?}", start_slot, end_slot ); meta.get_blocks(start_slot, end_slot, commitment.or(maybe_commitment)) } fn get_blocks_with_limit( &self, meta: Self::Metadata, start_slot: Slot, limit: usize, commitment: Option<CommitmentConfig>, ) -> Result<Vec<Slot>> { debug!( "get_blocks_with_limit rpc request received: {}-{}", start_slot, limit, ); meta.get_blocks_with_limit(start_slot, limit, commitment) } fn get_block_time( &self, meta: Self::Metadata, slot: Slot, ) -> Result<Option<UnixTimestamp>> { meta.get_block_time(slot) } fn get_transaction( &self, meta: Self::Metadata, signature_str: String, config: Option<RpcEncodingConfigWrapper<RpcTransactionConfig>>, ) -> Result<Option<EncodedConfirmedTransaction>> { debug!("get_transaction rpc request received: {:?}", signature_str); let signature = verify_signature(&signature_str)?; meta.get_transaction(signature, config) } fn get_signatures_for_address( &self, meta: Self::Metadata, address: String, config: Option<RpcSignaturesForAddressConfig>, ) -> Result<Vec<RpcConfirmedTransactionStatusWithSignature>> { let address = verify_pubkey(&address)?; let config = config.unwrap_or_default(); let before = config .before .map(|ref before| verify_signature(before)) .transpose()?; let until = config .until .map(|ref until| verify_signature(until)) .transpose()?; let limit = config .limit .unwrap_or(MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT); if limit == 0 || limit > MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT { return Err(Error::invalid_params(format!( "Invalid limit; max {}", MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT ))); } meta.get_signatures_for_address(address, before, until, limit, config.commitment) } fn get_first_available_block(&self, meta: Self::Metadata) -> Result<Slot> { debug!("get_first_available_block rpc request received"); Ok(meta.get_first_available_block()) } fn get_stake_activation( &self, meta: Self::Metadata, pubkey_str: String, config: Option<RpcEpochConfig>, ) -> Result<RpcStakeActivation> { debug!( "get_stake_activation rpc request received: {:?}", pubkey_str ); let pubkey = verify_pubkey(&pubkey_str)?; meta.get_stake_activation(&pubkey, config) } fn get_inflation_reward( &self, meta: Self::Metadata, address_strs: Vec<String>, config: Option<RpcEpochConfig>, ) -> Result<Vec<Option<RpcInflationReward>>> { debug!( "get_inflation_reward rpc request received: {:?}", address_strs.len() ); let mut addresses: Vec<Pubkey> = vec![]; for address_str in address_strs { addresses.push(verify_pubkey(&address_str)?); } meta.get_inflation_reward(addresses, config) } fn get_token_account_balance( &self, meta: Self::Metadata, pubkey_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<UiTokenAmount>> { debug!( "get_token_account_balance rpc request received: {:?}", pubkey_str ); let pubkey = verify_pubkey(&pubkey_str)?; meta.get_token_account_balance(&pubkey, commitment) } fn get_token_supply( &self, meta: Self::Metadata, mint_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<UiTokenAmount>> { debug!("get_token_supply rpc request received: {:?}", mint_str); let mint = verify_pubkey(&mint_str)?; meta.get_token_supply(&mint, commitment) } fn get_token_largest_accounts( &self, meta: Self::Metadata, mint_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<Vec<RpcTokenAccountBalance>>> { debug!( "get_token_largest_accounts rpc request received: {:?}", mint_str ); let mint = verify_pubkey(&mint_str)?; meta.get_token_largest_accounts(&mint, commitment) } fn get_token_accounts_by_owner( &self, meta: Self::Metadata, owner_str: String, token_account_filter: RpcTokenAccountsFilter, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<RpcKeyedAccount>>> { debug!( "get_token_accounts_by_owner rpc request received: {:?}", owner_str ); let owner = verify_pubkey(&owner_str)?; let token_account_filter = verify_token_account_filter(token_account_filter)?; meta.get_token_accounts_by_owner(&owner, token_account_filter, config) } fn get_token_accounts_by_delegate( &self, meta: Self::Metadata, delegate_str: String, token_account_filter: RpcTokenAccountsFilter, config: Option<RpcAccountInfoConfig>, ) -> Result<RpcResponse<Vec<RpcKeyedAccount>>> { debug!( "get_token_accounts_by_delegate rpc request received: {:?}", delegate_str ); let delegate = verify_pubkey(&delegate_str)?; let token_account_filter = verify_token_account_filter(token_account_filter)?; meta.get_token_accounts_by_delegate(&delegate, token_account_filter, config) } } } // Deprecated RPC methods, collected for easy deactivation and removal in v1.8 pub mod rpc_deprecated_v1_7 { #![allow(deprecated)] use super::*; #[rpc] pub trait DeprecatedV1_7 { type Metadata; // DEPRECATED #[rpc(meta, name = "getConfirmedBlock")] fn get_confirmed_block( &self, meta: Self::Metadata, slot: Slot, config: Option<RpcEncodingConfigWrapper<RpcConfirmedBlockConfig>>, ) -> Result<Option<UiConfirmedBlock>>; // DEPRECATED #[rpc(meta, name = "getConfirmedBlocks")] fn get_confirmed_blocks( &self, meta: Self::Metadata, start_slot: Slot, config: Option<RpcConfirmedBlocksConfigWrapper>, commitment: Option<CommitmentConfig>, ) -> Result<Vec<Slot>>; // DEPRECATED #[rpc(meta, name = "getConfirmedBlocksWithLimit")] fn get_confirmed_blocks_with_limit( &self, meta: Self::Metadata, start_slot: Slot, limit: usize, commitment: Option<CommitmentConfig>, ) -> Result<Vec<Slot>>; // DEPRECATED #[rpc(meta, name = "getConfirmedTransaction")] fn get_confirmed_transaction( &self, meta: Self::Metadata, signature_str: String, config: Option<RpcEncodingConfigWrapper<RpcConfirmedTransactionConfig>>, ) -> Result<Option<EncodedConfirmedTransaction>>; // DEPRECATED #[rpc(meta, name = "getConfirmedSignaturesForAddress2")] fn get_confirmed_signatures_for_address2( &self, meta: Self::Metadata, address: String, config: Option<RpcGetConfirmedSignaturesForAddress2Config>, ) -> Result<Vec<RpcConfirmedTransactionStatusWithSignature>>; } pub struct DeprecatedV1_7Impl; impl DeprecatedV1_7 for DeprecatedV1_7Impl { type Metadata = JsonRpcRequestProcessor; fn get_confirmed_block( &self, meta: Self::Metadata, slot: Slot, config: Option<RpcEncodingConfigWrapper<RpcConfirmedBlockConfig>>, ) -> Result<Option<UiConfirmedBlock>> { debug!("get_confirmed_block rpc request received: {:?}", slot); meta.get_block(slot, config.map(|config| config.convert())) } fn get_confirmed_blocks( &self, meta: Self::Metadata, start_slot: Slot, config: Option<RpcConfirmedBlocksConfigWrapper>, commitment: Option<CommitmentConfig>, ) -> Result<Vec<Slot>> { let (end_slot, maybe_commitment) = config.map(|config| config.unzip()).unwrap_or_default(); debug!( "get_confirmed_blocks rpc request received: {}-{:?}", start_slot, end_slot ); meta.get_blocks(start_slot, end_slot, commitment.or(maybe_commitment)) } fn get_confirmed_blocks_with_limit( &self, meta: Self::Metadata, start_slot: Slot, limit: usize, commitment: Option<CommitmentConfig>, ) -> Result<Vec<Slot>> { debug!( "get_confirmed_blocks_with_limit rpc request received: {}-{}", start_slot, limit, ); meta.get_blocks_with_limit(start_slot, limit, commitment) } fn get_confirmed_transaction( &self, meta: Self::Metadata, signature_str: String, config: Option<RpcEncodingConfigWrapper<RpcConfirmedTransactionConfig>>, ) -> Result<Option<EncodedConfirmedTransaction>> { debug!( "get_confirmed_transaction rpc request received: {:?}", signature_str ); let signature = verify_signature(&signature_str)?; meta.get_transaction(signature, config.map(|config| config.convert())) } fn get_confirmed_signatures_for_address2( &self, meta: Self::Metadata, address: String, config: Option<RpcGetConfirmedSignaturesForAddress2Config>, ) -> Result<Vec<RpcConfirmedTransactionStatusWithSignature>> { let address = verify_pubkey(&address)?; let config = config.unwrap_or_default(); let before = config .before .map(|ref before| verify_signature(before)) .transpose()?; let until = config .until .map(|ref until| verify_signature(until)) .transpose()?; let limit = config .limit .unwrap_or(MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT); if limit == 0 || limit > MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT { return Err(Error::invalid_params(format!( "Invalid limit; max {}", MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT ))); } meta.get_signatures_for_address(address, before, until, limit, config.commitment) } } } // Obsolete RPC methods, collected for easy deactivation and removal pub mod rpc_obsolete_v1_7 { use super::*; #[rpc] pub trait ObsoleteV1_7 { type Metadata; // DEPRECATED #[rpc(meta, name = "confirmTransaction")] fn confirm_transaction( &self, meta: Self::Metadata, signature_str: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<bool>>; // DEPRECATED #[rpc(meta, name = "getSignatureStatus")] fn get_signature_status( &self, meta: Self::Metadata, signature_str: String, commitment: Option<CommitmentConfig>, ) -> Result<Option<transaction::Result<()>>>; // DEPRECATED (used by Trust Wallet) #[rpc(meta, name = "getSignatureConfirmation")] fn get_signature_confirmation( &self, meta: Self::Metadata, signature_str: String, commitment: Option<CommitmentConfig>, ) -> Result<Option<RpcSignatureConfirmation>>; // DEPRECATED #[rpc(meta, name = "getTotalSupply")] fn get_total_supply( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<u64>; // DEPRECATED #[rpc(meta, name = "getConfirmedSignaturesForAddress")] fn get_confirmed_signatures_for_address( &self, meta: Self::Metadata, pubkey_str: String, start_slot: Slot, end_slot: Slot, ) -> Result<Vec<String>>; } pub struct ObsoleteV1_7Impl; impl ObsoleteV1_7 for ObsoleteV1_7Impl { type Metadata = JsonRpcRequestProcessor; fn confirm_transaction( &self, meta: Self::Metadata, id: String, commitment: Option<CommitmentConfig>, ) -> Result<RpcResponse<bool>> { debug!("confirm_transaction rpc request received: {:?}", id); let signature = verify_signature(&id)?; Ok(meta.confirm_transaction(&signature, commitment)) } fn get_signature_status( &self, meta: Self::Metadata, signature_str: String, commitment: Option<CommitmentConfig>, ) -> Result<Option<transaction::Result<()>>> { debug!( "get_signature_status rpc request received: {:?}", signature_str ); let signature = verify_signature(&signature_str)?; Ok(meta.get_signature_status(signature, commitment)) } fn get_signature_confirmation( &self, meta: Self::Metadata, signature_str: String, commitment: Option<CommitmentConfig>, ) -> Result<Option<RpcSignatureConfirmation>> { debug!( "get_signature_confirmation rpc request received: {:?}", signature_str ); let signature = verify_signature(&signature_str)?; Ok(meta.get_signature_confirmation_status(signature, commitment)) } fn get_total_supply( &self, meta: Self::Metadata, commitment: Option<CommitmentConfig>, ) -> Result<u64> { debug!("get_total_supply rpc request received"); Ok(meta.get_total_supply(commitment)) } fn get_confirmed_signatures_for_address( &self, meta: Self::Metadata, pubkey_str: String, start_slot: Slot, end_slot: Slot, ) -> Result<Vec<String>> { debug!( "get_confirmed_signatures_for_address rpc request received: {:?} {:?}-{:?}", pubkey_str, start_slot, end_slot ); let pubkey = verify_pubkey(&pubkey_str)?; if end_slot < start_slot { return Err(Error::invalid_params(format!( "start_slot {} must be less than or equal to end_slot {}", start_slot, end_slot ))); } if end_slot - start_slot > MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE { return Err(Error::invalid_params(format!( "Slot range too large; max {}", MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE ))); } Ok(meta .get_confirmed_signatures_for_address(pubkey, start_slot, end_slot) .iter() .map(|signature| signature.to_string()) .collect()) } } } const WORST_CASE_BASE58_TX: usize = 1683; // Golden, bump if PACKET_DATA_SIZE changes const WORST_CASE_BASE64_TX: usize = 1644; // Golden, bump if PACKET_DATA_SIZE changes fn deserialize_transaction( encoded_transaction: String, encoding: UiTransactionEncoding, ) -> Result<(Vec<u8>, Transaction)> { let wire_transaction = match encoding { UiTransactionEncoding::Base58 => { inc_new_counter_info!("rpc-base58_encoded_tx", 1); if encoded_transaction.len() > WORST_CASE_BASE58_TX { return Err(Error::invalid_params(format!( "encoded transaction too large: {} bytes (max: encoded/raw {}/{})", encoded_transaction.len(), WORST_CASE_BASE58_TX, PACKET_DATA_SIZE, ))); } bs58::decode(encoded_transaction) .into_vec() .map_err(|e| Error::invalid_params(format!("{:?}", e)))? } UiTransactionEncoding::Base64 => { inc_new_counter_info!("rpc-base64_encoded_tx", 1); if encoded_transaction.len() > WORST_CASE_BASE64_TX { return Err(Error::invalid_params(format!( "encoded transaction too large: {} bytes (max: encoded/raw {}/{})", encoded_transaction.len(), WORST_CASE_BASE64_TX, PACKET_DATA_SIZE, ))); } base64::decode(encoded_transaction) .map_err(|e| Error::invalid_params(format!("{:?}", e)))? } _ => { return Err(Error::invalid_params(format!( "unsupported transaction encoding: {}. Supported encodings: base58, base64", encoding ))) } }; if wire_transaction.len() > PACKET_DATA_SIZE { let err = format!( "transaction too large: {} bytes (max: {} bytes)", wire_transaction.len(), PACKET_DATA_SIZE ); info!("{}", err); return Err(Error::invalid_params(&err)); } bincode::options() .with_limit(PACKET_DATA_SIZE as u64) .with_fixint_encoding() .allow_trailing_bytes() .deserialize_from(&wire_transaction[..]) .map_err(|err| { info!("transaction deserialize error: {:?}", err); Error::invalid_params(&err.to_string()) }) .and_then(|transaction: Transaction| { if let Err(err) = transaction.sanitize() { Err(Error::invalid_params(format!( "invalid transaction: {}", err ))) } else { Ok(transaction) } }) .map(|transaction| (wire_transaction, transaction)) } pub(crate) fn create_validator_exit(exit: &Arc<AtomicBool>) -> Arc<RwLock<ValidatorExit>> { let mut validator_exit = ValidatorExit::default(); let exit_ = exit.clone(); validator_exit.register_exit(Box::new(move || exit_.store(true, Ordering::Relaxed))); Arc::new(RwLock::new(validator_exit)) } #[cfg(test)] pub mod tests { use super::{rpc_full::*, rpc_minimal::*, *}; use crate::{ contact_info::ContactInfo, non_circulating_supply::non_circulating_accounts, optimistically_confirmed_bank_tracker::{ BankNotification, OptimisticallyConfirmedBankTracker, }, replay_stage::tests::create_test_transactions_and_populate_blockstore, rpc_subscriptions::RpcSubscriptions, }; use bincode::deserialize; use jsonrpc_core::{futures, ErrorCode, MetaIoHandler, Output, Response, Value}; use jsonrpc_core_client::transports::local; use solana_client::rpc_filter::{Memcmp, MemcmpEncodedBytes}; use solana_ledger::{ blockstore_meta::PerfSample, blockstore_processor::fill_blockstore_slot_with_ticks, genesis_utils::{create_genesis_config, GenesisConfigInfo}, }; use solana_runtime::{ accounts_background_service::AbsRequestSender, commitment::BlockCommitment, }; use solana_sdk::{ account::Account, clock::MAX_RECENT_BLOCKHASHES, fee_calculator::DEFAULT_BURN_PERCENT, hash::{hash, Hash}, instruction::InstructionError, message::Message, nonce, rpc_port, signature::{Keypair, Signer}, system_program, system_transaction, timing::slot_duration_from_slots_per_year, transaction::{self, TransactionError}, }; use solana_transaction_status::{ EncodedConfirmedBlock, EncodedTransaction, EncodedTransactionWithStatusMeta, TransactionDetails, UiMessage, }; use solana_vote_program::{ vote_instruction, vote_state::{BlockTimestamp, Vote, VoteInit, VoteStateVersions, MAX_LOCKOUT_HISTORY}, }; use spl_token_v2_0::{ solana_program::{program_option::COption, pubkey::Pubkey as SplTokenPubkey}, state::AccountState as TokenAccountState, state::Mint, }; use std::collections::HashMap; const TEST_MINT_LAMPORTS: u64 = 1_000_000; const TEST_SLOTS_PER_EPOCH: u64 = DELINQUENT_VALIDATOR_SLOT_DISTANCE + 1; struct RpcHandler { io: MetaIoHandler<JsonRpcRequestProcessor>, meta: JsonRpcRequestProcessor, bank: Arc<Bank>, bank_forks: Arc<RwLock<BankForks>>, blockhash: Hash, alice: Keypair, leader_pubkey: Pubkey, leader_vote_keypair: Arc<Keypair>, block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>, confirmed_block_signatures: Vec<Signature>, } fn start_rpc_handler_with_tx(pubkey: &Pubkey) -> RpcHandler { start_rpc_handler_with_tx_and_blockstore(pubkey, vec![]) } fn start_rpc_handler_with_tx_and_blockstore( pubkey: &Pubkey, blockstore_roots: Vec<Slot>, ) -> RpcHandler { let (bank_forks, alice, leader_vote_keypair) = new_bank_forks(); let bank = bank_forks.read().unwrap().working_bank(); let vote_pubkey = leader_vote_keypair.pubkey(); let mut vote_account = bank.get_account(&vote_pubkey).unwrap_or_default(); let mut vote_state = VoteState::from(&vote_account).unwrap_or_default(); vote_state.last_timestamp = BlockTimestamp { slot: bank.slot(), timestamp: bank.clock().unix_timestamp, }; let versioned = VoteStateVersions::new_current(vote_state); VoteState::to(&versioned, &mut vote_account).unwrap(); bank.store_account(&vote_pubkey, &vote_account); let ledger_path = get_tmp_ledger_path!(); let blockstore = Blockstore::open(&ledger_path).unwrap(); let blockstore = Arc::new(blockstore); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); bank.transfer(4, &alice, &keypair2.pubkey()).unwrap(); let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root())); let confirmed_block_signatures = create_test_transactions_and_populate_blockstore( vec![&alice, &keypair1, &keypair2, &keypair3], 0, bank.clone(), blockstore.clone(), max_complete_transaction_status_slot.clone(), ); let mut commitment_slot0 = BlockCommitment::default(); commitment_slot0.increase_confirmation_stake(2, 9); let mut commitment_slot1 = BlockCommitment::default(); commitment_slot1.increase_confirmation_stake(1, 9); let mut block_commitment: HashMap<u64, BlockCommitment> = HashMap::new(); block_commitment.entry(0).or_insert(commitment_slot0); block_commitment.entry(1).or_insert(commitment_slot1); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new( block_commitment, 10, CommitmentSlots::new_from_slot(bank.slot()), ))); let mut roots = blockstore_roots; if !roots.is_empty() { roots.retain(|&x| x > 0); let mut parent_bank = bank; for (i, root) in roots.iter().enumerate() { let new_bank = Bank::new_from_parent(&parent_bank, parent_bank.collector_id(), *root); parent_bank = bank_forks.write().unwrap().insert(new_bank); let parent = if i > 0 { roots[i - 1] } else { 0 }; fill_blockstore_slot_with_ticks(&blockstore, 5, *root, parent, Hash::default()); } blockstore.set_roots(&roots).unwrap(); let new_bank = Bank::new_from_parent( &parent_bank, parent_bank.collector_id(), roots.iter().max().unwrap() + 1, ); bank_forks.write().unwrap().insert(new_bank); for root in roots.iter() { bank_forks .write() .unwrap() .set_root(*root, &AbsRequestSender::default(), Some(0)); let mut stakes = HashMap::new(); stakes.insert( leader_vote_keypair.pubkey(), (1, AccountSharedData::default()), ); let block_time = bank_forks .read() .unwrap() .get(*root) .unwrap() .clock() .unix_timestamp; blockstore.cache_block_time(*root, block_time).unwrap(); } } let bank = bank_forks.read().unwrap().working_bank(); let leader_pubkey = *bank.collector_id(); let exit = Arc::new(AtomicBool::new(false)); let validator_exit = create_validator_exit(&exit); let blockhash = bank.confirmed_last_blockhash().0; let tx = system_transaction::transfer(&alice, pubkey, 20, blockhash); bank.process_transaction(&tx).expect("process transaction"); let tx = system_transaction::transfer(&alice, &non_circulating_accounts()[0], 20, blockhash); bank.process_transaction(&tx).expect("process transaction"); let tx = system_transaction::transfer(&alice, pubkey, std::u64::MAX, blockhash); let _ = bank.process_transaction(&tx); let cluster_info = Arc::new(ClusterInfo::default()); let tpu_address = cluster_info.my_contact_info().tpu; cluster_info.insert_info(ContactInfo::new_with_pubkey_socketaddr( &leader_pubkey, &socketaddr!("127.0.0.1:1234"), )); let sample1 = PerfSample { num_slots: 1, num_transactions: 4, sample_period_secs: 60, }; blockstore .write_perf_sample(0, &sample1) .expect("write to blockstore"); let max_slots = Arc::new(MaxSlots::default()); max_slots.retransmit.store(42, Ordering::Relaxed); max_slots.shred_insert.store(43, Ordering::Relaxed); let (meta, receiver) = JsonRpcRequestProcessor::new( JsonRpcConfig { enable_rpc_transaction_history: true, identity_pubkey: *pubkey, ..JsonRpcConfig::default() }, None, bank_forks.clone(), block_commitment_cache.clone(), blockstore, validator_exit, RpcHealth::stub(), cluster_info.clone(), Hash::default(), Arc::new(tokio::runtime::Runtime::new().unwrap()), None, OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), Arc::new(RwLock::new(LargestAccountsCache::new(30))), max_slots, Arc::new(LeaderScheduleCache::new_from_bank(&bank)), max_complete_transaction_status_slot, ); SendTransactionService::new(tpu_address, &bank_forks, None, receiver, 1000, 1); cluster_info.insert_info(ContactInfo::new_with_pubkey_socketaddr( &leader_pubkey, &socketaddr!("127.0.0.1:1234"), )); let mut io = MetaIoHandler::default(); io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); io.extend_with(rpc_full::FullImpl.to_delegate()); RpcHandler { io, meta, bank, bank_forks, blockhash, alice, leader_pubkey, leader_vote_keypair, block_commitment_cache, confirmed_block_signatures, } } #[test] fn test_rpc_request_processor_new() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let genesis = create_genesis_config(100); let bank = Arc::new(Bank::new(&genesis.genesis_config)); bank.transfer(20, &genesis.mint_keypair, &bob_pubkey) .unwrap(); let request_processor = JsonRpcRequestProcessor::new_from_bank(&bank); assert_eq!(request_processor.get_transaction_count(None), 1); } #[test] fn test_rpc_get_balance() { let genesis = create_genesis_config(20); let mint_pubkey = genesis.mint_keypair.pubkey(); let bank = Arc::new(Bank::new(&genesis.genesis_config)); let meta = JsonRpcRequestProcessor::new_from_bank(&bank); let mut io = MetaIoHandler::default(); io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#, mint_pubkey ); let res = io.handle_request_sync(&req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":20, }, "id": 1, }); let result = serde_json::from_str::<Value>(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_balance_via_client() { let genesis = create_genesis_config(20); let mint_pubkey = genesis.mint_keypair.pubkey(); let bank = Arc::new(Bank::new(&genesis.genesis_config)); let meta = JsonRpcRequestProcessor::new_from_bank(&bank); let mut io = MetaIoHandler::default(); io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); async fn use_client(client: rpc_minimal::gen_client::Client, mint_pubkey: Pubkey) -> u64 { client .get_balance(mint_pubkey.to_string(), None) .await .unwrap() .value } let fut = async { let (client, server) = local::connect_with_metadata::<rpc_minimal::gen_client::Client, _, _>(&io, meta); let client = use_client(client, mint_pubkey); futures::join!(client, server) }; let (response, _) = futures::executor::block_on(fut); assert_eq!(response, 20); } #[test] fn test_rpc_get_cluster_nodes() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, leader_pubkey, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getClusterNodes"}"#; let res = io.handle_request_sync(&req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let expected = format!( r#"{{"jsonrpc":"2.0","result":[{{"pubkey": "{}", "gossip": "127.0.0.1:1235", "tpu": "127.0.0.1:1234", "rpc": "127.0.0.1:{}", "version": null, "featureSet": null}}],"id":1}}"#, leader_pubkey, rpc_port::DEFAULT_RPC_PORT ); let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_recent_performance_samples() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getRecentPerformanceSamples"}"#; let res = io.handle_request_sync(&req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let expected = json!({ "jsonrpc": "2.0", "id": 1, "result": [ { "slot": 0, "numSlots": 1, "numTransactions": 4, "samplePeriodSecs": 60 } ], }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_recent_performance_samples_invalid_limit() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getRecentPerformanceSamples","params":[10000]}"#; let res = io.handle_request_sync(&req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let expected = json!({ "jsonrpc": "2.0", "error": { "code": -32602, "message": "Invalid limit; max 720" }, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_slot_leader() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, leader_pubkey, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlotLeader"}"#; let res = io.handle_request_sync(&req, meta); let expected = format!(r#"{{"jsonrpc":"2.0","result":"{}","id":1}}"#, leader_pubkey); let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_tx_count() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let genesis = create_genesis_config(10); let bank = Arc::new(Bank::new(&genesis.genesis_config)); // Add 4 transactions bank.transfer(1, &genesis.mint_keypair, &bob_pubkey) .unwrap(); bank.transfer(2, &genesis.mint_keypair, &bob_pubkey) .unwrap(); bank.transfer(3, &genesis.mint_keypair, &bob_pubkey) .unwrap(); bank.transfer(4, &genesis.mint_keypair, &bob_pubkey) .unwrap(); let meta = JsonRpcRequestProcessor::new_from_bank(&bank); let mut io = MetaIoHandler::default(); io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}"#; let res = io.handle_request_sync(&req, meta); let expected = r#"{"jsonrpc":"2.0","result":4,"id":1}"#; let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_minimum_ledger_slot() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"minimumLedgerSlot"}"#; let res = io.handle_request_sync(&req, meta); let expected = r#"{"jsonrpc":"2.0","result":0,"id":1}"#; let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_get_supply() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSupply"}"#; let res = io.handle_request_sync(&req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let supply: RpcSupply = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert_eq!(supply.non_circulating, 20); assert!(supply.circulating >= TEST_MINT_LAMPORTS); assert!(supply.total >= TEST_MINT_LAMPORTS + 20); let expected_accounts: Vec<String> = non_circulating_accounts() .iter() .map(|pubkey| pubkey.to_string()) .collect(); assert_eq!( supply.non_circulating_accounts.len(), expected_accounts.len() ); for address in supply.non_circulating_accounts { assert!(expected_accounts.contains(&address)); } } #[test] fn test_get_largest_accounts() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, alice, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts"}"#; let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let largest_accounts: Vec<RpcAccountBalance> = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert_eq!(largest_accounts.len(), 20); // Get Alice balance let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#, alice.pubkey() ); let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let alice_balance: u64 = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert!(largest_accounts.contains(&RpcAccountBalance { address: alice.pubkey().to_string(), lamports: alice_balance, })); // Get Bob balance let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBalance","params":["{}"]}}"#, bob_pubkey ); let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let bob_balance: u64 = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert!(largest_accounts.contains(&RpcAccountBalance { address: bob_pubkey.to_string(), lamports: bob_balance, })); // Test Circulating/NonCirculating Filter let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts","params":[{"filter":"circulating"}]}"#; let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let largest_accounts: Vec<RpcAccountBalance> = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert_eq!(largest_accounts.len(), 20); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts","params":[{"filter":"nonCirculating"}]}"#; let res = io.handle_request_sync(&req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let largest_accounts: Vec<RpcAccountBalance> = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert_eq!(largest_accounts.len(), 1); } #[test] fn test_rpc_get_minimum_balance_for_rent_exemption() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let data_len = 50; let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getMinimumBalanceForRentExemption","params":[{}]}}"#, data_len ); let rep = io.handle_request_sync(&req, meta); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let minimum_balance: u64 = if let Response::Single(res) = res { if let Output::Success(res) = res { if let Value::Number(num) = res.result { num.as_u64().unwrap() } else { panic!("Expected number"); } } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; assert_eq!( minimum_balance, bank.get_minimum_balance_for_rent_exemption(data_len) ); } #[test] fn test_rpc_get_inflation() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getInflationGovernor"}"#; let rep = io.handle_request_sync(&req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let inflation_governor: RpcInflationGovernor = if let Response::Single(res) = res { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; let expected_inflation_governor: RpcInflationGovernor = bank.inflation().into(); assert_eq!(inflation_governor, expected_inflation_governor); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getInflationRate"}"#; // Queries current epoch let rep = io.handle_request_sync(&req, meta); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let inflation_rate: RpcInflationRate = if let Response::Single(res) = res { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; let inflation = bank.inflation(); let epoch = bank.epoch(); let slot_in_year = bank.slot_in_year_for_inflation(); let expected_inflation_rate = RpcInflationRate { total: inflation.total(slot_in_year), validator: inflation.validator(slot_in_year), foundation: inflation.foundation(slot_in_year), epoch, }; assert_eq!(inflation_rate, expected_inflation_rate); } #[test] fn test_rpc_get_epoch_schedule() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getEpochSchedule"}"#; let rep = io.handle_request_sync(&req, meta); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let epoch_schedule: EpochSchedule = if let Response::Single(res) = res { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; assert_eq!(epoch_schedule, *bank.epoch_schedule()); } #[test] fn test_rpc_get_leader_schedule() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); for req in [ r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [0]}"#, r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule"}"#, &format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [null, {{ "identity": "{}" }}]}}"#, bank.collector_id().to_string() ), &format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [{{ "identity": "{}" }}]}}"#, bank.collector_id().to_string() ), ] .iter() { let rep = io.handle_request_sync(&req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let schedule: Option<RpcLeaderSchedule> = if let Response::Single(res) = res { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success for {}", req); } } else { panic!("Expected single response"); }; let schedule = schedule.expect("leader schedule"); let bob_schedule = schedule .get(&bank.collector_id().to_string()) .expect("leader not in the leader schedule"); assert_eq!( bob_schedule.len(), solana_ledger::leader_schedule_utils::leader_schedule(bank.epoch(), &bank) .unwrap() .get_slot_leaders() .len() ); } let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [42424242]}"#; let rep = io.handle_request_sync(&req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let schedule: Option<RpcLeaderSchedule> = if let Response::Single(res) = res { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; assert_eq!(schedule, None); // `bob` is not in the leader schedule, look for an empty response let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [{{ "identity": "{}"}}]}}"#, bob_pubkey ); let rep = io.handle_request_sync(&req, meta); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let schedule: Option<RpcLeaderSchedule> = if let Response::Single(res) = res { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; assert_eq!(schedule, Some(HashMap::default())); } #[test] fn test_rpc_get_slot_leaders() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); // Test that slot leaders will be returned across epochs let query_start = 0; let query_limit = 2 * bank.epoch_schedule().slots_per_epoch; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeaders", "params": [{}, {}]}}"#, query_start, query_limit ); let rep = io.handle_request_sync(&req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let slot_leaders: Vec<String> = if let Response::Single(res) = res { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success for {} but received: {:?}", req, res); } } else { panic!("Expected single response"); }; assert_eq!(slot_leaders.len(), query_limit as usize); // Test that invalid limit returns an error let query_start = 0; let query_limit = 5001; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeaders", "params": [{}, {}]}}"#, query_start, query_limit ); let rep = io.handle_request_sync(&req, meta.clone()); let res: Value = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); assert!(res.get("error").is_some()); // Test that invalid epoch returns an error let query_start = 2 * bank.epoch_schedule().slots_per_epoch; let query_limit = 10; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSlotLeaders", "params": [{}, {}]}}"#, query_start, query_limit ); let rep = io.handle_request_sync(&req, meta); let res: Value = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); assert!(res.get("error").is_some()); } #[test] fn test_rpc_get_account_info() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}"]}}"#, bob_pubkey ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{ "owner": "11111111111111111111111111111111", "lamports": 20, "data": "", "executable": false, "rentEpoch": 0 }, }, "id": 1, }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); let address = solana_sdk::pubkey::new_rand(); let data = vec![1, 2, 3, 4, 5]; let mut account = AccountSharedData::new(42, 5, &Pubkey::default()); account.set_data(data.clone()); bank.store_account(&address, &account); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}", {{"encoding":"base64"}}]}}"#, address ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!( result["result"]["value"]["data"], json!([base64::encode(&data), "base64"]), ); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}", {{"encoding":"base64", "dataSlice": {{"length": 2, "offset": 1}}}}]}}"#, address ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!( result["result"]["value"]["data"], json!([base64::encode(&data[1..3]), "base64"]), ); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}", {{"encoding":"binary", "dataSlice": {{"length": 2, "offset": 1}}}}]}}"#, address ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!( result["result"]["value"]["data"], bs58::encode(&data[1..3]).into_string(), ); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}", {{"encoding":"jsonParsed", "dataSlice": {{"length": 2, "offset": 1}}}}]}}"#, address ); let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); result["error"].as_object().unwrap(); } #[test] fn test_rpc_get_multiple_accounts() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let address = Pubkey::new(&[9; 32]); let data = vec![1, 2, 3, 4, 5]; let mut account = AccountSharedData::new(42, 5, &Pubkey::default()); account.set_data(data.clone()); bank.store_account(&address, &account); let non_existent_address = Pubkey::new(&[8; 32]); // Test 3 accounts, one non-existent, and one with data let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getMultipleAccounts","params":[["{}", "{}", "{}"]]}}"#, bob_pubkey, non_existent_address, address, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":[{ "owner": "11111111111111111111111111111111", "lamports": 20, "data": ["", "base64"], "executable": false, "rentEpoch": 0 }, null, { "owner": "11111111111111111111111111111111", "lamports": 42, "data": [base64::encode(&data), "base64"], "executable": false, "rentEpoch": 0 }], }, "id": 1, }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Test config settings still work with multiple accounts let req = format!( r#"{{ "jsonrpc":"2.0","id":1,"method":"getMultipleAccounts","params":[ ["{}", "{}", "{}"], {{"encoding":"base58"}} ] }}"#, bob_pubkey, non_existent_address, address, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(result["result"]["value"].as_array().unwrap().len(), 3); assert_eq!( result["result"]["value"][2]["data"], json!([bs58::encode(&data).into_string(), "base58"]), ); let req = format!( r#"{{ "jsonrpc":"2.0","id":1,"method":"getMultipleAccounts","params":[ ["{}", "{}", "{}"], {{"encoding":"base64", "dataSlice": {{"length": 2, "offset": 1}}}} ] }}"#, bob_pubkey, non_existent_address, address, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(result["result"]["value"].as_array().unwrap().len(), 3); assert_eq!( result["result"]["value"][2]["data"], json!([base64::encode(&data[1..3]), "base64"]), ); let req = format!( r#"{{ "jsonrpc":"2.0","id":1,"method":"getMultipleAccounts","params":[ ["{}", "{}", "{}"], {{"encoding":"binary", "dataSlice": {{"length": 2, "offset": 1}}}} ] }}"#, bob_pubkey, non_existent_address, address, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(result["result"]["value"].as_array().unwrap().len(), 3); assert_eq!( result["result"]["value"][2]["data"], bs58::encode(&data[1..3]).into_string(), ); let req = format!( r#"{{ "jsonrpc":"2.0","id":1,"method":"getMultipleAccounts","params":[ ["{}", "{}", "{}"], {{"encoding":"jsonParsed", "dataSlice": {{"length": 2, "offset": 1}}}} ] }}"#, bob_pubkey, non_existent_address, address, ); let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); result["error"].as_object().unwrap(); } #[test] fn test_rpc_get_program_accounts() { let bob = Keypair::new(); let RpcHandler { io, meta, bank, blockhash, alice, .. } = start_rpc_handler_with_tx(&bob.pubkey()); let new_program_id = solana_sdk::pubkey::new_rand(); let tx = system_transaction::assign(&bob, blockhash, &new_program_id); bank.process_transaction(&tx).unwrap(); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getProgramAccounts","params":["{}"]}}"#, new_program_id ); let res = io.handle_request_sync(&req, meta.clone()); let expected = format!( r#"{{ "jsonrpc":"2.0", "result":[ {{ "pubkey": "{}", "account": {{ "owner": "{}", "lamports": 20, "data": "", "executable": false, "rentEpoch": 0 }} }} ], "id":1}} "#, bob.pubkey(), new_program_id ); let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Set up nonce accounts to test filters let nonce_keypair0 = Keypair::new(); let instruction = system_instruction::create_nonce_account( &alice.pubkey(), &nonce_keypair0.pubkey(), &bob.pubkey(), 100_000, ); let message = Message::new(&instruction, Some(&alice.pubkey())); let tx = Transaction::new(&[&alice, &nonce_keypair0], message, blockhash); bank.process_transaction(&tx).unwrap(); let nonce_keypair1 = Keypair::new(); let authority = solana_sdk::pubkey::new_rand(); let instruction = system_instruction::create_nonce_account( &alice.pubkey(), &nonce_keypair1.pubkey(), &authority, 100_000, ); let message = Message::new(&instruction, Some(&alice.pubkey())); let tx = Transaction::new(&[&alice, &nonce_keypair1], message, blockhash); bank.process_transaction(&tx).unwrap(); // Test memcmp filter; filter on Initialized state let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}",{{"filters": [ {{ "memcmp": {{"offset": 4,"bytes": "{}"}} }} ]}}] }}"#, system_program::id(), bs58::encode(vec![1]).into_string(), ); let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(json["result"].clone()) .expect("actual response deserialization"); assert_eq!(accounts.len(), 2); let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}",{{"filters": [ {{ "memcmp": {{"offset": 0,"bytes": "{}"}} }} ]}}] }}"#, system_program::id(), bs58::encode(vec![1]).into_string(), ); let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(json["result"].clone()) .expect("actual response deserialization"); assert_eq!(accounts.len(), 0); // Test dataSize filter let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}",{{"filters": [ {{ "dataSize": {} }} ]}}] }}"#, system_program::id(), nonce::State::size(), ); let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(json["result"].clone()) .expect("actual response deserialization"); assert_eq!(accounts.len(), 2); let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}",{{"filters": [ {{ "dataSize": 1 }} ]}}] }}"#, system_program::id(), ); let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(json["result"].clone()) .expect("actual response deserialization"); assert_eq!(accounts.len(), 0); // Test multiple filters let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}",{{"filters": [ {{ "memcmp": {{"offset": 4,"bytes": "{}"}} }}, {{ "memcmp": {{"offset": 8,"bytes": "{}"}} }} ]}}] }}"#, system_program::id(), bs58::encode(vec![1]).into_string(), authority, ); // Filter on Initialized and Nonce authority let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(json["result"].clone()) .expect("actual response deserialization"); assert_eq!(accounts.len(), 1); let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}",{{"filters": [ {{ "memcmp": {{"offset": 4,"bytes": "{}"}} }}, {{ "dataSize": 1 }} ]}}] }}"#, system_program::id(), bs58::encode(vec![1]).into_string(), ); // Filter on Initialized and non-matching data size let res = io.handle_request_sync(&req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(json["result"].clone()) .expect("actual response deserialization"); assert_eq!(accounts.len(), 0); } #[test] fn test_rpc_simulate_transaction() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, blockhash, alice, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let mut tx = system_transaction::transfer(&alice, &bob_pubkey, 1234, blockhash); let tx_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string(); tx.signatures[0] = Signature::default(); let tx_badsig_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string(); bank.freeze(); // Ensure the root bank is frozen, `start_rpc_handler_with_tx()` doesn't do this // Good signature with sigVerify=true let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#, tx_serialized_encoded, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{"err":null, "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" ]} }, "id": 1, }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Bad signature with sigVerify=true let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#, tx_badsig_serialized_encoded, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc":"2.0", "error": { "code": -32003, "message": "Transaction signature verification failure" }, "id":1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Bad signature with sigVerify=false let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": false}}]}}"#, tx_serialized_encoded, ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{"err":null, "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" ]} }, "id": 1, }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Bad signature with default sigVerify setting (false) let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}"]}}"#, tx_serialized_encoded, ); let res = io.handle_request_sync(&req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{"err":null, "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" ]} }, "id": 1, }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] #[should_panic] fn test_rpc_simulate_transaction_panic_on_unfrozen_bank() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, blockhash, alice, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let tx = system_transaction::transfer(&alice, &bob_pubkey, 1234, blockhash); let tx_serialized_encoded = bs58::encode(serialize(&tx).unwrap()).into_string(); assert!(!bank.is_frozen()); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"simulateTransaction","params":["{}", {{"sigVerify": true}}]}}"#, tx_serialized_encoded, ); // should panic because `bank` is not frozen let _ = io.handle_request_sync(&req, meta); } #[test] fn test_rpc_get_signature_statuses() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, blockhash, alice, confirmed_block_signatures, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#, confirmed_block_signatures[0] ); let res = io.handle_request_sync(&req, meta.clone()); let expected_res: transaction::Result<()> = Ok(()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let result: Option<TransactionStatus> = serde_json::from_value(json["result"]["value"][0].clone()) .expect("actual response deserialization"); let result = result.as_ref().unwrap(); assert_eq!(expected_res, result.status); assert_eq!(None, result.confirmations); // Test getSignatureStatus request on unprocessed tx let tx = system_transaction::transfer(&alice, &bob_pubkey, 10, blockhash); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#, tx.signatures[0] ); let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let result: Option<TransactionStatus> = serde_json::from_value(json["result"]["value"][0].clone()) .expect("actual response deserialization"); assert!(result.is_none()); // Test getSignatureStatus request on a TransactionError let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getSignatureStatuses","params":[["{}"]]}}"#, confirmed_block_signatures[1] ); let res = io.handle_request_sync(&req, meta); let expected_res: transaction::Result<()> = Err(TransactionError::InstructionError( 0, InstructionError::Custom(1), )); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let result: Option<TransactionStatus> = serde_json::from_value(json["result"]["value"][0].clone()) .expect("actual response deserialization"); assert_eq!(expected_res, result.as_ref().unwrap().status); } #[test] fn test_rpc_get_recent_blockhash() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, blockhash, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getRecentBlockhash"}"#; let res = io.handle_request_sync(&req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{ "blockhash": blockhash.to_string(), "feeCalculator": { "lamportsPerSignature": 0, } }}, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_fees() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, blockhash, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getFees"}"#; let res = io.handle_request_sync(&req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{ "blockhash": blockhash.to_string(), "feeCalculator": { "lamportsPerSignature": 0, }, "lastValidSlot": MAX_RECENT_BLOCKHASHES, }}, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_fee_calculator_for_blockhash() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let (blockhash, fee_calculator) = bank.last_blockhash_with_fee_calculator(); let fee_calculator = RpcFeeCalculator { fee_calculator }; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getFeeCalculatorForBlockhash","params":["{:?}"]}}"#, blockhash ); let res = io.handle_request_sync(&req, meta.clone()); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":fee_calculator, }, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); // Expired (non-existent) blockhash let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getFeeCalculatorForBlockhash","params":["{:?}"]}}"#, Hash::default() ); let res = io.handle_request_sync(&req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":Value::Null, }, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_get_fee_rate_governor() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getFeeRateGovernor"}"#; let res = io.handle_request_sync(&req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { "context":{"slot":0}, "value":{ "feeRateGovernor": { "burnPercent": DEFAULT_BURN_PERCENT, "maxLamportsPerSignature": 0, "minLamportsPerSignature": 0, "targetLamportsPerSignature": 0, "targetSignaturesPerSlot": 0 } }}, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_fail_request_airdrop() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); // Expect internal error because no faucet is available let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"requestAirdrop","params":["{}", 50]}}"#, bob_pubkey ); let res = io.handle_request_sync(&req, meta); let expected = r#"{"jsonrpc":"2.0","error":{"code":-32600,"message":"Invalid request"},"id":1}"#; let expected: Response = serde_json::from_str(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_send_bad_tx() { let genesis = create_genesis_config(100); let bank = Arc::new(Bank::new(&genesis.genesis_config)); let meta = JsonRpcRequestProcessor::new_from_bank(&bank); let mut io = MetaIoHandler::default(); io.extend_with(rpc_full::FullImpl.to_delegate()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["37u9WtQpcm6ULa3Vmu7ySnANv"]}"#; let res = io.handle_request_sync(req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let error = &json["error"]; assert_eq!(error["code"], ErrorCode::InvalidParams.code()); } #[test] fn test_rpc_send_transaction_preflight() { let exit = Arc::new(AtomicBool::new(false)); let validator_exit = create_validator_exit(&exit); let ledger_path = get_tmp_ledger_path!(); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let (bank_forks, mint_keypair, ..) = new_bank_forks(); let health = RpcHealth::stub(); // Freeze bank 0 to prevent a panic in `run_transaction_simulation()` bank_forks.write().unwrap().get(0).unwrap().freeze(); let mut io = MetaIoHandler::default(); io.extend_with(rpc_full::FullImpl.to_delegate()); let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair( ContactInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234")), )); let tpu_address = cluster_info.my_contact_info().tpu; let (meta, receiver) = JsonRpcRequestProcessor::new( JsonRpcConfig::default(), None, bank_forks.clone(), block_commitment_cache, blockstore, validator_exit, health.clone(), cluster_info, Hash::default(), Arc::new(tokio::runtime::Runtime::new().unwrap()), None, OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), Arc::new(RwLock::new(LargestAccountsCache::new(30))), Arc::new(MaxSlots::default()), Arc::new(LeaderScheduleCache::default()), Arc::new(AtomicU64::default()), ); SendTransactionService::new(tpu_address, &bank_forks, None, receiver, 1000, 1); let mut bad_transaction = system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 42, Hash::default(), ); // sendTransaction will fail because the blockhash is invalid let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}"]}}"#, bs58::encode(serialize(&bad_transaction).unwrap()).into_string() ); let res = io.handle_request_sync(&req, meta.clone()); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32002,"message":"Transaction simulation failed: Blockhash not found","data":{"err":"BlockhashNotFound","logs":[]}},"id":1}"#.to_string(), ) ); // sendTransaction will fail due to insanity bad_transaction.message.instructions[0].program_id_index = 0u8; let recent_blockhash = bank_forks.read().unwrap().root_bank().last_blockhash(); bad_transaction.sign(&[&mint_keypair], recent_blockhash); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}"]}}"#, bs58::encode(serialize(&bad_transaction).unwrap()).into_string() ); let res = io.handle_request_sync(&req, meta.clone()); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"invalid transaction: index out of bounds"},"id":1}"#.to_string(), ) ); let mut bad_transaction = system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 42, recent_blockhash, ); // sendTransaction will fail due to poor node health health.stub_set_health_status(Some(RpcHealthStatus::Behind { num_slots: 42 })); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}"]}}"#, bs58::encode(serialize(&bad_transaction).unwrap()).into_string() ); let res = io.handle_request_sync(&req, meta.clone()); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32005,"message":"Node is behind by 42 slots","data":{"numSlotsBehind":42}},"id":1}"#.to_string(), ) ); health.stub_set_health_status(None); // sendTransaction will fail due to invalid signature bad_transaction.signatures[0] = Signature::default(); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}"]}}"#, bs58::encode(serialize(&bad_transaction).unwrap()).into_string() ); let res = io.handle_request_sync(&req, meta.clone()); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32003,"message":"Transaction signature verification failure"},"id":1}"#.to_string(), ) ); // sendTransaction will now succeed because skipPreflight=true even though it's a bad // transaction let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}", {{"skipPreflight": true}}]}}"#, bs58::encode(serialize(&bad_transaction).unwrap()).into_string() ); let res = io.handle_request_sync(&req, meta.clone()); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","result":"1111111111111111111111111111111111111111111111111111111111111111","id":1}"#.to_string(), ) ); // sendTransaction will fail due to sanitization failure bad_transaction.signatures.clear(); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"sendTransaction","params":["{}"]}}"#, bs58::encode(serialize(&bad_transaction).unwrap()).into_string() ); let res = io.handle_request_sync(&req, meta); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"invalid transaction: index out of bounds"},"id":1}"#.to_string(), ) ); } #[test] fn test_rpc_verify_filter() { let filter = RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Binary( "13LeFbG6m2EP1fqCj9k66fcXsoTHMMtgr7c78AivUrYD".to_string(), ), encoding: None, }); assert_eq!(verify_filter(&filter), Ok(())); // Invalid base-58 let filter = RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Binary("III".to_string()), encoding: None, }); assert!(verify_filter(&filter).is_err()); } #[test] fn test_rpc_verify_pubkey() { let pubkey = solana_sdk::pubkey::new_rand(); assert_eq!(verify_pubkey(&pubkey.to_string()).unwrap(), pubkey); let bad_pubkey = "a1b2c3d4"; assert_eq!( verify_pubkey(&bad_pubkey.to_string()), Err(Error::invalid_params("Invalid param: WrongSize")) ); } #[test] fn test_rpc_verify_signature() { let tx = system_transaction::transfer( &Keypair::new(), &solana_sdk::pubkey::new_rand(), 20, hash(&[0]), ); assert_eq!( verify_signature(&tx.signatures[0].to_string()).unwrap(), tx.signatures[0] ); let bad_signature = "a1b2c3d4"; assert_eq!( verify_signature(&bad_signature.to_string()), Err(Error::invalid_params("Invalid param: WrongSize")) ); } fn new_bank_forks() -> (Arc<RwLock<BankForks>>, Keypair, Arc<Keypair>) { let GenesisConfigInfo { mut genesis_config, mint_keypair, voting_keypair, } = create_genesis_config(TEST_MINT_LAMPORTS); genesis_config.rent.lamports_per_byte_year = 50; genesis_config.rent.exemption_threshold = 2.0; genesis_config.epoch_schedule = EpochSchedule::custom(TEST_SLOTS_PER_EPOCH, TEST_SLOTS_PER_EPOCH, false); let bank = Bank::new(&genesis_config); ( Arc::new(RwLock::new(BankForks::new(bank))), mint_keypair, Arc::new(voting_keypair), ) } #[test] fn test_rpc_get_identity() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getIdentity"}"#; let res = io.handle_request_sync(&req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { "identity": bob_pubkey.to_string() }, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } fn test_basic_slot(method: &str, expected: Slot) { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!("{{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"{}\"}}", method); let res = io.handle_request_sync(&req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, expected); } #[test] fn test_rpc_get_max_slots() { test_basic_slot("getMaxRetransmitSlot", 42); test_basic_slot("getMaxShredInsertSlot", 43); } #[test] fn test_rpc_get_version() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getVersion"}"#; let res = io.handle_request_sync(&req, meta); let version = solana_version::Version::default(); let expected = json!({ "jsonrpc": "2.0", "result": { "solana-core": version.to_string(), "feature-set": version.feature_set, }, "id": 1 }); let expected: Response = serde_json::from_value(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } #[test] fn test_rpc_processor_get_block_commitment() { let exit = Arc::new(AtomicBool::new(false)); let validator_exit = create_validator_exit(&exit); let bank_forks = new_bank_forks().0; let ledger_path = get_tmp_ledger_path!(); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); let commitment_slot0 = BlockCommitment::new([8; MAX_LOCKOUT_HISTORY + 1]); let commitment_slot1 = BlockCommitment::new([9; MAX_LOCKOUT_HISTORY + 1]); let mut block_commitment: HashMap<u64, BlockCommitment> = HashMap::new(); block_commitment .entry(0) .or_insert_with(|| commitment_slot0.clone()); block_commitment .entry(1) .or_insert_with(|| commitment_slot1.clone()); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new( block_commitment, 42, CommitmentSlots::new_from_slot(bank_forks.read().unwrap().highest_slot()), ))); let cluster_info = Arc::new(ClusterInfo::default()); let tpu_address = cluster_info.my_contact_info().tpu; let (request_processor, receiver) = JsonRpcRequestProcessor::new( JsonRpcConfig::default(), None, bank_forks.clone(), block_commitment_cache, blockstore, validator_exit, RpcHealth::stub(), cluster_info, Hash::default(), Arc::new(tokio::runtime::Runtime::new().unwrap()), None, OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), Arc::new(RwLock::new(LargestAccountsCache::new(30))), Arc::new(MaxSlots::default()), Arc::new(LeaderScheduleCache::default()), Arc::new(AtomicU64::default()), ); SendTransactionService::new(tpu_address, &bank_forks, None, receiver, 1000, 1); assert_eq!( request_processor.get_block_commitment(0), RpcBlockCommitment { commitment: Some(commitment_slot0.commitment), total_stake: 42, } ); assert_eq!( request_processor.get_block_commitment(1), RpcBlockCommitment { commitment: Some(commitment_slot1.commitment), total_stake: 42, } ); assert_eq!( request_processor.get_block_commitment(2), RpcBlockCommitment { commitment: None, total_stake: 42, } ); } #[test] fn test_rpc_get_block_commitment() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, block_commitment_cache, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlockCommitment","params":[0]}"#; let res = io.handle_request_sync(&req, meta.clone()); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let RpcBlockCommitment { commitment, total_stake, } = if let Response::Single(res) = result { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; assert_eq!( commitment, block_commitment_cache .read() .unwrap() .get_block_commitment(0) .map(|block_commitment| block_commitment.commitment) ); assert_eq!(total_stake, 10); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlockCommitment","params":[2]}"#; let res = io.handle_request_sync(&req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let commitment_response: RpcBlockCommitment<BlockCommitmentArray> = if let Response::Single(res) = result { if let Output::Success(res) = res { serde_json::from_value(res.result).unwrap() } else { panic!("Expected success"); } } else { panic!("Expected single response"); }; assert_eq!(commitment_response.commitment, None); assert_eq!(commitment_response.total_stake, 10); } #[test] fn test_get_block() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, confirmed_block_signatures, blockhash, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0]}"#; let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_block: Option<EncodedConfirmedBlock> = serde_json::from_value(result["result"].clone()).unwrap(); let confirmed_block = confirmed_block.unwrap(); assert_eq!(confirmed_block.transactions.len(), 3); assert_eq!(confirmed_block.rewards, vec![]); for EncodedTransactionWithStatusMeta { transaction, meta } in confirmed_block.transactions.into_iter() { if let EncodedTransaction::Json(transaction) = transaction { if transaction.signatures[0] == confirmed_block_signatures[0].to_string() { let meta = meta.unwrap(); let transaction_recent_blockhash = match transaction.message { UiMessage::Parsed(message) => message.recent_blockhash, UiMessage::Raw(message) => message.recent_blockhash, }; assert_eq!(transaction_recent_blockhash, blockhash.to_string()); assert_eq!(meta.status, Ok(())); assert_eq!(meta.err, None); } else if transaction.signatures[0] == confirmed_block_signatures[1].to_string() { let meta = meta.unwrap(); assert_eq!( meta.err, Some(TransactionError::InstructionError( 0, InstructionError::Custom(1) )) ); assert_eq!( meta.status, Err(TransactionError::InstructionError( 0, InstructionError::Custom(1) )) ); } else { assert_eq!(meta, None); } } } let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0,"binary"]}"#; let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_block: Option<EncodedConfirmedBlock> = serde_json::from_value(result["result"].clone()).unwrap(); let confirmed_block = confirmed_block.unwrap(); assert_eq!(confirmed_block.transactions.len(), 3); assert_eq!(confirmed_block.rewards, vec![]); for EncodedTransactionWithStatusMeta { transaction, meta } in confirmed_block.transactions.into_iter() { if let EncodedTransaction::LegacyBinary(transaction) = transaction { let decoded_transaction: Transaction = deserialize(&bs58::decode(&transaction).into_vec().unwrap()).unwrap(); if decoded_transaction.signatures[0] == confirmed_block_signatures[0] { let meta = meta.unwrap(); assert_eq!(decoded_transaction.message.recent_blockhash, blockhash); assert_eq!(meta.status, Ok(())); assert_eq!(meta.err, None); } else if decoded_transaction.signatures[0] == confirmed_block_signatures[1] { let meta = meta.unwrap(); assert_eq!( meta.err, Some(TransactionError::InstructionError( 0, InstructionError::Custom(1) )) ); assert_eq!( meta.status, Err(TransactionError::InstructionError( 0, InstructionError::Custom(1) )) ); } else { assert_eq!(meta, None); } } } } #[test] fn test_get_block_config() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, confirmed_block_signatures, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0,{}]}}"#, json!(RpcBlockConfig { encoding: None, transaction_details: Some(TransactionDetails::Signatures), rewards: Some(false), commitment: None, }) ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_block: Option<UiConfirmedBlock> = serde_json::from_value(result["result"].clone()).unwrap(); let confirmed_block = confirmed_block.unwrap(); assert!(confirmed_block.transactions.is_none()); assert!(confirmed_block.rewards.is_none()); for (i, signature) in confirmed_block.signatures.unwrap()[..2].iter().enumerate() { assert_eq!(*signature, confirmed_block_signatures[i].to_string()); } let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0,{}]}}"#, json!(RpcBlockConfig { encoding: None, transaction_details: Some(TransactionDetails::None), rewards: Some(true), commitment: None, }) ); let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_block: Option<UiConfirmedBlock> = serde_json::from_value(result["result"].clone()).unwrap(); let confirmed_block = confirmed_block.unwrap(); assert!(confirmed_block.transactions.is_none()); assert!(confirmed_block.signatures.is_none()); assert_eq!(confirmed_block.rewards.unwrap(), vec![]); } #[test] fn test_get_blocks() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let roots = vec![0, 1, 3, 4, 8]; let RpcHandler { io, meta, block_commitment_cache, .. } = start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots.clone()); block_commitment_cache .write() .unwrap() .set_highest_confirmed_root(8); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0]}"#; let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, roots[1..].to_vec()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[2]}"#; let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4, 8]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0,4]}"#; let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![1, 3, 4]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0,7]}"#; let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![1, 3, 4]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[9,11]}"#; let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, Vec::<Slot>::new()); block_commitment_cache .write() .unwrap() .set_highest_confirmed_root(std::u64::MAX); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0,{}]}}"#, MAX_GET_CONFIRMED_BLOCKS_RANGE ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![1, 3, 4, 8]); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0,{}]}}"#, MAX_GET_CONFIRMED_BLOCKS_RANGE + 1 ); let res = io.handle_request_sync(&req, meta); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Slot range too large; max 500000"},"id":1}"#.to_string(), ) ); } #[test] fn test_get_blocks_with_limit() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let roots = vec![0, 1, 3, 4, 8]; let RpcHandler { io, meta, block_commitment_cache, .. } = start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, roots); block_commitment_cache .write() .unwrap() .set_highest_confirmed_root(8); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[0,500001]}"#; let res = io.handle_request_sync(&req, meta.clone()); assert_eq!( res, Some( r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Limit too large; max 500000"},"id":1}"#.to_string(), ) ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[0,0]}"#; let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert!(confirmed_blocks.is_empty()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[2,2]}"#; let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[2,3]}"#; let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4, 8]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[2,500000]}"#; let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4, 8]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[9,500000]}"#; let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, Vec::<Slot>::new()); } #[test] fn test_get_block_time() { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, bank, block_commitment_cache, bank_forks, .. } = start_rpc_handler_with_tx_and_blockstore(&bob_pubkey, vec![1, 2, 3, 4, 5, 6, 7]); let base_timestamp = bank_forks .read() .unwrap() .get(0) .unwrap() .unix_timestamp_from_genesis(); block_commitment_cache .write() .unwrap() .set_highest_confirmed_root(7); let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year()); let slot = 2; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[{}]}}"#, slot ); let res = io.handle_request_sync(&req, meta.clone()); let expected = format!(r#"{{"jsonrpc":"2.0","result":{},"id":1}}"#, base_timestamp); let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); let slot = 7; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[{}]}}"#, slot ); let res = io.handle_request_sync(&req, meta.clone()); let expected = format!( r#"{{"jsonrpc":"2.0","result":{},"id":1}}"#, base_timestamp + (7 * slot_duration).as_secs() as i64 ); let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); let slot = 12345; let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[{}]}}"#, slot ); let res = io.handle_request_sync(&req, meta); let expected = r#"{"jsonrpc":"2.0","error":{"code":-32004,"message":"Block not available for slot 12345"},"id":1}"#; let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); } fn advance_block_commitment_cache( block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>, bank_forks: &Arc<RwLock<BankForks>>, ) { let mut new_block_commitment = BlockCommitmentCache::new( HashMap::new(), 0, CommitmentSlots::new_from_slot(bank_forks.read().unwrap().highest_slot()), ); let mut w_block_commitment_cache = block_commitment_cache.write().unwrap(); std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment); } #[test] fn test_get_vote_accounts() { let RpcHandler { io, meta, mut bank, bank_forks, alice, leader_vote_keypair, block_commitment_cache, .. } = start_rpc_handler_with_tx(&solana_sdk::pubkey::new_rand()); assert_eq!(bank.vote_accounts().len(), 1); // Create a vote account with no stake. let alice_vote_keypair = Keypair::new(); let instructions = vote_instruction::create_account( &alice.pubkey(), &alice_vote_keypair.pubkey(), &VoteInit { node_pubkey: alice.pubkey(), authorized_voter: alice_vote_keypair.pubkey(), authorized_withdrawer: alice_vote_keypair.pubkey(), commission: 0, }, bank.get_minimum_balance_for_rent_exemption(VoteState::size_of()), ); let message = Message::new(&instructions, Some(&alice.pubkey())); let transaction = Transaction::new( &[&alice, &alice_vote_keypair], message, bank.last_blockhash(), ); bank.process_transaction(&transaction) .expect("process transaction"); assert_eq!(bank.vote_accounts().len(), 2); // Check getVoteAccounts: the bootstrap validator vote account will be delinquent as it has // stake but has never voted, and the vote account with no stake should not be present. { let req = r#"{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts"}"#; let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let vote_account_status: RpcVoteAccountStatus = serde_json::from_value(result["result"].clone()).unwrap(); assert!(vote_account_status.current.is_empty()); assert_eq!(vote_account_status.delinquent.len(), 1); for vote_account_info in vote_account_status.delinquent { assert_ne!(vote_account_info.activated_stake, 0); } } let mut advance_bank = || { bank.freeze(); // Votes let instructions = [ vote_instruction::vote( &leader_vote_keypair.pubkey(), &leader_vote_keypair.pubkey(), Vote { slots: vec![bank.slot()], hash: bank.hash(), timestamp: None, }, ), vote_instruction::vote( &alice_vote_keypair.pubkey(), &alice_vote_keypair.pubkey(), Vote { slots: vec![bank.slot()], hash: bank.hash(), timestamp: None, }, ), ]; bank = bank_forks.write().unwrap().insert(Bank::new_from_parent( &bank, &Pubkey::default(), bank.slot() + 1, )); advance_block_commitment_cache(&block_commitment_cache, &bank_forks); let transaction = Transaction::new_signed_with_payer( &instructions, Some(&alice.pubkey()), &[&alice, &leader_vote_keypair, &alice_vote_keypair], bank.last_blockhash(), ); bank.process_transaction(&transaction) .expect("process transaction"); }; // Advance bank to the next epoch for _ in 0..TEST_SLOTS_PER_EPOCH { advance_bank(); } let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts","params":{}}}"#, json!([CommitmentConfig::processed()]) ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let vote_account_status: RpcVoteAccountStatus = serde_json::from_value(result["result"].clone()).unwrap(); // The vote account with no stake should not be present. assert!(vote_account_status.delinquent.is_empty()); // Both accounts should be active and have voting history. assert_eq!(vote_account_status.current.len(), 2); let leader_info = vote_account_status .current .iter() .find(|x| x.vote_pubkey == leader_vote_keypair.pubkey().to_string()) .unwrap(); assert_ne!(leader_info.activated_stake, 0); // Subtract one because the last vote always carries over to the next epoch let expected_credits = TEST_SLOTS_PER_EPOCH - MAX_LOCKOUT_HISTORY as u64 - 1; assert_eq!( leader_info.epoch_credits, vec![ (0, expected_credits, 0), (1, expected_credits + 1, expected_credits) // one vote in current epoch ] ); // Filter request based on the leader: { let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts","params":{}}}"#, json!([RpcGetVoteAccountsConfig { vote_pubkey: Some(leader_vote_keypair.pubkey().to_string()), commitment: Some(CommitmentConfig::processed()) }]) ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let vote_account_status: RpcVoteAccountStatus = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(vote_account_status.current.len(), 1); assert_eq!(vote_account_status.delinquent.len(), 0); for vote_account_info in vote_account_status.current { assert_eq!( vote_account_info.vote_pubkey, leader_vote_keypair.pubkey().to_string() ); } } // Overflow the epoch credits history and ensure only `MAX_RPC_EPOCH_CREDITS_HISTORY` // results are returned for _ in 0..(TEST_SLOTS_PER_EPOCH * (MAX_RPC_EPOCH_CREDITS_HISTORY) as u64) { advance_bank(); } let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts","params":{}}}"#, json!([CommitmentConfig::processed()]) ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let vote_account_status: RpcVoteAccountStatus = serde_json::from_value(result["result"].clone()).unwrap(); assert!(vote_account_status.delinquent.is_empty()); assert!(!vote_account_status .current .iter() .any(|x| x.epoch_credits.len() != MAX_RPC_EPOCH_CREDITS_HISTORY)); // Advance bank with no voting bank.freeze(); bank_forks.write().unwrap().insert(Bank::new_from_parent( &bank, &Pubkey::default(), bank.slot() + TEST_SLOTS_PER_EPOCH, )); advance_block_commitment_cache(&block_commitment_cache, &bank_forks); // The leader vote account should now be delinquent, and the other vote account disappears // because it's inactive with no stake { let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts","params":{}}}"#, json!([CommitmentConfig::processed()]) ); let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let vote_account_status: RpcVoteAccountStatus = serde_json::from_value(result["result"].clone()).unwrap(); assert!(vote_account_status.current.is_empty()); assert_eq!(vote_account_status.delinquent.len(), 1); for vote_account_info in vote_account_status.delinquent { assert_eq!( vote_account_info.vote_pubkey, leader_vote_keypair.pubkey().to_string() ); } } } #[test] fn test_is_finalized() { let bank = Arc::new(Bank::default()); let ledger_path = get_tmp_ledger_path!(); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); blockstore.set_roots(&[0, 1]).unwrap(); // Build BlockCommitmentCache with rooted slots let mut cache0 = BlockCommitment::default(); cache0.increase_rooted_stake(50); let mut cache1 = BlockCommitment::default(); cache1.increase_rooted_stake(40); let mut cache2 = BlockCommitment::default(); cache2.increase_rooted_stake(20); let mut block_commitment = HashMap::new(); block_commitment.entry(1).or_insert(cache0); block_commitment.entry(2).or_insert(cache1); block_commitment.entry(3).or_insert(cache2); let highest_confirmed_root = 1; let block_commitment_cache = BlockCommitmentCache::new( block_commitment, 50, CommitmentSlots { slot: bank.slot(), highest_confirmed_root, ..CommitmentSlots::default() }, ); assert!(is_finalized(&block_commitment_cache, &bank, &blockstore, 0)); assert!(is_finalized(&block_commitment_cache, &bank, &blockstore, 1)); assert!(!is_finalized( &block_commitment_cache, &bank, &blockstore, 2 )); assert!(!is_finalized( &block_commitment_cache, &bank, &blockstore, 3 )); } #[test] fn test_token_rpcs() { let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&solana_sdk::pubkey::new_rand()); let mut account_data = vec![0; TokenAccount::get_packed_len()]; let mint = SplTokenPubkey::new(&[2; 32]); let owner = SplTokenPubkey::new(&[3; 32]); let delegate = SplTokenPubkey::new(&[4; 32]); let token_account = TokenAccount { mint, owner, delegate: COption::Some(delegate), amount: 420, state: TokenAccountState::Initialized, is_native: COption::None, delegated_amount: 30, close_authority: COption::Some(owner), }; TokenAccount::pack(token_account, &mut account_data).unwrap(); let token_account = AccountSharedData::from(Account { lamports: 111, data: account_data.to_vec(), owner: spl_token_id_v2_0(), ..Account::default() }); let token_account_pubkey = solana_sdk::pubkey::new_rand(); bank.store_account(&token_account_pubkey, &token_account); // Add the mint let mut mint_data = vec![0; Mint::get_packed_len()]; let mint_state = Mint { mint_authority: COption::Some(owner), supply: 500, decimals: 2, is_initialized: true, freeze_authority: COption::Some(owner), }; Mint::pack(mint_state, &mut mint_data).unwrap(); let mint_account = AccountSharedData::from(Account { lamports: 111, data: mint_data.to_vec(), owner: spl_token_id_v2_0(), ..Account::default() }); bank.store_account(&Pubkey::from_str(&mint.to_string()).unwrap(), &mint_account); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenAccountBalance","params":["{}"]}}"#, token_account_pubkey, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let balance: UiTokenAmount = serde_json::from_value(result["result"]["value"].clone()).unwrap(); let error = f64::EPSILON; assert!((balance.ui_amount.unwrap() - 4.2).abs() < error); assert_eq!(balance.amount, 420.to_string()); assert_eq!(balance.decimals, 2); assert_eq!(balance.ui_amount_string, "4.2".to_string()); // Test non-existent token account let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenAccountBalance","params":["{}"]}}"#, solana_sdk::pubkey::new_rand(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert!(result.get("error").is_some()); // Test get token supply, pulls supply from mint let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenSupply","params":["{}"]}}"#, mint, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let supply: UiTokenAmount = serde_json::from_value(result["result"]["value"].clone()).unwrap(); let error = f64::EPSILON; assert!((supply.ui_amount.unwrap() - 5.0).abs() < error); assert_eq!(supply.amount, 500.to_string()); assert_eq!(supply.decimals, 2); assert_eq!(supply.ui_amount_string, "5".to_string()); // Test non-existent mint address let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenSupply","params":["{}"]}}"#, solana_sdk::pubkey::new_rand(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert!(result.get("error").is_some()); // Add another token account with the same owner, delegate, and mint let other_token_account_pubkey = solana_sdk::pubkey::new_rand(); bank.store_account(&other_token_account_pubkey, &token_account); // Add another token account with the same owner and delegate but different mint let mut account_data = vec![0; TokenAccount::get_packed_len()]; let new_mint = SplTokenPubkey::new(&[5; 32]); let token_account = TokenAccount { mint: new_mint, owner, delegate: COption::Some(delegate), amount: 42, state: TokenAccountState::Initialized, is_native: COption::None, delegated_amount: 30, close_authority: COption::Some(owner), }; TokenAccount::pack(token_account, &mut account_data).unwrap(); let token_account = AccountSharedData::from(Account { lamports: 111, data: account_data.to_vec(), owner: spl_token_id_v2_0(), ..Account::default() }); let token_with_different_mint_pubkey = solana_sdk::pubkey::new_rand(); bank.store_account(&token_with_different_mint_pubkey, &token_account); // Test getTokenAccountsByOwner with Token program id returns all accounts, regardless of Mint address let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByOwner", "params":["{}", {{"programId": "{}"}}] }}"#, owner, spl_token_id_v2_0(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!(accounts.len(), 3); // Test getTokenAccountsByOwner with jsonParsed encoding doesn't return accounts with invalid mints let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByOwner", "params":["{}", {{"programId": "{}"}}, {{"encoding": "jsonParsed"}}] }}"#, owner, spl_token_id_v2_0(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!(accounts.len(), 2); // Test getProgramAccounts with jsonParsed encoding returns mints, but doesn't return accounts with invalid mints let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getProgramAccounts", "params":["{}", {{"encoding": "jsonParsed"}}] }}"#, spl_token_id_v2_0(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(accounts.len(), 4); // Test returns only mint accounts let req = format!( r#"{{ "jsonrpc":"2.0", "id":1,"method":"getTokenAccountsByOwner", "params":["{}", {{"mint": "{}"}}] }}"#, owner, mint, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!(accounts.len(), 2); // Test non-existent Mint/program id let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByOwner", "params":["{}", {{"programId": "{}"}}] }}"#, owner, solana_sdk::pubkey::new_rand(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert!(result.get("error").is_some()); let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByOwner", "params":["{}", {{"mint": "{}"}}] }}"#, owner, solana_sdk::pubkey::new_rand(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert!(result.get("error").is_some()); // Test non-existent Owner let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByOwner", "params":["{}", {{"programId": "{}"}}] }}"#, solana_sdk::pubkey::new_rand(), spl_token_id_v2_0(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert!(accounts.is_empty()); // Test getTokenAccountsByDelegate with Token program id returns all accounts, regardless of Mint address let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByDelegate", "params":["{}", {{"programId": "{}"}}] }}"#, delegate, spl_token_id_v2_0(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!(accounts.len(), 3); // Test returns only mint accounts let req = format!( r#"{{ "jsonrpc":"2.0", "id":1,"method": "getTokenAccountsByDelegate", "params":["{}", {{"mint": "{}"}}] }}"#, delegate, mint, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!(accounts.len(), 2); // Test non-existent Mint/program id let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByDelegate", "params":["{}", {{"programId": "{}"}}] }}"#, delegate, solana_sdk::pubkey::new_rand(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert!(result.get("error").is_some()); let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByDelegate", "params":["{}", {{"mint": "{}"}}] }}"#, delegate, solana_sdk::pubkey::new_rand(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert!(result.get("error").is_some()); // Test non-existent Delegate let req = format!( r#"{{ "jsonrpc":"2.0", "id":1, "method":"getTokenAccountsByDelegate", "params":["{}", {{"programId": "{}"}}] }}"#, solana_sdk::pubkey::new_rand(), spl_token_id_v2_0(), ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let accounts: Vec<RpcKeyedAccount> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert!(accounts.is_empty()); // Add new_mint, and another token account on new_mint with different balance let mut mint_data = vec![0; Mint::get_packed_len()]; let mint_state = Mint { mint_authority: COption::Some(owner), supply: 500, decimals: 2, is_initialized: true, freeze_authority: COption::Some(owner), }; Mint::pack(mint_state, &mut mint_data).unwrap(); let mint_account = AccountSharedData::from(Account { lamports: 111, data: mint_data.to_vec(), owner: spl_token_id_v2_0(), ..Account::default() }); bank.store_account( &Pubkey::from_str(&new_mint.to_string()).unwrap(), &mint_account, ); let mut account_data = vec![0; TokenAccount::get_packed_len()]; let token_account = TokenAccount { mint: new_mint, owner, delegate: COption::Some(delegate), amount: 10, state: TokenAccountState::Initialized, is_native: COption::None, delegated_amount: 30, close_authority: COption::Some(owner), }; TokenAccount::pack(token_account, &mut account_data).unwrap(); let token_account = AccountSharedData::from(Account { lamports: 111, data: account_data.to_vec(), owner: spl_token_id_v2_0(), ..Account::default() }); let token_with_smaller_balance = solana_sdk::pubkey::new_rand(); bank.store_account(&token_with_smaller_balance, &token_account); // Test largest token accounts let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getTokenLargestAccounts","params":["{}"]}}"#, new_mint, ); let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let largest_accounts: Vec<RpcTokenAccountBalance> = serde_json::from_value(result["result"]["value"].clone()).unwrap(); assert_eq!( largest_accounts, vec![ RpcTokenAccountBalance { address: token_with_different_mint_pubkey.to_string(), amount: UiTokenAmount { ui_amount: Some(0.42), decimals: 2, amount: "42".to_string(), ui_amount_string: "0.42".to_string(), } }, RpcTokenAccountBalance { address: token_with_smaller_balance.to_string(), amount: UiTokenAmount { ui_amount: Some(0.1), decimals: 2, amount: "10".to_string(), ui_amount_string: "0.1".to_string(), } } ] ); } #[test] fn test_token_parsing() { let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&solana_sdk::pubkey::new_rand()); let mut account_data = vec![0; TokenAccount::get_packed_len()]; let mint = SplTokenPubkey::new(&[2; 32]); let owner = SplTokenPubkey::new(&[3; 32]); let delegate = SplTokenPubkey::new(&[4; 32]); let token_account = TokenAccount { mint, owner, delegate: COption::Some(delegate), amount: 420, state: TokenAccountState::Initialized, is_native: COption::Some(10), delegated_amount: 30, close_authority: COption::Some(owner), }; TokenAccount::pack(token_account, &mut account_data).unwrap(); let token_account = AccountSharedData::from(Account { lamports: 111, data: account_data.to_vec(), owner: spl_token_id_v2_0(), ..Account::default() }); let token_account_pubkey = solana_sdk::pubkey::new_rand(); bank.store_account(&token_account_pubkey, &token_account); // Add the mint let mut mint_data = vec![0; Mint::get_packed_len()]; let mint_state = Mint { mint_authority: COption::Some(owner), supply: 500, decimals: 2, is_initialized: true, freeze_authority: COption::Some(owner), }; Mint::pack(mint_state, &mut mint_data).unwrap(); let mint_account = AccountSharedData::from(Account { lamports: 111, data: mint_data.to_vec(), owner: spl_token_id_v2_0(), ..Account::default() }); bank.store_account(&Pubkey::from_str(&mint.to_string()).unwrap(), &mint_account); let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}", {{"encoding": "jsonParsed"}}]}}"#, token_account_pubkey, ); let res = io.handle_request_sync(&req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!( result["result"]["value"]["data"], json!({ "program": "spl-token", "space": TokenAccount::get_packed_len(), "parsed": { "type": "account", "info": { "mint": mint.to_string(), "owner": owner.to_string(), "tokenAmount": { "uiAmount": 4.2, "decimals": 2, "amount": "420", "uiAmountString": "4.2", }, "delegate": delegate.to_string(), "state": "initialized", "isNative": true, "rentExemptReserve": { "uiAmount": 0.1, "decimals": 2, "amount": "10", "uiAmountString": "0.1", }, "delegatedAmount": { "uiAmount": 0.3, "decimals": 2, "amount": "30", "uiAmountString": "0.3", }, "closeAuthority": owner.to_string(), } } }) ); // Test Mint let req = format!( r#"{{"jsonrpc":"2.0","id":1,"method":"getAccountInfo","params":["{}", {{"encoding": "jsonParsed"}}]}}"#, mint, ); let res = io.handle_request_sync(&req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!( result["result"]["value"]["data"], json!({ "program": "spl-token", "space": Mint::get_packed_len(), "parsed": { "type": "mint", "info": { "mintAuthority": owner.to_string(), "decimals": 2, "supply": "500".to_string(), "isInitialized": true, "freezeAuthority": owner.to_string(), } } }) ); } #[test] fn test_get_spl_token_owner_filter() { let owner = Pubkey::new_unique(); assert_eq!( get_spl_token_owner_filter( &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), &[ RpcFilterType::Memcmp(Memcmp { offset: 32, bytes: MemcmpEncodedBytes::Binary(owner.to_string()), encoding: None }), RpcFilterType::DataSize(165) ], ) .unwrap(), owner ); // Filtering on mint instead of owner assert!(get_spl_token_owner_filter( &Pubkey::from_str("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA").unwrap(), &[ RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Binary(owner.to_string()), encoding: None }), RpcFilterType::DataSize(165) ], ) .is_none()); // Wrong program id assert!(get_spl_token_owner_filter( &Pubkey::new_unique(), &[ RpcFilterType::Memcmp(Memcmp { offset: 32, bytes: MemcmpEncodedBytes::Binary(owner.to_string()), encoding: None }), RpcFilterType::DataSize(165) ], ) .is_none()); } #[test] fn test_rpc_single_gossip() { let exit = Arc::new(AtomicBool::new(false)); let validator_exit = create_validator_exit(&exit); let ledger_path = get_tmp_ledger_path!(); let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); let cluster_info = Arc::new(ClusterInfo::default()); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100); let bank = Bank::new(&genesis_config); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone(); let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1); bank_forks.write().unwrap().insert(bank1); let bank1 = bank_forks.read().unwrap().get(1).unwrap().clone(); let bank2 = Bank::new_from_parent(&bank1, &Pubkey::default(), 2); bank_forks.write().unwrap().insert(bank2); let bank2 = bank_forks.read().unwrap().get(2).unwrap().clone(); let bank3 = Bank::new_from_parent(&bank2, &Pubkey::default(), 3); bank_forks.write().unwrap().insert(bank3); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let mut pending_optimistically_confirmed_banks = HashSet::new(); let subscriptions = Arc::new(RpcSubscriptions::new( &exit, bank_forks.clone(), block_commitment_cache.clone(), optimistically_confirmed_bank.clone(), )); let (meta, _receiver) = JsonRpcRequestProcessor::new( JsonRpcConfig::default(), None, bank_forks.clone(), block_commitment_cache, blockstore, validator_exit, RpcHealth::stub(), cluster_info, Hash::default(), Arc::new(tokio::runtime::Runtime::new().unwrap()), None, optimistically_confirmed_bank.clone(), Arc::new(RwLock::new(LargestAccountsCache::new(30))), Arc::new(MaxSlots::default()), Arc::new(LeaderScheduleCache::default()), Arc::new(AtomicU64::default()), ); let mut io = MetaIoHandler::default(); io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); io.extend_with(rpc_full::FullImpl.to_delegate()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment":"confirmed"}]}"#; let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 0); OptimisticallyConfirmedBankTracker::process_notification( BankNotification::OptimisticallyConfirmed(2), &bank_forks, &optimistically_confirmed_bank, &subscriptions, &mut pending_optimistically_confirmed_banks, ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 2); // Test rollback does not appear to happen, even if slots are notified out of order OptimisticallyConfirmedBankTracker::process_notification( BankNotification::OptimisticallyConfirmed(1), &bank_forks, &optimistically_confirmed_bank, &subscriptions, &mut pending_optimistically_confirmed_banks, ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 2); // Test bank will only be cached when frozen OptimisticallyConfirmedBankTracker::process_notification( BankNotification::OptimisticallyConfirmed(3), &bank_forks, &optimistically_confirmed_bank, &subscriptions, &mut pending_optimistically_confirmed_banks, ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; let res = io.handle_request_sync(&req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 2); // Test freezing an optimistically confirmed bank will update cache let bank3 = bank_forks.read().unwrap().get(3).unwrap().clone(); OptimisticallyConfirmedBankTracker::process_notification( BankNotification::Frozen(bank3), &bank_forks, &optimistically_confirmed_bank, &subscriptions, &mut pending_optimistically_confirmed_banks, ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; let res = io.handle_request_sync(&req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 3); } #[test] fn test_worst_case_encoded_tx_goldens() { let ff_tx = vec![0xffu8; PACKET_DATA_SIZE]; let tx58 = bs58::encode(&ff_tx).into_string(); assert_eq!(tx58.len(), WORST_CASE_BASE58_TX); let tx64 = base64::encode(&ff_tx); assert_eq!(tx64.len(), WORST_CASE_BASE64_TX); } #[test] fn test_deserialize_transaction_too_large_payloads_fail() { // +2 because +1 still fits in base64 encoded worst-case let too_big = PACKET_DATA_SIZE + 2; let tx_ser = vec![0xffu8; too_big]; let tx58 = bs58::encode(&tx_ser).into_string(); let tx58_len = tx58.len(); let expect58 = Error::invalid_params(format!( "encoded transaction too large: {} bytes (max: encoded/raw {}/{})", tx58_len, WORST_CASE_BASE58_TX, PACKET_DATA_SIZE, )); assert_eq!( deserialize_transaction(tx58, UiTransactionEncoding::Base58).unwrap_err(), expect58 ); let tx64 = base64::encode(&tx_ser); let tx64_len = tx64.len(); let expect64 = Error::invalid_params(format!( "encoded transaction too large: {} bytes (max: encoded/raw {}/{})", tx64_len, WORST_CASE_BASE64_TX, PACKET_DATA_SIZE, )); assert_eq!( deserialize_transaction(tx64, UiTransactionEncoding::Base64).unwrap_err(), expect64 ); let too_big = PACKET_DATA_SIZE + 1; let tx_ser = vec![0x00u8; too_big]; let tx58 = bs58::encode(&tx_ser).into_string(); let expect = Error::invalid_params(format!( "transaction too large: {} bytes (max: {} bytes)", too_big, PACKET_DATA_SIZE )); assert_eq!( deserialize_transaction(tx58, UiTransactionEncoding::Base58).unwrap_err(), expect ); let tx64 = base64::encode(&tx_ser); assert_eq!( deserialize_transaction(tx64, UiTransactionEncoding::Base64).unwrap_err(), expect ); } #[test] fn test_deserialize_transaction_unsanitary() { let unsanitary_tx58 = "ju9xZWuDBX4pRxX2oZkTjxU5jB4SSTgEGhX8bQ8PURNzyzqKMPPpNvWihx8zUe\ FfrbVNoAaEsNKZvGzAnTDy5bhNT9kt6KFCTBixpvrLCzg4M5UdFUQYrn1gdgjX\ pLHxcaShD81xBNaFDgnA2nkkdHnKtZt4hVSfKAmw3VRZbjrZ7L2fKZBx21CwsG\ hD6onjM2M3qZW5C8J6d1pj41MxKmZgPBSha3MyKkNLkAGFASK" .to_string(); let expect58 = Error::invalid_params("invalid transaction: index out of bounds".to_string()); assert_eq!( deserialize_transaction(unsanitary_tx58, UiTransactionEncoding::Base58).unwrap_err(), expect58 ); } }
{ debug!("get_epoch_schedule rpc request received"); Ok(meta.get_epoch_schedule()) }
mpisupport.py
""" OpenMPI support wrapper """ import threading import queue import logging import time import resource logger = logging.getLogger() try: from mpi4py import MPI except ImportError: logger.warning("MPI support unavailable") def is_parent(): return MPI.COMM_WORLD.Get_rank() == 0 CHILD_RETRY_HELLO = 60 class MpiParent(object): mpicomm = None mpi_queue = queue.Queue() mpi_child_threads = [] mpi_child_status = {} mpi_child_meminfo = {} mpi_child_timeout = 3600 mpi_child_ready_timeout = 30 mpi_parent_status = "" # WARNING - this operates as a singleton class - always using the # latest instance created. latest_instance = None def __init__(self): logger.debug("Initialising MpiParent") self.__class__.latest_instance = self self.mpi_run() @classmethod def mpi_wait(cls, *, stop=True): """ Wait for all work to be done, then tell things to stop. Make sure you've put things in the queue before calling this... or it will all just exit and move on. """ if cls.mpicomm is None: # We haven't launched any MPI workers - we need to launch the local # management threads, so that the remote MPI processes will quit. cls._mpi_init() # When the queue is done, we can continue. cls.update_parent_stats("Waiting for work to finish") # This waits for an empty queue AND task_done to have been called # for each item. cls.mpi_queue.join() if not stop: # Nothing more to do for now return cls.update_parent_stats("Telling children to exit") for _ in cls.mpi_child_threads: cls.mpi_queue.put(None) # Clean up the threads, in case we run out cls.update_parent_stats("Waiting for threads to exit") running_threads = [t for t in cls.mpi_child_threads] while running_threads: t = running_threads.pop(0) t.join(0.1) if t.is_alive(): running_threads.append(t) else: cls.update_parent_stats("Thread {} joined - waiting for {} more" .format(t, len(running_threads))) # Set the list as empty, so it'll be re-made if more work is required. cls.mpi_child_threads = [] cls.update_parent_stats("Work done") # We need to let threads, remote MPI processes etc. all clean up # properly - and a second seems to be ample time for this. time.sleep(1) @classmethod def show_stats(cls): child_stats = '\n\t'.join(['{} ({}): {}'.format(k, cls.mpi_child_meminfo.get(k, "-"), cls.mpi_child_status[k]) for k in sorted(cls.mpi_child_status.keys())]) logger.info("Status:\n\tParent: %s\n\tQueue: %s\n\t%s", cls.mpi_parent_status, cls.mpi_queue.qsize(), child_stats) @classmethod def update_parent_stats(cls, msg): logger.debug(msg) cls.mpi_parent_status = msg @classmethod def mpi_manage_child(cls, child): """ Manage communications with the specified MPI child """ logger.info("Child manager {} starting".format(child))
if meminfo: cls.mpi_child_meminfo[child] = meminfo logger.debug("Child {}: {}".format(child, status)) waiting_for_results = False while True: # Wait for the child to be ready start = time.time() abort = False while not cls.mpicomm.Iprobe(source=child): time.sleep(0.1) if time.time() - start > cls.mpi_child_ready_timeout: logger.error("Child {} took too long to be ready. Aborting.".format(child)) stat(child, "child not ready") abort = True break if not abort: ready = cls.mpicomm.recv(source=child) if ready is not True: stat(child, "unexpected response ({}...)".format(str(ready[:30]))) abort = True if abort: time.sleep(5) continue # wait for something to do stat(child, "waiting for queue") args = cls.mpi_queue.get() # send it to the remote child stat(child, "sending data to child") cls.mpicomm.send(args, dest=child) if args is None: # That's the call to quit stat(child, "quitting") return stat(child, "waiting for results ({})".format(args)) # get the results back start = time.time() while True: while not cls.mpicomm.Iprobe(source=child): time.sleep(1) if time.time() - start > cls.mpi_child_timeout: logger.error("Child {} took too long to return. Aborting.".format(child)) stat(child, "timeout - task returned to the queue") # Put it back on the queue for someone else to do cls.mpi_queue.put(args) cls.mpi_queue.task_done() time.sleep(5) return data = cls.mpicomm.recv(source=child) if data is True: # This is just a "hello" stat(child, "recv hello ({})".format(time.ctime())) continue # This must be real data back... break ret, meminfo = data stat(child, "sent results back", meminfo) # process the result by handing it to the latest_instance's # mpi_handle_result method. cls.latest_instance.mpi_handle_result(args, ret) cls.mpi_queue.task_done() stat(child, "task done") def mpi_handle_result(self, args, ret): """ Handle an MPI result @param args: original args sent to the child @param ret: response from the child """ raise NotImplemented @classmethod def mpi_run(cls): """ Top-level MPI parent method. """ return cls._mpi_init() @classmethod def _mpi_init(cls): """ Start up the MPI management threads etc. """ cls.mpicomm = MPI.COMM_WORLD rank = cls.mpicomm.Get_rank() assert rank == 0 # parent if cls.mpi_child_threads: logger.debug("We've already got child processes - so just using them") return logger.info("MPI-enabled version with {} processors available" .format(cls.mpicomm.size)) assert cls.mpicomm.size > 1, "Please run this under MPI with more than one processor" for child in range(1, cls.mpicomm.size): t = threading.Thread(target=cls.mpi_manage_child, args=(child,), daemon=True) t.start() cls.mpi_child_threads.append(t) t = threading.Thread(target=cls.stats_thread, daemon=True) t.start() @classmethod def stats_thread(cls): while True: cls.show_stats() time.sleep(60) def mpi_child(fn): """ An MPI child wrapper that will call the supplied function in a child context - reading its arguments from mpicomm.recv(source=0). """ rank = MPI.COMM_WORLD.Get_rank() logger.debug("Child {} (remote) starting".format(rank)) while True: # A little sleep to let everything start... time.sleep(3) # Send ready logger.debug("Child {} (remote) sending hello".format(rank)) try: MPI.COMM_WORLD.send(True, dest=0) except Exception: # Sometimes we see messages like this: # [bb2a3c26][[4455,1],95][btl_tcp_endpoint.c:818:mca_btl_tcp_endpoint_complete_connect] connect() to 169.254.95.120 failed: Connection refused (111) # That seems to kill the process... and we're lost. logger.warning("Error saying hello", exc_info=True) time.sleep(5) continue else: logger.debug("Child {} (remote) sent hello".format(rank)) start = time.time() retry = False # child - wait to be given a data structure while not MPI.COMM_WORLD.Iprobe(source=0): if time.time() - start > CHILD_RETRY_HELLO: retry = True break time.sleep(1) if retry: logger.debug("Child {} (remote) heard nothing from parent - will send another hello".format(rank)) continue try: args = MPI.COMM_WORLD.recv(source=0) except EOFError: logger.exception("Child {} error receiving instructions - carrying on".format(rank)) continue if args is None: logger.info("Child {} (remote) exiting - no args received".format(rank)) break logger.debug("Child {} (remote) received data".format(rank)) ret = fn(*args) mem_raw = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss mem_size = resource.getpagesize() mem_bytes = mem_raw * mem_size meminfo = "{:.2f} MB".format(mem_bytes / 1024 ** 2) if ret is None: # Nothing was generated MPI.COMM_WORLD.send((None, meminfo), dest=0) logger.info("Child {} (remote) aborted job".format(rank)) else: logger.debug("Child {} (remote) sending results back".format(rank)) MPI.COMM_WORLD.send((ret, meminfo), dest=0) logger.debug("Child {} (remote) completed job".format(rank)) # Show leaking objects... uncomment this to track them... # tracker.print_diff()
def stat(child, status, meminfo=None): cls.mpi_child_status[child] = "[{}]: {}".format(time.ctime(), status)
controller_utils.go
/* Copyright 2020 The Rook Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controller import ( "context" "fmt" "reflect" "time" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "github.com/rook/rook/pkg/clusterd" "github.com/rook/rook/pkg/operator/k8sutil" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // OperatorSettingConfigMapName refers to ConfigMap that configures rook ceph operator const OperatorSettingConfigMapName string = "rook-ceph-operator-config" var ( // ImmediateRetryResult Return this for a immediate retry of the reconciliation loop with the same request object. ImmediateRetryResult = reconcile.Result{Requeue: true} // WaitForRequeueIfCephClusterNotReady waits for the CephCluster to be ready WaitForRequeueIfCephClusterNotReady = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second} // WaitForRequeueIfFinalizerBlocked waits for resources to be cleaned up before the finalizer can be removed WaitForRequeueIfFinalizerBlocked = reconcile.Result{Requeue: true, RequeueAfter: 10 * time.Second} // OperatorCephBaseImageVersion is the ceph version in the operator image OperatorCephBaseImageVersion string ) // IsReadyToReconcile determines if a controller is ready to reconcile or not func
(c client.Client, clustercontext *clusterd.Context, namespacedName types.NamespacedName, controllerName string) (cephv1.CephCluster, bool, bool, reconcile.Result) { cephClusterExists := false // Running ceph commands won't work and the controller will keep re-queuing so I believe it's fine not to check // Make sure a CephCluster exists before doing anything var cephCluster cephv1.CephCluster clusterList := &cephv1.CephClusterList{} err := c.List(context.TODO(), clusterList, client.InNamespace(namespacedName.Namespace)) if err != nil { logger.Errorf("%q: failed to fetch CephCluster %v", controllerName, err) return cephCluster, false, cephClusterExists, ImmediateRetryResult } if len(clusterList.Items) == 0 { logger.Debugf("%q: no CephCluster resource found in namespace %q", controllerName, namespacedName.Namespace) return cephCluster, false, cephClusterExists, WaitForRequeueIfCephClusterNotReady } cephClusterExists = true cephCluster = clusterList.Items[0] logger.Debugf("%q: CephCluster resource %q found in namespace %q", controllerName, cephCluster.Name, namespacedName.Namespace) // read the CR status of the cluster if cephCluster.Status.CephStatus != nil { if cephCluster.Status.CephStatus.Health == "HEALTH_OK" || cephCluster.Status.CephStatus.Health == "HEALTH_WARN" { logger.Debugf("%q: ceph status is %q, operator is ready to run ceph command, reconciling", controllerName, cephCluster.Status.CephStatus.Health) return cephCluster, true, cephClusterExists, WaitForRequeueIfCephClusterNotReady } logger.Infof("%s: CephCluster %q found but skipping reconcile since ceph health is %q", controllerName, cephCluster.Name, cephCluster.Status.CephStatus) } return cephCluster, false, cephClusterExists, WaitForRequeueIfCephClusterNotReady } // ClusterOwnerRef represents the owner reference of the CephCluster CR func ClusterOwnerRef(clusterName, clusterID string) metav1.OwnerReference { blockOwner := true return metav1.OwnerReference{ APIVersion: fmt.Sprintf("%s/%s", ClusterResource.Group, ClusterResource.Version), Kind: ClusterResource.Kind, Name: clusterName, UID: types.UID(clusterID), BlockOwnerDeletion: &blockOwner, } } // ClusterResource operator-kit Custom Resource Definition var ClusterResource = k8sutil.CustomResource{ Name: "cephcluster", Plural: "cephclusters", Group: cephv1.CustomResourceGroup, Version: cephv1.Version, Kind: reflect.TypeOf(cephv1.CephCluster{}).Name(), APIVersion: fmt.Sprintf("%s/%s", cephv1.CustomResourceGroup, cephv1.Version), }
IsReadyToReconcile
olive.rs
use druid::Color; pub const EBONY: Color = Color::rgb8(85,93, 80); pub const BLACK_OLIVE: Color = Color::rgb8(59, 60, 54); pub const OLIVE: Color = Color::rgb8(128, 128, 0); pub const ARMY_GREEN: Color = Color::rgb8(75, 83, 32);
pub const FELDGRAU_YELLOW: Color = Color::rgb8(92, 92, 61);
test_models.py
from django.test import TestCase, Client from django.contrib.auth import get_user_model class ModelTests(TestCase):
class UserModelTests(TestCase): ''' Test whether the user characteristics are saved well ''' def setUp(self): self.client = Client() self.client.force_login(self.admin_user) self.user = get_user_model().objects.create_user( email = '[email protected]', username = 'Test username' ) user.set_password(password)
def test_create_user_with_email_successful(self): ''' Test that creating a user with an email is successful ''' email = '[email protected]' password = '456@3' username = 'test1' user = get_user_model().objects.create_user( email = email, username = username ) user.set_password(password) self.assertEqual(user.email, email) self.assertTrue(user.check_password(password)) def test_user_email_is_normalised(self): ''' Test that user email used to sign in is normalized ''' email = '[email protected]' user = get_user_model().objects.create_user(email, 'test123') self.assertEqual(user.email, email.lower()) def test_create_user_invalid_email(self): ''' Test creating user with no email raises an error ''' with self.assertRaises(ValueError): get_user_model().objects.create_user(None, 'test123') def test_create_new_super_user(self): '''Test creating a superuser''' user = get_user_model().objects.create_superuser( '[email protected]', 'test123' ) self.assertTrue(user.is_superuser) # is_superuser is added by PermissionsMixin self.assertTrue(user.is_staff)
xpi.py
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import os import zipfile import simplejson as json from cuddlefish.util import filter_filenames, filter_dirnames class HarnessOptionAlreadyDefinedError(Exception): """You cannot use --harness-option on keys that already exist in harness-options.json""" ZIPSEP = "/" # always use "/" in zipfiles def make_zipfile_path(localroot, localpath): return ZIPSEP.join(localpath[len(localroot)+1:].split(os.sep)) def mkzipdir(zf, path): dirinfo = zipfile.ZipInfo(path) dirinfo.external_attr = int("040755", 8) << 16L zf.writestr(dirinfo, "") def build_xpi(template_root_dir, manifest, xpi_path, harness_options, limit_to=None, extra_harness_options={}, bundle_sdk=True, pkgdir=""):
IGNORED_FILES = [".hgignore", ".DS_Store", "install.rdf", "application.ini", xpi_path] files_to_copy = {} # maps zipfile path to local-disk abspath dirs_to_create = set() # zipfile paths, no trailing slash zf = zipfile.ZipFile(xpi_path, "w", zipfile.ZIP_DEFLATED) open('.install.rdf', 'w').write(str(manifest)) zf.write('.install.rdf', 'install.rdf') os.remove('.install.rdf') # Handle add-on icon if 'icon' in harness_options: zf.write(str(harness_options['icon']), 'icon.png') del harness_options['icon'] if 'icon64' in harness_options: zf.write(str(harness_options['icon64']), 'icon64.png') del harness_options['icon64'] # chrome.manifest if os.path.isfile(os.path.join(pkgdir, 'chrome.manifest')): files_to_copy['chrome.manifest'] = os.path.join(pkgdir, 'chrome.manifest') # chrome folder (would contain content, skin, and locale folders typically) folder = 'chrome' if os.path.exists(os.path.join(pkgdir, folder)): dirs_to_create.add('chrome') # cp -r folder abs_dirname = os.path.join(pkgdir, folder) for dirpath, dirnames, filenames in os.walk(abs_dirname): goodfiles = list(filter_filenames(filenames, IGNORED_FILES)) dirnames[:] = filter_dirnames(dirnames) for dirname in dirnames: arcpath = make_zipfile_path(template_root_dir, os.path.join(dirpath, dirname)) dirs_to_create.add(arcpath) for filename in goodfiles: abspath = os.path.join(dirpath, filename) arcpath = ZIPSEP.join( [folder, make_zipfile_path(abs_dirname, os.path.join(dirpath, filename)), ]) files_to_copy[str(arcpath)] = str(abspath) # Handle simple-prefs if 'preferences' in harness_options: from options_xul import parse_options, validate_prefs validate_prefs(harness_options["preferences"]) opts_xul = parse_options(harness_options["preferences"], harness_options["jetpackID"], harness_options["preferencesBranch"]) open('.options.xul', 'wb').write(opts_xul.encode("utf-8")) zf.write('.options.xul', 'options.xul') os.remove('.options.xul') from options_defaults import parse_options_defaults prefs_js = parse_options_defaults(harness_options["preferences"], harness_options["preferencesBranch"]) open('.prefs.js', 'wb').write(prefs_js.encode("utf-8")) else: open('.prefs.js', 'wb').write("") zf.write('.prefs.js', 'defaults/preferences/prefs.js') os.remove('.prefs.js') for dirpath, dirnames, filenames in os.walk(template_root_dir): filenames = list(filter_filenames(filenames, IGNORED_FILES)) dirnames[:] = filter_dirnames(dirnames) for dirname in dirnames: arcpath = make_zipfile_path(template_root_dir, os.path.join(dirpath, dirname)) dirs_to_create.add(arcpath) for filename in filenames: abspath = os.path.join(dirpath, filename) arcpath = make_zipfile_path(template_root_dir, abspath) files_to_copy[arcpath] = abspath # `packages` attribute contains a dictionnary of dictionnary # of all packages sections directories for packageName in harness_options['packages']: base_arcpath = ZIPSEP.join(['resources', packageName]) # Eventually strip sdk files. We need to do that in addition to the # whilelist as the whitelist is only used for `cfx xpi`: if not bundle_sdk and packageName == 'addon-sdk': continue # Always write the top directory, even if it contains no files, since # the harness will try to access it. dirs_to_create.add(base_arcpath) for sectionName in harness_options['packages'][packageName]: abs_dirname = harness_options['packages'][packageName][sectionName] base_arcpath = ZIPSEP.join(['resources', packageName, sectionName]) # Always write the top directory, even if it contains no files, since # the harness will try to access it. dirs_to_create.add(base_arcpath) # cp -r stuff from abs_dirname/ into ZIP/resources/RESOURCEBASE/ for dirpath, dirnames, filenames in os.walk(abs_dirname): goodfiles = list(filter_filenames(filenames, IGNORED_FILES)) dirnames[:] = filter_dirnames(dirnames) for filename in goodfiles: abspath = os.path.join(dirpath, filename) if limit_to is not None and abspath not in limit_to: continue # strip unused files arcpath = ZIPSEP.join( ['resources', packageName, sectionName, make_zipfile_path(abs_dirname, os.path.join(dirpath, filename)), ]) files_to_copy[str(arcpath)] = str(abspath) del harness_options['packages'] locales_json_data = {"locales": []} mkzipdir(zf, "locale/") for language in sorted(harness_options['locale']): locales_json_data["locales"].append(language) locale = harness_options['locale'][language] # Be carefull about strings, we need to always ensure working with UTF-8 jsonStr = json.dumps(locale, indent=1, sort_keys=True, ensure_ascii=False) info = zipfile.ZipInfo('locale/' + language + '.json') info.external_attr = 0644 << 16L zf.writestr(info, jsonStr.encode( "utf-8" )) del harness_options['locale'] jsonStr = json.dumps(locales_json_data, ensure_ascii=True) +"\n" info = zipfile.ZipInfo('locales.json') info.external_attr = 0644 << 16L zf.writestr(info, jsonStr.encode("utf-8")) # now figure out which directories we need: all retained files parents for arcpath in files_to_copy: bits = arcpath.split("/") for i in range(1,len(bits)): parentpath = ZIPSEP.join(bits[0:i]) dirs_to_create.add(parentpath) # Create zipfile in alphabetical order, with each directory before its # files for name in sorted(dirs_to_create.union(set(files_to_copy))): if name in dirs_to_create: mkzipdir(zf, name+"/") if name in files_to_copy: zf.write(files_to_copy[name], name) # Add extra harness options harness_options = harness_options.copy() for key,value in extra_harness_options.items(): if key in harness_options: msg = "Can't use --harness-option for existing key '%s'" % key raise HarnessOptionAlreadyDefinedError(msg) harness_options[key] = value # Write harness-options.json open('.options.json', 'w').write(json.dumps(harness_options, indent=1, sort_keys=True)) zf.write('.options.json', 'harness-options.json') os.remove('.options.json') zf.close()
test_orthogonal_projection.py
from __future__ import division import pytest import numpy as np from random import randint from fairml.orthogonal_projection import audit_model from fairml.orthogonal_projection import get_orthogonal_vector from fairml.utils import mse from fairml.utils import accuracy from fairml.utils import detect_feature_sign from fairml.perturbation_strategies import constant_zero # let's define a black-box function def black_box_function(input_data): if not (input_data.shape[1] == weights.shape[0]): raise Exception("problem, misaligned dimensions") output = np.dot(input_data, weights) return output def test_orthogonal_projection(number_of_tries=20, size=10000): """Orthogonal projection function. """ for i in range(number_of_tries): a = np.random.normal(0, 1, size) b = np.random.normal(0, 1, size) c = np.random.binomial(10, 0.1, size) d = np.random.uniform(0, 10, size) # normal-normal check orth_b = get_orthogonal_vector(a, b) assert np.dot(orth_b, a) < 1e-8 # normal- normal check ortho_c = get_orthogonal_vector(a, c) assert np.dot(ortho_c, a) < 1e-8 # normal - uniform check ortho_d = get_orthogonal_vector(a, d) assert np.dot(ortho_d, a) < 1e-8
test_mse = mse(y_true, y_pred) assert test_mse == 0.375 def test_accuracy(): y_pred = [0, 2, 1, 3] y_true = [0, 1, 2, 3] test_acc = accuracy(y_pred, y_true) print(test_acc) assert test_acc == 0.5
def test_mse(): y_true = [3, -0.5, 2, 7] y_pred = [2.5, 0.0, 2, 8]
sample_go_test.go
package sample import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) var _ = Describe("Sample", func() { Describe("Calculating square root", func() { Context("of the number 4", func() { It("should result 2", func() { Setup() Expect(GetResult()).To(Equal(2.0)) })
})
}) })
jodit.ts
/*! * Jodit Editor (https://xdsoft.net/jodit/) * Released under MIT see LICENSE.txt in the project root for license information. * Copyright (c) 2013-2022 Valeriy Chupurnov. All rights reserved. https://xdsoft.net */ /** * [[include:README.md]] * @packageDocumentation * @module jodit */ import type { CustomCommand, ExecCommandCallback, IDictionary, IPluginSystem, IStatusBar, IViewOptions, IWorkPlace, MarkerInfo, Modes, IFileBrowser, IJodit, IUploader, ICreate, IFileBrowserCallBackData, IStorage, CanPromise, IObserver } from './types'; import { Config } from './config'; import * as consts from './core/constants'; import { Create, Dom, Observer, Plugin, Select, StatusBar, STATUSES } from './modules/'; import { asArray, css, isPromise, normalizeKeyAliases, error, isString, attr, isFunction, resolveElement, isVoid, callPromise, toArray, markAsAtomic, ConfigProto, kebabCase } from './core/helpers/'; import { Storage } from './core/storage/'; import { ViewWithToolbar } from './core/view/view-with-toolbar'; import { instances, pluginSystem, modules, lang } from './core/global'; import { autobind, cache } from './core/decorators'; /** * Class Jodit. Main class */ export class Jodit extends ViewWithToolbar implements IJodit { /** @override */ className(): string { return 'Jodit'; } /** * Return promise for ready actions * @example * ```js * const jodit = Jodit.make('#editor'); * await jodit.waitForReady(); * jodit.e.fire('someAsyncLoadedPluginEvent', (test) => { * alert(test); * }); * ``` */ waitForReady(): Promise<IJodit> { if (this.isReady) { return Promise.resolve(this); } return this.async.promise(resolve => { this.hookStatus('ready', () => resolve(this)); }); } /** * Define if object is Jodit */ override readonly isJodit: true = true; /** * Plain text editor's value */ get text(): string { if (this.editor) { return this.editor.innerText || ''; } const div = this.createInside.div(); div.innerHTML = this.getElementValue(); return div.innerText || ''; } /** * Return default timeout period in milliseconds for some debounce or throttle functions. * By default return `{observer.timeout}` options */ override get defaultTimeout(): number { return this.options && this.o.observer ? this.o.observer.timeout : Config.defaultOptions.observer.timeout; } /** * Method wrap usual Has Object in Object helper for prevent deep object merging in options* */ static atom<T>(object: T): T { return markAsAtomic(object); } /** * Factory for creating Jodit instance */ static make(element: HTMLElement | string, options?: object): Jodit { return new Jodit(element, options); } /** * Default settings */ static override get defaultOptions(): Config { return Config.defaultOptions; } static fatMode: boolean = false; static plugins: IPluginSystem = pluginSystem; static modules: IDictionary<Function> = modules; static ns: IDictionary<Function> = modules; static decorators: IDictionary<Function> = {}; static instances: IDictionary<IJodit> = instances; static lang: any = lang; static core = { Plugin }; private __defaultStyleDisplayKey = 'data-jodit-default-style-display'; private __defaultClassesKey = 'data-jodit-default-classes'; private commands: IDictionary<Array<CustomCommand<IJodit>>> = {}; private __selectionLocked: MarkerInfo[] | null = null; private __wasReadOnly = false; /** * Container for set/get value */ override readonly storage!: IStorage; readonly createInside: ICreate = new Create( () => this.ed, this.o.createAttributes ); /** * Editor has focus in this time */ editorIsActive = false; private setPlaceField(field: keyof IWorkPlace, value: any): void { if (!this.currentPlace) { this.currentPlace = {} as any; this.places = [this.currentPlace]; } this.currentPlace[field] = value; } /** * element It contains source element */ get element(): HTMLElement { return this.currentPlace.element; } /** * editor It contains the root element editor */ get editor(): HTMLDivElement | HTMLBodyElement { return this.currentPlace.editor; } set editor(editor: HTMLDivElement | HTMLBodyElement) { this.setPlaceField('editor', editor); } /** * Container for all staff */ override get container(): HTMLDivElement { return this.currentPlace.container; } override set container(container: HTMLDivElement) { this.setPlaceField('container', container); } /** * workplace It contains source and wysiwyg editors */ get workplace(): HTMLDivElement { return this.currentPlace.workplace; } /** * Statusbar module */ get statusbar(): IStatusBar { return this.currentPlace.statusbar; } /** * iframe Iframe for iframe mode */ get iframe(): HTMLIFrameElement | void { return this.currentPlace.iframe; } set iframe(iframe: HTMLIFrameElement | void) { this.setPlaceField('iframe', iframe); } get observer(): IObserver { return this.currentPlace.observer; } /** * In iframe mode editor's window can be different by owner */ get editorWindow(): Window { return this.currentPlace.editorWindow; } set editorWindow(win: Window) { this.setPlaceField('editorWindow', win); } /** * Alias for this.ew */ get ew(): this['editorWindow'] { return this.editorWindow; } /** * In iframe mode editor's window can be different by owner */ get editorDocument(): Document { return this.currentPlace.editorWindow.document; } /** * Alias for this.ew */ get ed(): this['editorDocument'] { return this.editorDocument; } /** * options All Jodit settings default + second arguments of constructor */ override get options(): Config { return this.currentPlace.options as Config; } override set options(opt: Config) { this.setPlaceField('options', opt); } readonly selection: Select; /** * Alias for this.selection */ get s(): this['selection'] { return this.selection; } @cache get uploader(): IUploader { return this.getInstance('Uploader', this.o.uploader); } @cache get filebrowser(): IFileBrowser { const jodit = this; const options = ConfigProto( { defaultTimeout: jodit.defaultTimeout, uploader: jodit.o.uploader, language: jodit.o.language, license: jodit.o.license, theme: jodit.o.theme, defaultCallback(data: IFileBrowserCallBackData): void { if (data.files && data.files.length) { data.files.forEach((file, i) => { const url = data.baseurl + file; const isImage = data.isImages ? data.isImages[i] : false; if (isImage) { jodit.s.insertImage( url, null, jodit.o.imageDefaultWidth ); } else { jodit.s.insertNode( jodit.createInside.fromHTML( `<a href='${url}' title='${url}'>${url}</a>` ) ); } }); } } }, this.o.filebrowser ); return jodit.getInstance<IFileBrowser>('FileBrowser', options); } private __mode: Modes = consts.MODE_WYSIWYG; /** * Editor's mode */ get mode(): Modes { return this.__mode; } set mode(mode: Modes) { this.setMode(mode); } /** * Return real HTML value from WYSIWYG editor. */ getNativeEditorValue(): string { const value: string = this.e.fire('beforeGetNativeEditorValue'); if (isString(value)) { return value; } if (this.editor) { return this.editor.innerHTML; } return this.getElementValue(); } /** * Set value to native editor */ setNativeEditorValue(value: string): void { const data = { value }; if (this.e.fire('beforeSetNativeEditorValue', data)) { return; } if (this.editor) { this.editor.innerHTML = data.value; } } /** * HTML value */ get value(): string { return this.getEditorValue(); } set value(html: string) { this.setEditorValue(html); } /** * Return editor value */ getEditorValue( removeSelectionMarkers: boolean = true, consumer?: string ): string { /** * Triggered before getEditorValue executed. * If returned not undefined getEditorValue will return this value * @example * ```javascript * var editor = new Jodit("#redactor"); * editor.e.on('beforeGetValueFromEditor', function () { * return editor.editor.innerHTML.replace(/a/g, 'b'); * }); * ``` */ let value: string; value = this.e.fire('beforeGetValueFromEditor', consumer); if (value !== undefined) { return value; } value = this.getNativeEditorValue().replace( consts.INVISIBLE_SPACE_REG_EXP(), '' ); if (removeSelectionMarkers) { value = value.replace( /<span[^>]+id="jodit-selection_marker_[^>]+><\/span>/g, '' ); } if (value === '<br>') { value = ''; } /** * Triggered after getEditorValue got value from wysiwyg. * It can change new_value.value * * @example * ```javascript * var editor = new Jodit("#redactor"); * editor.e.on('afterGetValueFromEditor', function (new_value) { * new_value.value = new_value.value.replace('a', 'b'); * }); * ``` */ const new_value: { value: string } = { value }; this.e.fire('afterGetValueFromEditor', new_value, consumer); return new_value.value; } private __callChangeCount = 0; /** * Set editor html value and if set sync fill source element value * When method was called without arguments - it is simple way to synchronize editor to element */ setEditorValue(value?: string): void { /** * Triggered before getEditorValue set value to wysiwyg. * @example * ```javascript * var editor = new Jodit("#redactor"); * editor.e.on('beforeSetValueToEditor', function (old_value) { * return old_value.value.replace('a', 'b'); * }); * editor.e.on('beforeSetValueToEditor', function () { * return false; // disable setEditorValue method * }); * ``` */ const newValue: string | undefined | false = this.e.fire( 'beforeSetValueToEditor', value ); if (newValue === false) { return; } if (isString(newValue)) { value = newValue; } if (!this.editor) { if (value !== undefined) { this.setElementValue(value); } return; // try change value before init or after destruct } if (!isString(value) && !isVoid(value)) { throw error('value must be string'); } if (value !== undefined && this.getNativeEditorValue() !== value) { this.setNativeEditorValue(value); } this.e.fire('postProcessSetEditorValue'); const old_value = this.getElementValue(), new_value = this.getEditorValue(); if ( !this.isSilentChange && old_value !== new_value && this.__callChangeCount < consts.SAFE_COUNT_CHANGE_CALL ) { this.setElementValue(new_value); this.__callChangeCount += 1; try { this.observer.upTick(); this.e.fire('change', new_value, old_value); this.e.fire(this.observer, 'change', new_value, old_value); } finally { this.__callChangeCount = 0; } } } /** * Return source element value */ getElementValue(): string { return (this.element as HTMLInputElement).value !== undefined ? (this.element as HTMLInputElement).value : this.element.innerHTML; } /** * Set source element value and if set sync fill editor value * When method was called without arguments - it is simple way to synchronize element to editor */ setElementValue(value?: string): void { if (!isString(value) && value !== undefined) { throw error('value must be string'); } if (value !== undefined) { if (this.element !== this.container) { if ((this.element as HTMLInputElement).value !== undefined) { (this.element as HTMLInputElement).value = value; } else { this.element.innerHTML = value; } } } else { value = this.getElementValue(); } if (value !== this.getEditorValue()) { this.setEditorValue(value); } } /** * Register custom handler for command * * @example * ```javascript * var jodit = new Jodit('#editor); * * jodit.setEditorValue('test test test'); * * jodit.registerCommand('replaceString', function (command, needle, replace) { * var value = this.getEditorValue(); * this.setEditorValue(value.replace(needle, replace)); * return false; // stop execute native command * }); * * jodit.execCommand('replaceString', 'test', 'stop'); * * console.log(jodit.value); // stop test test * * // and you can add hotkeys for command * jodit.registerCommand('replaceString', { * hotkeys: 'ctrl+r', * exec: function (command, needle, replace) { * var value = this.getEditorValue(); * this.setEditorValue(value.replace(needle, replace)); * } * }); * * ``` */ registerCommand( commandNameOriginal: string, command: CustomCommand<IJodit>, options?: { stopPropagation: boolean; } ): IJodit { const commandName: string = commandNameOriginal.toLowerCase(); if (this.commands[commandName] === undefined) { this.commands[commandName] = []; } this.commands[commandName].push(command); if (!isFunction(command)) { const hotkeys: string | string[] | void = this.o.commandToHotkeys[commandName] || this.o.commandToHotkeys[commandNameOriginal] || command.hotkeys; if (hotkeys) { this.registerHotkeyToCommand( hotkeys, commandName, options?.stopPropagation ); } } return this; } /** * Register hotkey for command */ registerHotkeyToCommand( hotkeys: string | string[], commandName: string, shouldStop: boolean = true ): void { const shortcuts: string = asArray(hotkeys) .map(normalizeKeyAliases) .map(hotkey => hotkey + '.hotkey') .join(' '); this.e .off(shortcuts) .on(shortcuts, (type: string, stop: { shouldStop: boolean }) => { stop.shouldStop = shouldStop ?? true; return this.execCommand(commandName); // because need `beforeCommand` }); } /** * Execute command editor * * @param command - command. It supports all the * {@link https://developer.mozilla.org/ru/docs/Web/API/Document/execCommand#commands} and a number of its own * for example applyStyleProperty. Comand fontSize receives the second parameter px, * formatBlock and can take several options * @example * ```javascript * this.execCommand('fontSize', 12); // sets the size of 12 px * this.execCommand('underline'); * this.execCommand('formatBlock', 'p'); // will be inserted paragraph * ``` */ execCommand( command: string, showUI: boolean = false, value: null | any = null ): void { if (this.o.readonly && command !== 'selectall') { return; } let result: any; command = command.toLowerCase(); /** * Called before any command * @param command - Command name in lowercase * @param second - The second parameter for the command * @param third - The third option is for the team * @example * ```javascript * parent.e.on('beforeCommand', function (command) { * if (command === 'justifyCenter') { * var p = parent.c.element('p') * parent.s.insertNode(p) * parent.s.setCursorIn(p); * p.style.textAlign = 'justyfy'; * return false; // break execute native command * } * }) * ``` */ result = this.e.fire('beforeCommand', command, showUI, value); if (result !== false) { result = this.execCustomCommands(command, showUI, value); } if (result !== false) { this.s.focus(); if (command === 'selectall') { this.s.select(this.editor, true); } else { try { result = this.nativeExecCommand(command, showUI, value); } catch (e) { if (!isProd) { throw e; } } } } /** * It called after any command * @param command - name command * @param second - The second parameter for the command * @param third - The third option is for the team */ this.e.fire('afterCommand', command, showUI, value); this.setEditorValue(); // synchrony return result; } /** * Don't raise a change event */ private isSilentChange: boolean = false; /** * Exec native command */ nativeExecCommand( command: string, showUI: boolean = false, value: null | any = null ): boolean { this.isSilentChange = true; try { return this.ed.execCommand(command, showUI, value); } finally { this.isSilentChange = false; } } private execCustomCommands( commandName: string, second: any = false, third: null | any = null ): false | void { commandName = commandName.toLowerCase(); if (this.commands[commandName] !== undefined) { let result: any; const exec = (command: CustomCommand<Jodit>) => { let callback: ExecCommandCallback<Jodit>; if (isFunction(command)) { callback = command; } else { callback = command.exec; } const resultCurrent: any = (callback as any).call( this, commandName, second, third ); if (resultCurrent !== undefined) { result = resultCurrent; } }; for (let i = 0; i < this.commands[commandName].length; i += 1) { exec(this.commands[commandName][i]); } return result; } } /** * Disable selecting */ override lock(name = 'any'): boolean { if (super.lock(name)) { this.__selectionLocked = this.s.save(); this.s.clear(); this.editor.classList.add('jodit_disabled'); this.e.fire('lock', true); return true; } return false; } /** * Enable selecting */ override unlock(): boolean { if (super.unlock()) { this.editor.classList.remove('jodit_disabled'); if (this.__selectionLocked) { this.s.restore(); } this.e.fire('lock', false); return true; } return false; } /** * Return current editor mode: Jodit.MODE_WYSIWYG, Jodit.MODE_SOURCE or Jodit.MODE_SPLIT */ getMode(): Modes { return this.mode; } isEditorMode(): boolean { return this.getRealMode() === consts.MODE_WYSIWYG; } /** * Return current real work mode. When editor in MODE_SOURCE or MODE_WYSIWYG it will * return them, but then editor in MODE_SPLIT it will return MODE_SOURCE if * Textarea(CodeMirror) focused or MODE_WYSIWYG otherwise * * @example * ```javascript * var editor = new Jodit('#editor'); * console.log(editor.getRealMode()); * ``` */ getRealMode(): Modes { if (this.getMode() !== consts.MODE_SPLIT) { return this.getMode(); } const active = this.od.activeElement; if ( active && (active === this.iframe || Dom.isOrContains(this.editor, active) || Dom.isOrContains(this.toolbar.container, active)) ) { return consts.MODE_WYSIWYG; } return consts.MODE_SOURCE; } /** * Set current mode */ setMode(mode: number | string): void { const oldmode: Modes = this.getMode(); const data = { mode: parseInt(mode.toString(), 10) as Modes }, modeClasses = [ 'jodit-wysiwyg_mode', 'jodit-source__mode', 'jodit_split_mode' ]; /** * Triggered before setMode executed. If returned false method stopped * @param data - PlainObject `{mode: {string}}` In handler you can change data.mode * @example * ```javascript * var editor = new Jodit("#redactor"); * editor.e.on('beforeSetMode', function (data) { * data.mode = Jodit.MODE_SOURCE; // not respond to the mode change. Always make the source code mode * }); * ``` */ if (this.e.fire('beforeSetMode', data) === false) { return; } this.__mode = [ consts.MODE_SOURCE, consts.MODE_WYSIWYG, consts.MODE_SPLIT ].includes(data.mode) ? data.mode : consts.MODE_WYSIWYG; if (this.o.saveModeInStorage) { this.storage.set('jodit_default_mode', this.mode); } modeClasses.forEach(className => { this.container.classList.remove(className); }); this.container.classList.add(modeClasses[this.mode - 1]); /** * Triggered after setMode executed * @example * ```javascript * var editor = new Jodit("#redactor"); * editor.e.on('afterSetMode', function () { * editor.setEditorValue(''); // clear editor's value after change mode * }); * ``` */ if (oldmode !== this.getMode()) { this.e.fire('afterSetMode'); } } /** * Toggle editor mode WYSIWYG to TEXTAREA(CodeMirror) to SPLIT(WYSIWYG and TEXTAREA) to again WYSIWYG * * @example * ```javascript * var editor = new Jodit('#editor'); * editor.toggleMode(); * ``` */ toggleMode(): void { let mode = this.getMode(); if ( [ consts.MODE_SOURCE, consts.MODE_WYSIWYG, this.o.useSplitMode ? consts.MODE_SPLIT : 9 ].includes(mode + 1) ) { mode += 1; } else { mode = consts.MODE_WYSIWYG; } this.setMode(mode); } /** * Switch on/off the editor into the disabled state. * When in disabled, the user is not able to change the editor content * This function firing the `disabled` event. */ setDisabled(isDisabled: boolean): void { this.o.disabled = isDisabled; const readOnly: boolean = this.__wasReadOnly; this.setReadOnly(isDisabled || readOnly); this.__wasReadOnly = readOnly; if (this.editor) { this.editor.setAttribute('aria-disabled', isDisabled.toString()); this.container.classList.toggle('jodit_disabled', isDisabled); this.e.fire('disabled', isDisabled); } } /** * Return true if editor in disabled mode */ getDisabled(): boolean { return this.o.disabled; } /** * Switch on/off the editor into the read-only state. * When in readonly, the user is not able to change the editor content, but can still * use some editor functions (show source code, print content, or seach). * This function firing the `readonly` event. */ setReadOnly(isReadOnly: boolean): void { if (this.__wasReadOnly === isReadOnly) { return; } this.__wasReadOnly = isReadOnly; this.o.readonly = isReadOnly; if (isReadOnly) { this.editor && this.editor.removeAttribute('contenteditable'); } else { this.editor && this.editor.setAttribute('contenteditable', 'true'); } this.e && this.e.fire('readonly', isReadOnly); } /** * Return true if editor in read-only mode */ getReadOnly(): boolean { return this.o.readonly; } /** * Hook before init */ beforeInitHook(): CanPromise<void> { // do nothing } /** * Hook after init */ afterInitHook(): void { // do nothing } /** @override **/ protected override initOptions(options?: object): void { this.options = <Config>( ConfigProto(options || {}, Config.defaultOptions) ); } /** @override **/ protected override initOwners(): void { // in iframe it can be changed this.editorWindow = this.o.ownerWindow; this.ownerWindow = this.o.ownerWindow; } /** * Create instance of Jodit * * @param element - Selector or HTMLElement * @param options - Editor's options */ constructor(element: HTMLElement | string, options?: object) { super(options as IViewOptions, true); try { resolveElement(element, this.o.shadowRoot || this.od); // check element valid } catch (e) { this.destruct(); throw e; } this.setStatus(STATUSES.beforeInit); this.id = attr(resolveElement(element, this.o.shadowRoot || this.od), 'id') || new Date().getTime().toString(); instances[this.id] = this; this.storage = Storage.makeStorage(true, this.id); this.attachEvents(options as IViewOptions); this.e.on(this.ow, 'resize', () => { if (this.e) { this.e.fire('resize'); } }); this.e.on('prepareWYSIWYGEditor', this.prepareWYSIWYGEditor); this.selection = new Select(this); const beforeInitHookResult = this.beforeInitHook(); callPromise(beforeInitHookResult, (): void => { this.e.fire('beforeInit', this); const initPluginsResult = pluginSystem.init(this); callPromise(initPluginsResult, () => { this.e.fire('afterPluginSystemInit', this); this.e.on('changePlace', () => { this.setReadOnly(this.o.readonly); this.setDisabled(this.o.disabled); }); this.places.length = 0; const addPlaceResult = this.addPlace(element, options); instances[this.id] = this; const init = () => { if (this.e) { this.e.fire('afterInit', this); } this.afterInitHook(); this.setStatus(STATUSES.ready); this.e.fire('afterConstructor', this); }; callPromise(addPlaceResult, init); }); }); } currentPlace!: IWorkPlace; places!: IWorkPlace[]; private elementToPlace: Map<HTMLElement, IWorkPlace> = new Map(); /** * Create and init current editable place */ addPlace( source: HTMLElement | string, options?: object ): void | Promise<any> { const element = resolveElement(source, this.o.shadowRoot || this.od); this.attachEvents(options as IViewOptions); if (element.attributes) { toArray(element.attributes).forEach((attr: Attr) => { const name: string = attr.name; let value: string | boolean | number = attr.value; if ( (Config.defaultOptions as any)[name] !== undefined && (!options || (options as any)[name] === undefined) ) { if (['readonly', 'disabled'].indexOf(name) !== -1) { value = value === '' || value === 'true'; } if (/^[0-9]+(\.)?([0-9]+)?$/.test(value.toString())) { value = Number(value); } (this.options as any)[name] = value; } }); } let container = this.c.div('jodit-container'); container.classList.add('jodit'); container.classList.add('jodit-container'); container.classList.add(`jodit_theme_${this.o.theme || 'default'}`); const { styleValues } = this.o; Object.keys(styleValues).forEach(key => { const property = kebabCase(key); container.style.setProperty(`--jd-${property}`, styleValues[key]); }); container.setAttribute('contenteditable', 'false'); let buffer: null | string = null; if (this.o.inline) { if (['TEXTAREA', 'INPUT'].indexOf(element.nodeName) === -1) { container = element as HTMLDivElement; element.setAttribute( this.__defaultClassesKey, element.className.toString() ); buffer = container.innerHTML; container.innerHTML = ''; } container.classList.add('jodit_inline'); container.classList.add('jodit-container'); } // actual for inline mode if (element !== container) { // hide source element if (element.style.display) { element.setAttribute( this.__defaultStyleDisplayKey, element.style.display ); } element.style.display = 'none'; } const workplace = this.c.div('jodit-workplace', { contenteditable: false }); container.appendChild(workplace); const statusbar = new StatusBar(this, container); if (element.parentNode && element !== container) { element.parentNode.insertBefore(container, element); } Object.defineProperty(element, 'component', { enumerable: false, configurable: true, value: this }); const editor = this.c.div('jodit-wysiwyg', { contenteditable: true, 'aria-disabled': false, tabindex: this.o.tabIndex }); workplace.appendChild(editor); const currentPlace: IWorkPlace = { editor, element, container, workplace, statusbar, options: this.isReady ? (ConfigProto( options || {}, Config.defaultOptions ) as IWorkPlace['options']) : this.options, observer: new Observer(this), editorWindow: this.ow }; this.elementToPlace.set(editor, currentPlace); this.setCurrentPlace(currentPlace); this.places.push(currentPlace); this.setNativeEditorValue(this.getElementValue()); // Init value const initResult = this.initEditor(buffer); const opt = this.options; const init = () => { if ( opt.enableDragAndDropFileToEditor && opt.uploader && (opt.uploader.url || opt.uploader.insertImageAsBase64URI) ) { this.uploader.bind(this.editor); } // in initEditor - the editor could change if (!this.elementToPlace.get(this.editor)) { this.elementToPlace.set(this.editor, currentPlace); } this.e.fire('afterAddPlace', currentPlace); }; return callPromise(initResult, init); } /** @override */ protected override addDisclaimer(elm: HTMLElement): void { this.workplace.appendChild(elm); } /** * Set current place object */ setCurrentPlace(place: IWorkPlace): void { if (this.currentPlace === place) { return; } if (!this.isEditorMode()) { this.setMode(consts.MODE_WYSIWYG); } this.currentPlace = place; this.buildToolbar(); if (this.isReady) { this.e.fire('changePlace', place); } } private initEditor(buffer: null | string): void | Promise<any> { const result = this.createEditor(); return callPromise(result, () => { if (this.isInDestruct) { return; } // syncro if (this.element !== this.container) { this.setElementValue(); } else { buffer != null && this.setEditorValue(buffer); // inline mode } let mode = this.o.defaultMode; if (this.o.saveModeInStorage) { const localMode = this.storage.get('jodit_default_mode'); if (typeof localMode === 'string') { mode = parseInt(localMode, 10); } } this.setMode(mode); if (this.o.readonly) { this.__wasReadOnly = false; this.setReadOnly(true); } if (this.o.disabled) { this.setDisabled(true); } // if enter plugin not installed try { this.ed.execCommand( 'defaultParagraphSeparator', false, this.o.enter.toLowerCase() ); } catch {} // fix for native resizing try { this.ed.execCommand('enableObjectResizing', false, 'false'); } catch {} try { this.ed.execCommand('enableInlineTableEditing', false, 'false'); } catch {} }); } /** * Create main DIV element and replace source textarea */ private createEditor(): void | Promise<any> { const defaultEditorArea = this.editor; const stayDefault: boolean | undefined | Promise<void> = this.e.fire( 'createEditor', this ); return callPromise(stayDefault, () => { if (this.isInDestruct) { return; } if (stayDefault === false || isPromise(stayDefault)) { Dom.safeRemove(defaultEditorArea); } if (this.o.editorCssClass) { this.editor.classList.add(this.o.editorCssClass); } if (this.o.style) { css(this.editor, this.o.style); } this.e .on('synchro', () => { this.setEditorValue(); }) .on('focus', () => { this.editorIsActive = true; }) .on('blur', () => (this.editorIsActive = false)); this.prepareWYSIWYGEditor(); // direction if (this.o.direction) { const direction = this.o.direction.toLowerCase() === 'rtl' ? 'rtl' : 'ltr'; this.container.style.direction = direction; this.container.setAttribute('dir', direction); this.toolbar.setDirection(direction); } if (this.o.triggerChangeEvent) { this.e.on( 'change', this.async.debounce(() => { this.e && this.e.fire(this.element, 'change'); }, this.defaultTimeout) ); } }); } /** * Attach some native event listeners */ @autobind private prepareWYSIWYGEditor() { const { editor } = this; if (this.o.spellcheck) { this.editor.setAttribute('spellcheck', 'true'); } else { this.editor.setAttribute('spellcheck', 'false'); } // direction if (this.o.direction) { const direction = this.o.direction.toLowerCase() === 'rtl' ? 'rtl' : 'ltr'; this.editor.style.direction = direction; this.editor.setAttribute('dir', direction); } // proxy events this.e .on(editor, 'mousedown touchstart focus', () => { const place = this.elementToPlace.get(editor); if (place) { this.setCurrentPlace(place); } }) .on(editor, 'compositionend', () => { this.setEditorValue(); }) .on( editor, 'selectionchange selectionstart keydown keyup input keypress dblclick mousedown mouseup ' + 'click copy cut dragstart drop dragover paste resize touchstart touchend focus blur', (event: Event): false | void => { if (this.o.readonly || this.isSilentChange) { return; } const w = this.ew; if ( event instanceof (w as any).KeyboardEvent && (event as KeyboardEvent).isComposing ) { return; } if (this.e && this.e.fire) { if (this.e.fire(event.type, event) === false) { return false; } this.setEditorValue(); } } ); } /** * Jodit's Destructor. Remove editor, and return source input */ override destruct(): void { if (this.isInDestruct) { return; } this.setStatus(STATUSES.beforeDestruct); this.elementToPlace.clear(); if (!this.editor) { return; } const buffer = this.getEditorValue(); this.storage.clear(); this.buffer.clear(); this.commands = {}; this.__selectionLocked = null; this.e.off(this.ow, 'resize'); this.e.off(this.ow); this.e.off(this.od); this.e.off(this.od.body); this.places.forEach( ({ container, workplace, statusbar, element, iframe, editor, observer }) => { if (element !== container) { if (element.hasAttribute(this.__defaultStyleDisplayKey)) { const display = attr( element, this.__defaultStyleDisplayKey ); if (display) { element.style.display = display; element.removeAttribute( this.__defaultStyleDisplayKey ); } } else { element.style.display = ''; } } else { if (element.hasAttribute(this.__defaultClassesKey)) { element.className = attr(element, this.__defaultClassesKey) || ''; element.removeAttribute(this.__defaultClassesKey); } } if (element.hasAttribute('style') && !attr(element, 'style')) { element.removeAttribute('style');
this.e.off(container); this.e.off(element); this.e.off(editor); Dom.safeRemove(workplace); Dom.safeRemove(editor); if (container !== element) { Dom.safeRemove(container); } Object.defineProperty(element, 'component', { enumerable: false, configurable: true, value: null }); Dom.safeRemove(iframe); // inline mode if (container === element) { element.innerHTML = buffer; } !observer.isInDestruct && observer.destruct(); } ); this.places.length = 0; this.currentPlace = {} as any; delete instances[this.id]; super.destruct(); } }
} !statusbar.isInDestruct && statusbar.destruct();
graph.rs
use std::cell::RefCell; use std::collections::BTreeMap; use std::fmt; use serde::{Deserialize, Serialize}; use super::fmt::FmtGuard; use super::variable::{Keywords, Value}; pub type Outs = BTreeMap<String, Out>; #[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct OutDim { pub out: Out, pub dim: usize, } impl From<OutDim> for Value { fn from(value: OutDim) -> Self { Self::Dim(value) } } impl fmt::Debug for OutDim { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?}[{}]", &self.out, self.dim) } } #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] pub struct Out { pub id: Option<u64>, pub name: String, } impl Out { pub fn with_name(name: String) -> Self { Self { id: None, name } } pub fn new(id: u64, name: String) -> Self { Self { id: Some(id), name } } } impl fmt::Debug for Out { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self.name)?; write!(f, "$")?; if let Some(id) = &self.id { write!(f, "{}", id)?; } Ok(()) } } #[derive(Clone, PartialEq)] pub struct Shape(pub Vec<Value>); impl Shape { pub fn sum(&self) -> Value { self.0.iter().sum() } pub fn product(&self) -> Value { self.0.iter().product() } } impl fmt::Debug for Shape { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for dim in &self.0 { write!(f, "{:?}, ", dim)?; } Ok(()) } } #[derive(Clone, PartialEq)] pub struct Shapes(pub RefCell<ShapesInner>); type ShapesInner = BTreeMap<String, Option<Shape>>; impl Shapes { pub fn new(shapes: ShapesInner) -> Self { Self(RefCell::new(shapes)) } pub fn to_outs(&self, id: u64) -> Outs { self.0 .borrow() .keys() .map(|n| (n.clone(), Out::new(id, n.clone()))) .collect() } } crate::impl_debug_no_guard!(Shapes); impl<'a> fmt::Debug for FmtGuard<'a, Shapes> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let borrowed = self.0.borrow(); if borrowed.len() == 1 { if let Some(Some(shape)) = borrowed.get("x") { return writeln!(f, " = {:?}", shape); } } let indent = self.indent(); writeln!(f, ":")?; for (name, shape) in borrowed.iter() { write!(f, "{}{}", &indent, name)?; match shape { Some(shape) => writeln!(f, " = {:?}", shape)?, None => writeln!(f)?, } writeln!(f)?; } Ok(()) } } #[derive(Clone)] pub enum GraphInputs { Dict(Outs), List(Vec<Out>), } impl Default for GraphInputs { fn default() -> Self { Self::Dict(Outs::default()) } } impl GraphInputs { pub fn ty(&self) -> GraphInputsType { match self { Self::Dict(_) => GraphInputsType::Dict, Self::List(_) => GraphInputsType::List, } } pub fn unwrap_dict(self) -> Option<Outs> { match self { Self::Dict(outs) => Some(outs), _ => None, } } pub fn unwrap_list(self) -> Option<Vec<Out>> { match self { Self::List(outs) => Some(outs), _ => None, } } } impl fmt::Debug for GraphInputs { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
} #[derive(Copy, Clone, Debug, PartialEq)] pub enum GraphInputsType { UseLast, Dict, List, } #[derive(Clone)] pub struct GraphCall { pub name: String, pub inputs: Option<GraphInputs>, pub args: Option<Keywords>, pub repeat: Option<Value>, } impl GraphCall { pub fn get_inputs_ty(&self) -> GraphInputsType { match &self.inputs { Some(inputs) => inputs.ty(), None => GraphInputsType::UseLast, } } } impl fmt::Debug for GraphCall { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self.name)?; if let Some(inputs) = &self.inputs { inputs.fmt(f)?; } if let Some(args) = &self.args { write!(f, "(")?; for (name, value) in args { write!(f, "{}={:?}, ", name, value)?; } write!(f, ")")?; } if let Some(repeat) = &self.repeat { write!(f, " * {:?}", repeat)?; } Ok(()) } } #[derive(Clone)] pub struct GraphNode { pub id: u64, pub calls: Vec<GraphCall>, pub shapes: Option<Shapes>, } crate::impl_debug_no_guard!(GraphNode); impl<'a> fmt::Debug for FmtGuard<'a, GraphNode> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let indent = self.indent(); write!(f, "{}{}. ", &indent, self.id)?; for value in &self.calls { write!(f, "{:?} + ", value)?; } if let Some(shapes) = &self.shapes { self.child(shapes).fmt(f) } else { writeln!(f) } } }
{ match self { Self::Dict(dict) => { write!(f, "{{")?; for (k, x) in dict { write!(f, "{}={:?}, ", k, x)?; } write!(f, "}}") } Self::List(list) => { write!(f, "[")?; for x in list { write!(f, "{:?}, ", x)?; } write!(f, "]") } } }
config.js
/*global define:false*/ define(['text!plugins/keyvalue/template.html', 'plugins/keyvalue/model', 'text!plugins/keyvalue/setupTemplate.html'], function (keyvaluePluginTemplate, keyvaluePluginModel, setupTemplate) { 'use strict'; return { name : 'keyvaluePlugin', ModelType : keyvaluePluginModel, modelData : { min : 1, max : 1, options : false, label : '', type : 'keyvalue', dataType : 'object', validation : [], value : {'' : ''} }, template : keyvaluePluginTemplate, setupTemplate : setupTemplate, events : { 'blur .keyValueInput' : 'buildObj' }, wrapper: false
}; });
output-plugin.js
'use strict'; const _ = require('underscore'); const changeCase = require('change-case'); class
{ constructor(options) { _.extend(this, options); } save(data) { throw new Error('UNIMPLEMENTED'); } } module.exports = OutputPlugin;
OutputPlugin
onion.py
from veggies import Veggies class Onion(Veggies): def __str__(self):
return 'Onion'
config.rs
use dirs; use clap::{Arg, ArgMatches}; use super::term; use std::path::PathBuf; const APPLICATION_DIRECTORY_NAME: &'static str = "anonify"; const APPLICATION_ENVIRONMENT_ROOT_DIR: &'static str = "ANONIFY_ROOT_DIR"; pub const VERSION: u32 = 1; pub const ITERS: u32 = 1024; /// root directory configuration pub(crate) fn
() -> PathBuf { match dirs::data_local_dir() { Some(dir) => dir.join(APPLICATION_DIRECTORY_NAME), None => panic!("Undefined the local data directory."), } } pub(crate) fn global_rootdir_definition<'a, 'b>(default: &'a PathBuf) -> Arg<'a, 'b> { Arg::with_name("ROOT_DIR") .long("root_dir") .help("the zface root direction") .default_value(default.to_str().unwrap()) .env(APPLICATION_ENVIRONMENT_ROOT_DIR) } pub(crate) fn global_rootdir_match<'a>(default: &'a PathBuf, matches: &ArgMatches<'a>) -> PathBuf { match matches.value_of("ROOT_DIR") { Some(dir) => PathBuf::from(dir), None => PathBuf::from(default), } } // quiet configuration pub(crate) fn global_quiet_definition<'a, 'b>() -> Arg<'a, 'b> { Arg::with_name("QUIET") .long("quiet") .global(true) .help("run the command quietly, do not print anything to the command line output") } pub(crate) fn global_quiet_option(matches: &ArgMatches) -> bool { matches.is_present("QUIET") } // color configuration pub(crate) fn global_color_definition<'a, 'b>() -> Arg<'a, 'b> { Arg::with_name("COLOR") .long("color") .takes_value(true) .default_value("auto") .possible_values(&["auto", "always", "never"]) .global(true) .help("enable output colors or not") } pub(crate) fn global_color_option(matches: &ArgMatches) -> term::ColorChoice { match matches.value_of("COLOR") { None => term::ColorChoice::Auto, Some("auto") => term::ColorChoice::Auto, Some("always") => term::ColorChoice::Always, Some("never") => term::ColorChoice::Never, Some(&_) => unreachable!(), } } // verbosity configuration pub(crate) fn global_verbose_definition<'a, 'b>() -> Arg<'a, 'b> { Arg::with_name("VERBOSITY") .long("verbose") .short("v") .multiple(true) .global(true) .help("set the verbosity mode, multiple occurrences means more verbosity") } pub(crate) fn global_verbose_option<'a>(matches: &ArgMatches<'a>) -> u64 { matches.occurrences_of("VERBOSITY") } pub(crate) fn config_terminal(matches: &ArgMatches) -> term::Config { let quiet = global_quiet_option(matches); let color = global_color_option(matches); let verbosity = global_verbose_option(matches); if !quiet { let log_level = match verbosity { 0 => log::LevelFilter::Warn, 1 => log::LevelFilter::Info, 2 => log::LevelFilter::Debug, _ => log::LevelFilter::Trace, }; env_logger::Builder::from_default_env() .filter_level(log_level) .init(); } term::Config { color, quiet, } }
get_default_root_dir
RedisStore.js
'use strict'; const defaults = require('defaults'); const redis = require('sota-core').load('cache/foundation/RedisCache'); const RedisStore = function (options) { options = defaults(options, { expiry: 60, // default expiry is one minute prefix: "rl:", resetExpiryOnChange: false }); let expiryMs = Math.round(1000 * options.expiry); // create the client if one isn't provided options.client = options.client || redis.getClient(); let setExpire = function (replies, rdskey) { // if this is new or has no expiry
} }; const processReplies = function (replies) { // in ioredis, every reply consists of an array [err, value]. // We don't need the error here, and if we aren't dealing with an array, // nothing is changed. return replies.map(function (val) { if (Array.isArray(val) && val.length >= 2) { return val[1]; } return val; }); }; this.incr = function (key, cb) { let rdskey = options.prefix + key; options.client.multi() .incr(rdskey) .pttl(rdskey) .exec(function (err, replies) { if (err) { return cb(err); } replies = processReplies(replies); setExpire(replies, rdskey); cb(null, replies[0]); }); }; this.decrement = function (key) { let rdskey = options.prefix + key; options.client.multi() .decr(rdskey) .pttl(rdskey) .exec(function (err, replies) { if (err) { return; } replies = processReplies(replies); setExpire(replies, rdskey); }); }; this.resetKey = function (key) { let rdskey = options.prefix + key; options.client.del(rdskey); }; }; module.exports = RedisStore;
if (options.resetExpiryOnChange || replies[0] === 1 || replies[1] === -1) { // then expire it after the timeout options.client.pexpire(rdskey, expiryMs);
trainer_test.py
# pylint: disable=invalid-name import glob import os import re import time import torch import pytest from allennlp.common.testing import AllenNlpTestCase from allennlp.training.trainer import Trainer, sparse_clip_norm, is_sparse from allennlp.data import Vocabulary from allennlp.common.params import Params from allennlp.common.checks import ConfigurationError from allennlp.models.simple_tagger import SimpleTagger from allennlp.data.iterators import BasicIterator from allennlp.data.dataset_readers import SequenceTaggingDatasetReader class TestTrainer(AllenNlpTestCase): def setUp(self): super(TestTrainer, self).setUp() self.instances = SequenceTaggingDatasetReader().read('tests/fixtures/data/sequence_tagging.tsv') vocab = Vocabulary.from_instances(self.instances) self.vocab = vocab self.model_params = Params({ "text_field_embedder": { "tokens": { "type": "embedding", "embedding_dim": 5 } }, "encoder": { "type": "lstm", "input_size": 5, "hidden_size": 7, "num_layers": 2 } }) self.model = SimpleTagger.from_params(self.vocab, self.model_params) self.optimizer = torch.optim.SGD(self.model.parameters(), 0.01) self.iterator = BasicIterator(batch_size=2)
trainer = Trainer(model=self.model, optimizer=self.optimizer, iterator=self.iterator, train_dataset=self.instances, validation_dataset=self.instances, num_epochs=2) metrics = trainer.train() assert 'best_validation_loss' in metrics assert isinstance(metrics['best_validation_loss'], float) assert 'best_epoch' in metrics assert isinstance(metrics['best_epoch'], int) # Making sure that both increasing and decreasing validation metrics work. trainer = Trainer(model=self.model, optimizer=self.optimizer, iterator=self.iterator, train_dataset=self.instances, validation_dataset=self.instances, validation_metric='+loss', num_epochs=2) metrics = trainer.train() assert 'best_validation_loss' in metrics assert isinstance(metrics['best_validation_loss'], float) assert 'best_epoch' in metrics assert isinstance(metrics['best_epoch'], int) @pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device registered.") def test_trainer_can_run_cuda(self): trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, num_epochs=2, cuda_device=0) trainer.train() @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Need multiple GPUs.") def test_trainer_can_run_multiple_gpu(self): multigpu_iterator = BasicIterator(batch_size=4) multigpu_iterator.index_with(self.vocab) trainer = Trainer(self.model, self.optimizer, multigpu_iterator, self.instances, num_epochs=2, cuda_device=[0, 1]) trainer.train() def test_trainer_can_resume_training(self): trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, validation_dataset=self.instances, num_epochs=1, serialization_dir=self.TEST_DIR) trainer.train() new_trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, validation_dataset=self.instances, num_epochs=3, serialization_dir=self.TEST_DIR) epoch, val_metrics_per_epoch = new_trainer._restore_checkpoint() # pylint: disable=protected-access assert epoch == 1 assert len(val_metrics_per_epoch) == 1 assert isinstance(val_metrics_per_epoch[0], float) assert val_metrics_per_epoch[0] != 0. new_trainer.train() def test_should_stop_early_with_increasing_metric(self): new_trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, validation_dataset=self.instances, num_epochs=3, serialization_dir=self.TEST_DIR, patience=5, validation_metric="+test") assert new_trainer._should_stop_early([.5, .3, .2, .1, .4, .4]) # pylint: disable=protected-access assert not new_trainer._should_stop_early([.3, .3, .3, .2, .5, .1]) # pylint: disable=protected-access def test_should_stop_early_with_decreasing_metric(self): new_trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, validation_dataset=self.instances, num_epochs=3, serialization_dir=self.TEST_DIR, patience=5, validation_metric="-test") assert new_trainer._should_stop_early([.02, .3, .2, .1, .4, .4]) # pylint: disable=protected-access assert not new_trainer._should_stop_early([.3, .3, .2, .1, .4, .5]) # pylint: disable=protected-access def test_train_driver_raises_on_model_with_no_loss_key(self): class FakeModel(torch.nn.Module): def forward(self, **kwargs): # pylint: disable=arguments-differ,unused-argument return {} with pytest.raises(ConfigurationError): trainer = Trainer(FakeModel(), self.optimizer, self.iterator, self.instances, num_epochs=2, serialization_dir=self.TEST_DIR) trainer.train() def test_trainer_can_log_histograms(self): # enable activation logging for module in self.model.modules(): module.should_log_activations = True trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, num_epochs=3, serialization_dir=self.TEST_DIR, histogram_interval=2) trainer.train() def test_trainer_respects_num_serialized_models_to_keep(self): trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, num_epochs=5, serialization_dir=self.TEST_DIR, num_serialized_models_to_keep=3) trainer.train() # Now check the serialized files for prefix in ['model_state_epoch_*', 'training_state_epoch_*']: file_names = glob.glob(os.path.join(self.TEST_DIR, prefix)) epochs = [int(re.search(r"_([0-9])\.th", fname).group(1)) for fname in file_names] assert sorted(epochs) == [2, 3, 4] def test_trainer_respects_keep_serialized_model_every_num_seconds(self): # To test: # Create an iterator that sleeps for 0.5 second per epoch, so the total training # time for one epoch is slightly greater then 0.5 seconds. # Run for 6 epochs, keeping the last 2 models, models also kept every 1 second. # Check the resulting checkpoints. Should then have models at epochs # 2, 4, plus the last two at 5 and 6. class WaitingIterator(BasicIterator): # pylint: disable=arguments-differ def _create_batches(self, *args, **kwargs): time.sleep(0.5) return super(WaitingIterator, self)._create_batches(*args, **kwargs) iterator = WaitingIterator(batch_size=2) iterator.index_with(self.vocab) trainer = Trainer(self.model, self.optimizer, iterator, self.instances, num_epochs=6, serialization_dir=self.TEST_DIR, num_serialized_models_to_keep=2, keep_serialized_model_every_num_seconds=1) trainer.train() # Now check the serialized files for prefix in ['model_state_epoch_*', 'training_state_epoch_*']: file_names = glob.glob(os.path.join(self.TEST_DIR, prefix)) epochs = [int(re.search(r"_([0-9])\.th", fname).group(1)) for fname in file_names] # epoch N has N-1 in file name assert sorted(epochs) == [1, 3, 4, 5] def test_trainer_saves_models_at_specified_interval(self): iterator = BasicIterator(batch_size=4) iterator.index_with(self.vocab) trainer = Trainer(self.model, self.optimizer, iterator, self.instances, num_epochs=2, serialization_dir=self.TEST_DIR, model_save_interval=0.0001) trainer.train() # Now check the serialized files for models saved during the epoch. prefix = 'model_state_epoch_*' file_names = sorted(glob.glob(os.path.join(self.TEST_DIR, prefix))) epochs = [re.search(r"_([0-9\.\-]+)\.th", fname).group(1) for fname in file_names] # We should have checkpoints at the end of each epoch and during each, e.g. # [0.timestamp, 0, 1.timestamp, 1] assert len(epochs) == 4 assert epochs[3] == '1' assert '.' in epochs[0] # Now make certain we can restore from timestamped checkpoint. # To do so, remove the checkpoint from the end of epoch 1&2, so # that we are forced to restore from the timestamped checkpoints. for k in range(2): os.remove(os.path.join(self.TEST_DIR, 'model_state_epoch_{}.th'.format(k))) os.remove(os.path.join(self.TEST_DIR, 'training_state_epoch_{}.th'.format(k))) os.remove(os.path.join(self.TEST_DIR, 'best.th')) restore_trainer = Trainer(self.model, self.optimizer, self.iterator, self.instances, num_epochs=2, serialization_dir=self.TEST_DIR, model_save_interval=0.0001) epoch, _ = restore_trainer._restore_checkpoint() # pylint: disable=protected-access assert epoch == 2 # One batch per epoch. assert restore_trainer._batch_num_total == 2 # pylint: disable=protected-access class TestSparseClipGrad(AllenNlpTestCase): def test_sparse_clip_grad(self): # create a sparse embedding layer, then take gradient embedding = torch.nn.Embedding(100, 16, sparse=True) embedding.zero_grad() ids = torch.autograd.Variable((torch.rand(17) * 100).long()) # Set some of the ids to the same value so that the sparse gradient # has repeated indices. This tests some additional logic. ids[:5] = 5 loss = embedding(ids).sum() loss.backward() assert is_sparse(embedding.weight.grad) # Now try to clip the gradients. _ = sparse_clip_norm([embedding.weight], 1.5) # Final norm should be 1.5 grad = embedding.weight.grad.data.coalesce() self.assertAlmostEqual(grad._values().norm(2.0), 1.5, places=5) # pylint: disable=protected-access
self.iterator.index_with(vocab) def test_trainer_can_run(self):
closure.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Code for type-checking closure expressions. use super::{check_fn, Expectation, FnCtxt, GeneratorTypes}; use astconv::AstConv; use rustc::hir::def_id::DefId; use rustc::infer::{InferOk, InferResult}; use rustc::infer::LateBoundRegionConversionTime; use rustc::infer::type_variable::TypeVariableOrigin; use rustc::traits::error_reporting::ArgKind; use rustc::ty::{self, ToPolyTraitRef, Ty}; use rustc::ty::subst::Substs; use rustc::ty::TypeFoldable; use std::cmp; use std::iter; use syntax::abi::Abi; use syntax::codemap::Span; use rustc::hir; /// What signature do we *expect* the closure to have from context? #[derive(Debug)] struct ExpectedSig<'tcx> { /// Span that gave us this expectation, if we know that. cause_span: Option<Span>, sig: ty::FnSig<'tcx>, } struct
<'tcx> { bound_sig: ty::PolyFnSig<'tcx>, liberated_sig: ty::FnSig<'tcx>, } impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn check_expr_closure( &self, expr: &hir::Expr, _capture: hir::CaptureClause, decl: &'gcx hir::FnDecl, body_id: hir::BodyId, gen: Option<hir::GeneratorMovability>, expected: Expectation<'tcx>, ) -> Ty<'tcx> { debug!( "check_expr_closure(expr={:?},expected={:?})", expr, expected ); // It's always helpful for inference if we know the kind of // closure sooner rather than later, so first examine the expected // type, and see if can glean a closure kind from there. let (expected_sig, expected_kind) = match expected.to_option(self) { Some(ty) => self.deduce_expectations_from_expected_type(ty), None => (None, None), }; let body = self.tcx.hir.body(body_id); self.check_closure(expr, expected_kind, decl, body, gen, expected_sig) } fn check_closure( &self, expr: &hir::Expr, opt_kind: Option<ty::ClosureKind>, decl: &'gcx hir::FnDecl, body: &'gcx hir::Body, gen: Option<hir::GeneratorMovability>, expected_sig: Option<ExpectedSig<'tcx>>, ) -> Ty<'tcx> { debug!( "check_closure(opt_kind={:?}, expected_sig={:?})", opt_kind, expected_sig ); let expr_def_id = self.tcx.hir.local_def_id(expr.id); let ClosureSignatures { bound_sig, liberated_sig, } = self.sig_of_closure(expr_def_id, decl, body, expected_sig); debug!("check_closure: ty_of_closure returns {:?}", liberated_sig); let generator_types = check_fn( self, self.param_env, liberated_sig, decl, expr.id, body, gen, ).1; // Create type variables (for now) to represent the transformed // types of upvars. These will be unified during the upvar // inference phase (`upvar.rs`). let base_substs = Substs::identity_for_item(self.tcx, self.tcx.closure_base_def_id(expr_def_id)); let substs = base_substs.extend_to( self.tcx, expr_def_id, |_, _| span_bug!(expr.span, "closure has region param"), |_, _| { self.infcx .next_ty_var(TypeVariableOrigin::ClosureSynthetic(expr.span)) }, ); let substs = ty::ClosureSubsts { substs }; let closure_type = self.tcx.mk_closure(expr_def_id, substs); if let Some(GeneratorTypes { yield_ty, interior }) = generator_types { self.demand_eqtype( expr.span, yield_ty, substs.generator_yield_ty(expr_def_id, self.tcx), ); self.demand_eqtype( expr.span, liberated_sig.output(), substs.generator_return_ty(expr_def_id, self.tcx), ); return self.tcx.mk_generator(expr_def_id, substs, interior); } debug!( "check_closure: expr.id={:?} closure_type={:?}", expr.id, closure_type ); // Tuple up the arguments and insert the resulting function type into // the `closures` table. let sig = bound_sig.map_bound(|sig| { self.tcx.mk_fn_sig( iter::once(self.tcx.intern_tup(sig.inputs(), false)), sig.output(), sig.variadic, sig.unsafety, sig.abi, ) }); debug!( "check_closure: expr_def_id={:?}, sig={:?}, opt_kind={:?}", expr_def_id, sig, opt_kind ); let sig_fn_ptr_ty = self.tcx.mk_fn_ptr(sig); self.demand_eqtype( expr.span, sig_fn_ptr_ty, substs.closure_sig_ty(expr_def_id, self.tcx), ); if let Some(kind) = opt_kind { self.demand_eqtype( expr.span, kind.to_ty(self.tcx), substs.closure_kind_ty(expr_def_id, self.tcx), ); } closure_type } /// Given the expected type, figures out what it can about this closure we /// are about to type check: fn deduce_expectations_from_expected_type( &self, expected_ty: Ty<'tcx>, ) -> (Option<ExpectedSig<'tcx>>, Option<ty::ClosureKind>) { debug!( "deduce_expectations_from_expected_type(expected_ty={:?})", expected_ty ); match expected_ty.sty { ty::TyDynamic(ref object_type, ..) => { let sig = object_type .projection_bounds() .filter_map(|pb| { let pb = pb.with_self_ty(self.tcx, self.tcx.types.err); self.deduce_sig_from_projection(None, &pb) }) .next(); let kind = object_type .principal() .and_then(|p| self.tcx.lang_items().fn_trait_kind(p.def_id())); (sig, kind) } ty::TyInfer(ty::TyVar(vid)) => self.deduce_expectations_from_obligations(vid), ty::TyFnPtr(sig) => { let expected_sig = ExpectedSig { cause_span: None, sig: sig.skip_binder().clone(), }; (Some(expected_sig), Some(ty::ClosureKind::Fn)) } _ => (None, None), } } fn deduce_expectations_from_obligations( &self, expected_vid: ty::TyVid, ) -> (Option<ExpectedSig<'tcx>>, Option<ty::ClosureKind>) { let fulfillment_cx = self.fulfillment_cx.borrow(); // Here `expected_ty` is known to be a type inference variable. let expected_sig = fulfillment_cx .pending_obligations() .iter() .map(|obligation| &obligation.obligation) .filter_map(|obligation| { debug!( "deduce_expectations_from_obligations: obligation.predicate={:?}", obligation.predicate ); match obligation.predicate { // Given a Projection predicate, we can potentially infer // the complete signature. ty::Predicate::Projection(ref proj_predicate) => { let trait_ref = proj_predicate.to_poly_trait_ref(self.tcx); self.self_type_matches_expected_vid(trait_ref, expected_vid) .and_then(|_| { self.deduce_sig_from_projection( Some(obligation.cause.span), proj_predicate, ) }) } _ => None, } }) .next(); // Even if we can't infer the full signature, we may be able to // infer the kind. This can occur if there is a trait-reference // like `F : Fn<A>`. Note that due to subtyping we could encounter // many viable options, so pick the most restrictive. let expected_kind = fulfillment_cx .pending_obligations() .iter() .map(|obligation| &obligation.obligation) .filter_map(|obligation| { let opt_trait_ref = match obligation.predicate { ty::Predicate::Projection(ref data) => Some(data.to_poly_trait_ref(self.tcx)), ty::Predicate::Trait(ref data) => Some(data.to_poly_trait_ref()), ty::Predicate::Equate(..) => None, ty::Predicate::Subtype(..) => None, ty::Predicate::RegionOutlives(..) => None, ty::Predicate::TypeOutlives(..) => None, ty::Predicate::WellFormed(..) => None, ty::Predicate::ObjectSafe(..) => None, ty::Predicate::ConstEvaluatable(..) => None, // NB: This predicate is created by breaking down a // `ClosureType: FnFoo()` predicate, where // `ClosureType` represents some `TyClosure`. It can't // possibly be referring to the current closure, // because we haven't produced the `TyClosure` for // this closure yet; this is exactly why the other // code is looking for a self type of a unresolved // inference variable. ty::Predicate::ClosureKind(..) => None, }; opt_trait_ref .and_then(|tr| self.self_type_matches_expected_vid(tr, expected_vid)) .and_then(|tr| self.tcx.lang_items().fn_trait_kind(tr.def_id())) }) .fold(None, |best, cur| { Some(best.map_or(cur, |best| cmp::min(best, cur))) }); (expected_sig, expected_kind) } /// Given a projection like "<F as Fn(X)>::Result == Y", we can deduce /// everything we need to know about a closure. /// /// The `cause_span` should be the span that caused us to /// have this expected signature, or `None` if we can't readily /// know that. fn deduce_sig_from_projection( &self, cause_span: Option<Span>, projection: &ty::PolyProjectionPredicate<'tcx>, ) -> Option<ExpectedSig<'tcx>> { let tcx = self.tcx; debug!("deduce_sig_from_projection({:?})", projection); let trait_ref = projection.to_poly_trait_ref(tcx); if tcx.lang_items().fn_trait_kind(trait_ref.def_id()).is_none() { return None; } let arg_param_ty = trait_ref.substs().type_at(1); let arg_param_ty = self.resolve_type_vars_if_possible(&arg_param_ty); debug!( "deduce_sig_from_projection: arg_param_ty {:?}", arg_param_ty ); let input_tys = match arg_param_ty.sty { ty::TyTuple(tys, _) => tys.into_iter(), _ => { return None; } }; let ret_param_ty = projection.0.ty; let ret_param_ty = self.resolve_type_vars_if_possible(&ret_param_ty); debug!( "deduce_sig_from_projection: ret_param_ty {:?}", ret_param_ty ); let sig = self.tcx.mk_fn_sig( input_tys.cloned(), ret_param_ty, false, hir::Unsafety::Normal, Abi::Rust, ); debug!("deduce_sig_from_projection: sig {:?}", sig); Some(ExpectedSig { cause_span, sig }) } fn self_type_matches_expected_vid( &self, trait_ref: ty::PolyTraitRef<'tcx>, expected_vid: ty::TyVid, ) -> Option<ty::PolyTraitRef<'tcx>> { let self_ty = self.shallow_resolve(trait_ref.self_ty()); debug!( "self_type_matches_expected_vid(trait_ref={:?}, self_ty={:?})", trait_ref, self_ty ); match self_ty.sty { ty::TyInfer(ty::TyVar(v)) if expected_vid == v => Some(trait_ref), _ => None, } } fn sig_of_closure( &self, expr_def_id: DefId, decl: &hir::FnDecl, body: &hir::Body, expected_sig: Option<ExpectedSig<'tcx>>, ) -> ClosureSignatures<'tcx> { if let Some(e) = expected_sig { self.sig_of_closure_with_expectation(expr_def_id, decl, body, e) } else { self.sig_of_closure_no_expectation(expr_def_id, decl, body) } } /// If there is no expected signature, then we will convert the /// types that the user gave into a signature. fn sig_of_closure_no_expectation( &self, expr_def_id: DefId, decl: &hir::FnDecl, body: &hir::Body, ) -> ClosureSignatures<'tcx> { debug!("sig_of_closure_no_expectation()"); let bound_sig = self.supplied_sig_of_closure(decl); self.closure_sigs(expr_def_id, body, bound_sig) } /// Invoked to compute the signature of a closure expression. This /// combines any user-provided type annotations (e.g., `|x: u32| /// -> u32 { .. }`) with the expected signature. /// /// The approach is as follows: /// /// - Let `S` be the (higher-ranked) signature that we derive from the user's annotations. /// - Let `E` be the (higher-ranked) signature that we derive from the expectations, if any. /// - If we have no expectation `E`, then the signature of the closure is `S`. /// - Otherwise, the signature of the closure is E. Moreover: /// - Skolemize the late-bound regions in `E`, yielding `E'`. /// - Instantiate all the late-bound regions bound in the closure within `S` /// with fresh (existential) variables, yielding `S'` /// - Require that `E' = S'` /// - We could use some kind of subtyping relationship here, /// I imagine, but equality is easier and works fine for /// our purposes. /// /// The key intuition here is that the user's types must be valid /// from "the inside" of the closure, but the expectation /// ultimately drives the overall signature. /// /// # Examples /// /// ``` /// fn with_closure<F>(_: F) /// where F: Fn(&u32) -> &u32 { .. } /// /// with_closure(|x: &u32| { ... }) /// ``` /// /// Here: /// - E would be `fn(&u32) -> &u32`. /// - S would be `fn(&u32) -> /// - E' is `&'!0 u32 -> &'!0 u32` /// - S' is `&'?0 u32 -> ?T` /// /// S' can be unified with E' with `['?0 = '!0, ?T = &'!10 u32]`. /// /// # Arguments /// /// - `expr_def_id`: the def-id of the closure expression /// - `decl`: the HIR declaration of the closure /// - `body`: the body of the closure /// - `expected_sig`: the expected signature (if any). Note that /// this is missing a binder: that is, there may be late-bound /// regions with depth 1, which are bound then by the closure. fn sig_of_closure_with_expectation( &self, expr_def_id: DefId, decl: &hir::FnDecl, body: &hir::Body, expected_sig: ExpectedSig<'tcx>, ) -> ClosureSignatures<'tcx> { debug!( "sig_of_closure_with_expectation(expected_sig={:?})", expected_sig ); // Watch out for some surprises and just ignore the // expectation if things don't see to match up with what we // expect. if expected_sig.sig.variadic != decl.variadic { return self.sig_of_closure_no_expectation(expr_def_id, decl, body); } else if expected_sig.sig.inputs_and_output.len() != decl.inputs.len() + 1 { return self.sig_of_closure_with_mismatched_number_of_arguments( expr_def_id, decl, body, expected_sig, ); } // Create a `PolyFnSig`. Note the oddity that late bound // regions appearing free in `expected_sig` are now bound up // in this binder we are creating. assert!(!expected_sig.sig.has_regions_escaping_depth(1)); let bound_sig = ty::Binder(self.tcx.mk_fn_sig( expected_sig.sig.inputs().iter().cloned(), expected_sig.sig.output(), decl.variadic, hir::Unsafety::Normal, Abi::RustCall, )); // `deduce_expectations_from_expected_type` introduces // late-bound lifetimes defined elsewhere, which we now // anonymize away, so as not to confuse the user. let bound_sig = self.tcx.anonymize_late_bound_regions(&bound_sig); let closure_sigs = self.closure_sigs(expr_def_id, body, bound_sig); // Up till this point, we have ignored the annotations that the user // gave. This function will check that they unify successfully. // Along the way, it also writes out entries for types that the user // wrote into our tables, which are then later used by the privacy // check. match self.check_supplied_sig_against_expectation(decl, &closure_sigs) { Ok(infer_ok) => self.register_infer_ok_obligations(infer_ok), Err(_) => return self.sig_of_closure_no_expectation(expr_def_id, decl, body), } closure_sigs } fn sig_of_closure_with_mismatched_number_of_arguments( &self, expr_def_id: DefId, decl: &hir::FnDecl, body: &hir::Body, expected_sig: ExpectedSig<'tcx>, ) -> ClosureSignatures<'tcx> { let expr_map_node = self.tcx.hir.get_if_local(expr_def_id).unwrap(); let expected_args: Vec<_> = expected_sig .sig .inputs() .iter() .map(|ty| ArgKind::from_expected_ty(ty)) .collect(); let (closure_span, found_args) = self.get_fn_like_arguments(expr_map_node); let expected_span = expected_sig.cause_span.unwrap_or(closure_span); self.report_arg_count_mismatch( expected_span, Some(closure_span), expected_args, found_args, true, ).emit(); let error_sig = self.error_sig_of_closure(decl); self.closure_sigs(expr_def_id, body, error_sig) } /// Enforce the user's types against the expectation. See /// `sig_of_closure_with_expectation` for details on the overall /// strategy. fn check_supplied_sig_against_expectation( &self, decl: &hir::FnDecl, expected_sigs: &ClosureSignatures<'tcx>, ) -> InferResult<'tcx, ()> { // Get the signature S that the user gave. // // (See comment on `sig_of_closure_with_expectation` for the // meaning of these letters.) let supplied_sig = self.supplied_sig_of_closure(decl); debug!( "check_supplied_sig_against_expectation: supplied_sig={:?}", supplied_sig ); // FIXME(#45727): As discussed in [this comment][c1], naively // forcing equality here actually results in suboptimal error // messages in some cases. For now, if there would have been // an obvious error, we fallback to declaring the type of the // closure to be the one the user gave, which allows other // error message code to trigger. // // However, I think [there is potential to do even better // here][c2], since in *this* code we have the precise span of // the type parameter in question in hand when we report the // error. // // [c1]: https://github.com/rust-lang/rust/pull/45072#issuecomment-341089706 // [c2]: https://github.com/rust-lang/rust/pull/45072#issuecomment-341096796 self.infcx.commit_if_ok(|_| { let mut all_obligations = vec![]; // The liberated version of this signature should be be a subtype // of the liberated form of the expectation. for ((hir_ty, &supplied_ty), expected_ty) in decl.inputs.iter() .zip(*supplied_sig.inputs().skip_binder()) // binder moved to (*) below .zip(expected_sigs.liberated_sig.inputs()) // `liberated_sig` is E'. { // Instantiate (this part of..) S to S', i.e., with fresh variables. let (supplied_ty, _) = self.infcx.replace_late_bound_regions_with_fresh_var( hir_ty.span, LateBoundRegionConversionTime::FnCall, &ty::Binder(supplied_ty), ); // recreated from (*) above // Check that E' = S'. let cause = &self.misc(hir_ty.span); let InferOk { value: (), obligations, } = self.at(cause, self.param_env) .eq(*expected_ty, supplied_ty)?; all_obligations.extend(obligations); } let (supplied_output_ty, _) = self.infcx.replace_late_bound_regions_with_fresh_var( decl.output.span(), LateBoundRegionConversionTime::FnCall, &supplied_sig.output(), ); let cause = &self.misc(decl.output.span()); let InferOk { value: (), obligations, } = self.at(cause, self.param_env) .eq(expected_sigs.liberated_sig.output(), supplied_output_ty)?; all_obligations.extend(obligations); Ok(InferOk { value: (), obligations: all_obligations, }) }) } /// If there is no expected signature, then we will convert the /// types that the user gave into a signature. fn supplied_sig_of_closure(&self, decl: &hir::FnDecl) -> ty::PolyFnSig<'tcx> { let astconv: &AstConv = self; // First, convert the types that the user supplied (if any). let supplied_arguments = decl.inputs.iter().map(|a| astconv.ast_ty_to_ty(a)); let supplied_return = match decl.output { hir::Return(ref output) => astconv.ast_ty_to_ty(&output), hir::DefaultReturn(_) => astconv.ty_infer(decl.output.span()), }; let result = ty::Binder(self.tcx.mk_fn_sig( supplied_arguments, supplied_return, decl.variadic, hir::Unsafety::Normal, Abi::RustCall, )); debug!("supplied_sig_of_closure: result={:?}", result); result } /// Converts the types that the user supplied, in case that doing /// so should yield an error, but returns back a signature where /// all parameters are of type `TyErr`. fn error_sig_of_closure(&self, decl: &hir::FnDecl) -> ty::PolyFnSig<'tcx> { let astconv: &AstConv = self; let supplied_arguments = decl.inputs.iter().map(|a| { // Convert the types that the user supplied (if any), but ignore them. astconv.ast_ty_to_ty(a); self.tcx.types.err }); match decl.output { hir::Return(ref output) => { astconv.ast_ty_to_ty(&output); } hir::DefaultReturn(_) => {} } let result = ty::Binder(self.tcx.mk_fn_sig( supplied_arguments, self.tcx.types.err, decl.variadic, hir::Unsafety::Normal, Abi::RustCall, )); debug!("supplied_sig_of_closure: result={:?}", result); result } fn closure_sigs( &self, expr_def_id: DefId, body: &hir::Body, bound_sig: ty::PolyFnSig<'tcx>, ) -> ClosureSignatures<'tcx> { let liberated_sig = self.tcx() .liberate_late_bound_regions(expr_def_id, &bound_sig); let liberated_sig = self.inh.normalize_associated_types_in( body.value.span, body.value.id, self.param_env, &liberated_sig, ); ClosureSignatures { bound_sig, liberated_sig, } } }
ClosureSignatures
structured_light.rs
#![allow(unused_parens)] //! # Structured Light API //! //! Structured light is considered one of the most effective techniques to acquire 3D models. //! This technique is based on projecting a light pattern and capturing the illuminated scene //! from one or more points of view. Since the pattern is coded, correspondences between image //! points and points of the projected pattern can be quickly found and 3D information easily //! retrieved. //! //! One of the most commonly exploited coding strategies is based on trmatime-multiplexing. In this //! case, a set of patterns are successively projected onto the measuring surface. //! The codeword for a given pixel is usually formed by the sequence of illuminance values for that //! pixel across the projected patterns. Thus, the codification is called temporal because the bits //! of the codewords are multiplexed in time [pattern](https://docs.opencv.org/3.2.0/d0/de3/citelist.html#CITEREF_pattern) . //! //! In this module a time-multiplexing coding strategy based on Gray encoding is implemented following the //! (stereo) approach described in 3DUNDERWORLD algorithm [UNDERWORLD](https://docs.opencv.org/3.2.0/d0/de3/citelist.html#CITEREF_UNDERWORLD) . //! For more details, see @ref tutorial_structured_light. use crate::{mod_prelude::*, core, sys, types}; pub mod prelude { pub use { super::StructuredLightPattern, super::GrayCodePattern_ParamsTrait, super::GrayCodePattern, super::SinusoidalPattern_ParamsTrait, super::SinusoidalPattern }; } /// Kyriakos Herakleous, Charalambos Poullis. “3DUNDERWORLD-SLS: An Open-Source Structured-Light Scanning System for Rapid Geometry Acquisition”, arXiv preprint arXiv:1406.6595 (2014). pub const DECODE_3D_UNDERWORLD: i32 = 0; pub const FAPS: i32 = 2; pub const FTP: i32 = 0; pub const PSP: i32 = 1; /// Class implementing the Gray-code pattern, based on [UNDERWORLD](https://docs.opencv.org/3.2.0/d0/de3/citelist.html#CITEREF_UNDERWORLD). /// /// The generation of the pattern images is performed with Gray encoding using the traditional white and black colors. /// /// The information about the two image axes x, y is encoded separately into two different pattern sequences. /// A projector P with resolution (P_res_x, P_res_y) will result in Ncols = log 2 (P_res_x) encoded pattern images representing the columns, and /// in Nrows = log 2 (P_res_y) encoded pattern images representing the rows. /// For example a projector with resolution 1024x768 will result in Ncols = 10 and Nrows = 10. /// /// However, the generated pattern sequence consists of both regular color and color-inverted images: inverted pattern images are images /// with the same structure as the original but with inverted colors. /// This provides an effective method for easily determining the intensity value of each pixel when it is lit (highest value) and /// when it is not lit (lowest value). So for a a projector with resolution 1024x768, the number of pattern images will be Ncols * 2 + Nrows * 2 = 40. pub trait GrayCodePattern: crate::structured_light::StructuredLightPattern { fn as_raw_GrayCodePattern(&self) -> *const c_void; fn as_raw_mut_GrayCodePattern(&mut self) -> *mut c_void; /// Get the number of pattern images needed for the graycode pattern. /// /// ## Returns /// The number of pattern images needed for the graycode pattern. fn get_number_of_pattern_images(&self) -> Result<size_t> { unsafe { sys::cv_structured_light_GrayCodePattern_getNumberOfPatternImages_const(self.as_raw_GrayCodePattern()) }.into_result() } /// Sets the value for white threshold, needed for decoding. /// /// White threshold is a number between 0-255 that represents the minimum brightness difference required for valid pixels, between the graycode pattern and its inverse images; used in getProjPixel method. /// /// ## Parameters /// * value: The desired white threshold value. fn set_white_threshold(&mut self, value: size_t) -> Result<()> { unsafe { sys::cv_structured_light_GrayCodePattern_setWhiteThreshold_size_t(self.as_raw_mut_GrayCodePattern(), value) }.into_result() } /// Sets the value for black threshold, needed for decoding (shadowsmasks computation). /// /// Black threshold is a number between 0-255 that represents the minimum brightness difference required for valid pixels, between the fully illuminated (white) and the not illuminated images (black); used in computeShadowMasks method. /// /// ## Parameters /// * value: The desired black threshold value. fn set_black_threshold(&mut self, value: size_t) -> Result<()> { unsafe { sys::cv_structured_light_GrayCodePattern_setBlackThreshold_size_t(self.as_raw_mut_GrayCodePattern(), value) }.into_result() } /// Generates the all-black and all-white images needed for shadowMasks computation. /// /// To identify shadow regions, the regions of two images where the pixels are not lit by projector's light and thus where there is not coded information, /// the 3DUNDERWORLD algorithm computes a shadow mask for the two cameras views, starting from a white and a black images captured by each camera. /// This method generates these two additional images to project. /// /// ## Parameters /// * blackImage: The generated all-black CV_8U image, at projector's resolution. /// * whiteImage: The generated all-white CV_8U image, at projector's resolution. fn get_images_for_shadow_masks(&self, black_image: &mut dyn core::ToInputOutputArray, white_image: &mut dyn core::ToInputOutputArray) -> Result<()> { input_output_array_arg!(black_image); input_output_array_arg!(white_image); unsafe { sys::cv_structured_light_GrayCodePattern_getImagesForShadowMasks_const_const__InputOutputArrayR_const__InputOutputArrayR(self.as_raw_GrayCodePattern(), black_image.as_raw__InputOutputArray(), white_image.as_raw__InputOutputArray()) }.into_result() } /// For a (x,y) pixel of a camera returns the corresponding projector pixel. /// /// The function decodes each pixel in the pattern images acquired by a camera into their corresponding decimal numbers representing the projector's column and row, /// providing a mapping between camera's and projector's pixel. /// /// ## Parameters /// * patternImages: The pattern images acquired by the camera, stored in a grayscale vector < Mat >. /// * x: x coordinate of the image pixel. /// * y: y coordinate of the image pixel. /// * projPix: Projector's pixel corresponding to the camera's pixel: projPix.x and projPix.y are the image coordinates of the projector’s pixel corresponding to the pixel being decoded in a camera. fn get_proj_pixel(&self, pattern_images: &dyn core::ToInputArray, x: i32, y: i32, proj_pix: &mut core::Point) -> Result<bool> { input_array_arg!(pattern_images); unsafe { sys::cv_structured_light_GrayCodePattern_getProjPixel_const_const__InputArrayR_int_int_PointR(self.as_raw_GrayCodePattern(), pattern_images.as_raw__InputArray(), x, y, proj_pix) }.into_result() } } impl dyn GrayCodePattern + '_ { /// Constructor /// ## Parameters /// * parameters: GrayCodePattern parameters GrayCodePattern::Params: the width and the height of the projector. /// /// ## C++ default parameters /// * parameters: GrayCodePattern::Params() pub fn create(parameters: &crate::structured_light::GrayCodePattern_Params) -> Result<core::Ptr::<dyn crate::structured_light::GrayCodePattern>> { unsafe { sys::cv_structured_light_GrayCodePattern_create_const_ParamsR(parameters.as_raw_GrayCodePattern_Params()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::structured_light::GrayCodePattern>::opencv_from_extern(r) } ) } pub fn create_1(width: i32, height: i32) -> Result<core::Ptr::<dyn crate::structured_light::GrayCodePattern>> { unsafe { sys::cv_structured_light_GrayCodePattern_create_int_int(width, height) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::structured_light::GrayCodePattern>::opencv_from_extern(r) } ) } } /// Parameters of StructuredLightPattern constructor. /// ## Parameters /// * width: Projector's width. Default value is 1024. /// * height: Projector's height. Default value is 768. pub trait GrayCodePattern_ParamsTrait { fn as_raw_GrayCodePattern_Params(&self) -> *const c_void; fn as_raw_mut_GrayCodePattern_Params(&mut self) -> *mut c_void; fn width(&self) -> i32 { unsafe { sys::cv_structured_light_GrayCodePattern_Params_getPropWidth_const(self.as_raw_GrayCodePattern_Params()) }.into_result().expect("Infallible function failed: width") } fn set_width(&mut self, val: i32) -> () { unsafe { sys::cv_structured_light_GrayCodePattern_Params_setPropWidth_int(self.as_raw_mut_GrayCodePattern_Params(), val) }.into_result().expect("Infallible function failed: set_width") } fn height(&self) -> i32 { unsafe { sys::cv_structured_light_GrayCodePattern_Params_getPropHeight_const(self.as_raw_GrayCodePattern_Params()) }.into_result().expect("Infallible function failed: height") } fn set_height(&mut self, val: i32) -> () { unsafe { sys::cv_structured_light_GrayCodePattern_Params_setPropHeight_int(self.as_raw_mut_GrayCodePattern_Params(), val) }.into_result().expect("Infallible function failed: set_height") } } /// Parameters of StructuredLightPattern constructor. /// ## Parameters /// * width: Projector's width. Default value is 1024. /// * height: Projector's height. Default value is 768. pub struct GrayCodePattern_Params { ptr: *mut c_void } opencv_type_boxed! { GrayCodePattern_Params } impl Drop for GrayCodePattern_Params { fn drop(&mut self) { extern "C" { fn cv_GrayCodePattern_Params_delete(instance: *mut c_void); } unsafe { cv_GrayCodePattern_Params_delete(self.as_raw_mut_GrayCodePattern_Params()) }; } } impl GrayCodePattern_Params { #[inline] pub fn as_raw_GrayCodePattern_Params(&self) -> *const c_void { self.as_raw() } #[inline] pub fn as_raw_mut_GrayCodePattern_Params(&mut self) -> *mut c_void { self.as_raw_mut() } } unsafe impl Send for GrayCodePattern_Params {} impl crate::structured_light::GrayCodePattern_ParamsTrait for GrayCodePattern_Params { #[inline] fn as_raw_GrayCodePattern_Params(&self) -> *const c_void { self.as_raw() } #[inline] fn as_raw_mut_GrayCodePattern_Params(&mut self) -> *mut c_void { self.as_raw_mut() } } impl GrayCodePattern_Params { pub fn default() -> Result<crate::structured_light::GrayCodePattern_Params> { unsafe { sys::cv_structured_light_GrayCodePattern_Params_Params() }.into_result().map(|r| unsafe { crate::structured_light::GrayCodePattern_Params::opencv_from_extern(r) } ) } } /// Class implementing Fourier transform profilometry (FTP) , phase-shifting profilometry (PSP) /// and Fourier-assisted phase-shifting profilometry (FAPS) based on [faps](https://docs.opencv.org/3.2.0/d0/de3/citelist.html#CITEREF_faps). /// /// This class generates sinusoidal patterns that can be used with FTP, PSP and FAPS. pub trait SinusoidalPattern: crate::structured_light::StructuredLightPattern { fn as_raw_SinusoidalPattern(&self) -> *const c_void; fn as_raw_mut_SinusoidalPattern(&mut self) -> *mut c_void; /// Compute a wrapped phase map from sinusoidal patterns. /// ## Parameters /// * patternImages: Input data to compute the wrapped phase map. /// * wrappedPhaseMap: Wrapped phase map obtained through one of the three methods. /// * shadowMask: Mask used to discard shadow regions. /// * fundamental: Fundamental matrix used to compute epipolar lines and ease the matching step. /// /// ## C++ default parameters /// * shadow_mask: noArray() /// * fundamental: noArray() fn compute_phase_map(&mut self, pattern_images: &dyn core::ToInputArray, wrapped_phase_map: &mut dyn core::ToOutputArray, shadow_mask: &mut dyn core::ToOutputArray, fundamental: &dyn core::ToInputArray) -> Result<()> { input_array_arg!(pattern_images); output_array_arg!(wrapped_phase_map); output_array_arg!(shadow_mask); input_array_arg!(fundamental); unsafe { sys::cv_structured_light_SinusoidalPattern_computePhaseMap_const__InputArrayR_const__OutputArrayR_const__OutputArrayR_const__InputArrayR(self.as_raw_mut_SinusoidalPattern(), pattern_images.as_raw__InputArray(), wrapped_phase_map.as_raw__OutputArray(), shadow_mask.as_raw__OutputArray(), fundamental.as_raw__InputArray()) }.into_result() } /// Unwrap the wrapped phase map to remove phase ambiguities. /// ## Parameters /// * wrappedPhaseMap: The wrapped phase map computed from the pattern. /// * unwrappedPhaseMap: The unwrapped phase map used to find correspondences between the two devices. /// * camSize: Resolution of the camera. /// * shadowMask: Mask used to discard shadow regions. /// /// ## C++ default parameters /// * shadow_mask: noArray() fn unwrap_phase_map(&mut self, wrapped_phase_map: &dyn core::ToInputArray, unwrapped_phase_map: &mut dyn core::ToOutputArray, cam_size: core::Size, shadow_mask: &dyn core::ToInputArray) -> Result<()> { input_array_arg!(wrapped_phase_map); output_array_arg!(unwrapped_phase_map); input_array_arg!(shadow_mask); unsafe { sys::cv_structured_light_SinusoidalPattern_unwrapPhaseMap_const__InputArrayR_const__OutputArrayR_Size_const__InputArrayR(self.as_raw_mut_SinusoidalPattern(), wrapped_phase_map.as_raw__InputArray(), unwrapped_phase_map.as_raw__OutputArray(), cam_size.opencv_as_extern(), shadow_mask.as_raw__InputArray()) }.into_result() } /// Find correspondences between the two devices thanks to unwrapped phase maps. /// ## Parameters /// * projUnwrappedPhaseMap: Projector's unwrapped phase map. /// * camUnwrappedPhaseMap: Camera's unwrapped phase map. /// * matches: Images used to display correspondences map. fn find_pro_cam_matches(&mut self, proj_unwrapped_phase_map: &dyn core::ToInputArray, cam_unwrapped_phase_map: &dyn core::ToInputArray, matches: &mut dyn core::ToOutputArray) -> Result<()> { input_array_arg!(proj_unwrapped_phase_map); input_array_arg!(cam_unwrapped_phase_map); output_array_arg!(matches); unsafe { sys::cv_structured_light_SinusoidalPattern_findProCamMatches_const__InputArrayR_const__InputArrayR_const__OutputArrayR(self.as_raw_mut_SinusoidalPattern(), proj_unwrapped_phase_map.as_raw__InputArray(), cam_unwrapped_phase_map.as_raw__InputArray(), matches.as_raw__OutputArray()) }.into_result() } /// compute the data modulation term. /// ## Parameters /// * patternImages: captured images with projected patterns. /// * dataModulationTerm: Mat where the data modulation term is saved. /// * shadowMask: Mask used to discard shadow regions. fn compute_data_modulation_term(&mut self, pattern_images: &dyn core::ToInputArray, data_modulation_term: &mut dyn core::ToOutputArray, shadow_mask: &dyn core::ToInputArray) -> Result<()> { input_array_arg!(pattern_images); output_array_arg!(data_modulation_term); input_array_arg!(shadow_mask); unsafe { sys::cv_structured_light_SinusoidalPattern_computeDataModulationTerm_const__InputArrayR_const__OutputArrayR_const__InputArrayR(self.as_raw_mut_SinusoidalPattern(), pattern_images.as_raw__InputArray(), data_modulation_term.as_raw__OutputArray(), shadow_mask.as_raw__InputArray()) }.into_result() } } impl dyn SinusoidalPattern + '_ { /// Constructor. /// ## Parameters /// * parameters: SinusoidalPattern parameters SinusoidalPattern::Params: width, height of the projector and patterns parameters. /// /// ## C++ default parameters /// * parameters: SinusoidalPattern::Params() pub fn create(parameters: &crate::structured_light::SinusoidalPattern_Params) -> Result<core::Ptr::<dyn crate::structured_light::SinusoidalPattern>> { unsafe { sys::cv_structured_light_SinusoidalPattern_create_const_ParamsR(parameters.as_raw_SinusoidalPattern_Params()) }.into_result().map(|r| unsafe { core::Ptr::<dyn crate::structured_light::SinusoidalPattern>::opencv_from_extern(r) } ) } } /// Parameters of SinusoidalPattern constructor /// ## Parameters /// * width: Projector's width. /// * height: Projector's height. /// * nbrOfPeriods: Number of period along the patterns direction. /// * shiftValue: Phase shift between two consecutive patterns. /// * methodId: Allow to choose between FTP, PSP and FAPS. /// * nbrOfPixelsBetweenMarkers: Number of pixels between two consecutive markers on the same row. /// * setMarkers: Allow to set markers on the patterns. /// * markersLocation: vector used to store markers location on the patterns. pub trait SinusoidalPattern_ParamsTrait { fn as_raw_SinusoidalPattern_Params(&self) -> *const c_void; fn as_raw_mut_SinusoidalPattern_Params(&mut self) -> *mut c_void; fn width(&self) -> i32 { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_getPropWidth_const(self.as_raw_SinusoidalPattern_Params()) }.into_result().expect("Infallible function failed: width") } fn set_width(&mut self, val: i32) -> () { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_setPropWidth_int(self.as_raw_mut_SinusoidalPattern_Params(), val) }.into_result().expect("Infallible function failed: set_width") } fn height(&self) -> i32 { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_getPropHeight_const(self.as_raw_SinusoidalPattern_Params()) }.into_result().expect("Infallible function failed: height") } fn set_height(&mut self, val: i32) -> () { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_setPropHeight_int(self.as_raw_mut_SinusoidalPattern_Params(), val) }.into_result().expect("Infallible function failed: set_height") } fn nbr_of_periods(&self) -> i32 { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_getPropNbrOfPeriods_const(self.as_raw_SinusoidalPattern_Params()) }.into_result().expect("Infallible function failed: nbr_of_periods") } fn set_nbr_of_periods(&mut self, val: i32) -> () { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_setPropNbrOfPeriods_int(self.as_raw_mut_SinusoidalPattern_Params(), val) }.into_result().expect("Infallible function failed: set_nbr_of_periods") } fn shift_value(&self) -> f32 { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_getPropShiftValue_const(self.as_raw_SinusoidalPattern_Params()) }.into_result().expect("Infallible function failed: shift_value") } fn set_shift_value(&mut self, val: f32) -> () { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_setPropShiftValue_float(self.as_raw_mut_SinusoidalPattern_Params(), val) }.into_result().expect("Infallible function failed: set_shift_value") } fn method_id(&self) -> i32 { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_getPropMethodId_const(self.as_raw_SinusoidalPattern_Params()) }.into_result().expect("Infallible function failed: method_id") } fn set_me
self, val: i32) -> () { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_setPropMethodId_int(self.as_raw_mut_SinusoidalPattern_Params(), val) }.into_result().expect("Infallible function failed: set_method_id") } fn nbr_of_pixels_between_markers(&self) -> i32 { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_getPropNbrOfPixelsBetweenMarkers_const(self.as_raw_SinusoidalPattern_Params()) }.into_result().expect("Infallible function failed: nbr_of_pixels_between_markers") } fn set_nbr_of_pixels_between_markers(&mut self, val: i32) -> () { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_setPropNbrOfPixelsBetweenMarkers_int(self.as_raw_mut_SinusoidalPattern_Params(), val) }.into_result().expect("Infallible function failed: set_nbr_of_pixels_between_markers") } fn horizontal(&self) -> bool { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_getPropHorizontal_const(self.as_raw_SinusoidalPattern_Params()) }.into_result().expect("Infallible function failed: horizontal") } fn set_horizontal(&mut self, val: bool) -> () { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_setPropHorizontal_bool(self.as_raw_mut_SinusoidalPattern_Params(), val) }.into_result().expect("Infallible function failed: set_horizontal") } fn set_markers(&self) -> bool { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_getPropSetMarkers_const(self.as_raw_SinusoidalPattern_Params()) }.into_result().expect("Infallible function failed: set_markers") } fn set_set_markers(&mut self, val: bool) -> () { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_setPropSetMarkers_bool(self.as_raw_mut_SinusoidalPattern_Params(), val) }.into_result().expect("Infallible function failed: set_set_markers") } fn markers_location(&mut self) -> core::Vector::<core::Point2f> { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_getPropMarkersLocation(self.as_raw_mut_SinusoidalPattern_Params()) }.into_result().map(|r| unsafe { core::Vector::<core::Point2f>::opencv_from_extern(r) } ).expect("Infallible function failed: markers_location") } fn set_markers_location(&mut self, mut val: core::Vector::<core::Point2f>) -> () { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_setPropMarkersLocation_vector_Point2f_(self.as_raw_mut_SinusoidalPattern_Params(), val.as_raw_mut_VectorOfPoint2f()) }.into_result().expect("Infallible function failed: set_markers_location") } } /// Parameters of SinusoidalPattern constructor /// ## Parameters /// * width: Projector's width. /// * height: Projector's height. /// * nbrOfPeriods: Number of period along the patterns direction. /// * shiftValue: Phase shift between two consecutive patterns. /// * methodId: Allow to choose between FTP, PSP and FAPS. /// * nbrOfPixelsBetweenMarkers: Number of pixels between two consecutive markers on the same row. /// * setMarkers: Allow to set markers on the patterns. /// * markersLocation: vector used to store markers location on the patterns. pub struct SinusoidalPattern_Params { ptr: *mut c_void } opencv_type_boxed! { SinusoidalPattern_Params } impl Drop for SinusoidalPattern_Params { fn drop(&mut self) { extern "C" { fn cv_SinusoidalPattern_Params_delete(instance: *mut c_void); } unsafe { cv_SinusoidalPattern_Params_delete(self.as_raw_mut_SinusoidalPattern_Params()) }; } } impl SinusoidalPattern_Params { #[inline] pub fn as_raw_SinusoidalPattern_Params(&self) -> *const c_void { self.as_raw() } #[inline] pub fn as_raw_mut_SinusoidalPattern_Params(&mut self) -> *mut c_void { self.as_raw_mut() } } unsafe impl Send for SinusoidalPattern_Params {} impl crate::structured_light::SinusoidalPattern_ParamsTrait for SinusoidalPattern_Params { #[inline] fn as_raw_SinusoidalPattern_Params(&self) -> *const c_void { self.as_raw() } #[inline] fn as_raw_mut_SinusoidalPattern_Params(&mut self) -> *mut c_void { self.as_raw_mut() } } impl SinusoidalPattern_Params { pub fn default() -> Result<crate::structured_light::SinusoidalPattern_Params> { unsafe { sys::cv_structured_light_SinusoidalPattern_Params_Params() }.into_result().map(|r| unsafe { crate::structured_light::SinusoidalPattern_Params::opencv_from_extern(r) } ) } } /// Abstract base class for generating and decoding structured light patterns. pub trait StructuredLightPattern: core::AlgorithmTrait { fn as_raw_StructuredLightPattern(&self) -> *const c_void; fn as_raw_mut_StructuredLightPattern(&mut self) -> *mut c_void; /// Generates the structured light pattern to project. /// /// ## Parameters /// * patternImages: The generated pattern: a vector<Mat>, in which each image is a CV_8U Mat at projector's resolution. fn generate(&mut self, pattern_images: &mut dyn core::ToOutputArray) -> Result<bool> { output_array_arg!(pattern_images); unsafe { sys::cv_structured_light_StructuredLightPattern_generate_const__OutputArrayR(self.as_raw_mut_StructuredLightPattern(), pattern_images.as_raw__OutputArray()) }.into_result() } /// Decodes the structured light pattern, generating a disparity map /// /// ## Parameters /// * patternImages: The acquired pattern images to decode (vector<vector<Mat>>), loaded as grayscale and previously rectified. /// * disparityMap: The decoding result: a CV_64F Mat at image resolution, storing the computed disparity map. /// * blackImages: The all-black images needed for shadowMasks computation. /// * whiteImages: The all-white images needed for shadowMasks computation. /// * flags: Flags setting decoding algorithms. Default: DECODE_3D_UNDERWORLD. /// /// Note: All the images must be at the same resolution. /// /// ## C++ default parameters /// * black_images: noArray() /// * white_images: noArray() /// * flags: DECODE_3D_UNDERWORLD fn decode(&self, pattern_images: &dyn core::ToInputArray, disparity_map: &mut dyn core::ToOutputArray, black_images: &dyn core::ToInputArray, white_images: &dyn core::ToInputArray, flags: i32) -> Result<bool> { input_array_arg!(pattern_images); output_array_arg!(disparity_map); input_array_arg!(black_images); input_array_arg!(white_images); unsafe { sys::cv_structured_light_StructuredLightPattern_decode_const_const__InputArrayR_const__OutputArrayR_const__InputArrayR_const__InputArrayR_int(self.as_raw_StructuredLightPattern(), pattern_images.as_raw__InputArray(), disparity_map.as_raw__OutputArray(), black_images.as_raw__InputArray(), white_images.as_raw__InputArray(), flags) }.into_result() } }
thod_id(&mut
server.go
// Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package frontend import ( "embed" "encoding/json" "net/http" "path" "github.com/opentracing/opentracing-go" "go.uber.org/zap" "github.com/evanlixin/jaeger-examples/hotrod/pkg/httperr" "github.com/evanlixin/jaeger-examples/hotrod/pkg/log" "github.com/evanlixin/jaeger-examples/hotrod/pkg/tracing" "github.com/jaegertracing/jaeger/pkg/httpfs" ) //go:embed web_assets/* var assetFS embed.FS // Server implements jaeger-demo-frontend service type Server struct { hostPort string tracer opentracing.Tracer logger log.Factory bestETA *bestETA assetFS http.FileSystem basepath string jaegerUI string } // ConfigOptions used to make sure service clients // can find correct server ports type ConfigOptions struct { FrontendHostPort string DriverHostPort string CustomerHostPort string RouteHostPort string Basepath string JaegerUI string } // NewServer creates a new frontend.Server func NewServer(options ConfigOptions, tracer opentracing.Tracer, logger log.Factory) *Server {
return &Server{ hostPort: options.FrontendHostPort, tracer: tracer, logger: logger, bestETA: newBestETA(tracer, logger, options), assetFS: httpfs.PrefixedFS("web_assets", http.FS(assetFS)), basepath: options.Basepath, jaegerUI: options.JaegerUI, } } // Run starts the frontend server func (s *Server) Run() error { mux := s.createServeMux() s.logger.Bg().Info("Starting", zap.String("address", "http://"+path.Join(s.hostPort, s.basepath))) return http.ListenAndServe(s.hostPort, mux) } func (s *Server) createServeMux() http.Handler { mux := tracing.NewServeMux(s.tracer) p := path.Join("/", s.basepath) mux.Handle(p, http.StripPrefix(p, http.FileServer(s.assetFS))) mux.Handle(path.Join(p, "/dispatch"), http.HandlerFunc(s.dispatch)) mux.Handle(path.Join(p, "/config"), http.HandlerFunc(s.config)) return mux } func (s *Server) config(w http.ResponseWriter, r *http.Request) { config := map[string]string{ "jaeger": s.jaegerUI, } s.writeResponse(config, w, r) } func (s *Server) dispatch(w http.ResponseWriter, r *http.Request) { ctx := r.Context() s.logger.For(ctx).Info("HTTP request received", zap.String("method", r.Method), zap.Stringer("url", r.URL)) if err := r.ParseForm(); httperr.HandleError(w, err, http.StatusBadRequest) { s.logger.For(ctx).Error("bad request", zap.Error(err)) return } customerID := r.Form.Get("customer") if customerID == "" { http.Error(w, "Missing required 'customer' parameter", http.StatusBadRequest) return } // TODO distinguish between user errors (such as invalid customer ID) and server failures response, err := s.bestETA.Get(ctx, customerID) if httperr.HandleError(w, err, http.StatusInternalServerError) { s.logger.For(ctx).Error("request failed", zap.Error(err)) return } s.writeResponse(response, w, r) } func (s *Server) writeResponse(response interface{}, w http.ResponseWriter, r *http.Request) { data, err := json.Marshal(response) if httperr.HandleError(w, err, http.StatusInternalServerError) { s.logger.For(r.Context()).Error("cannot marshal response", zap.Error(err)) return } w.Header().Set("Content-Type", "application/json") w.Write(data) }
database_test.go
package wallet import ( "os" "path/filepath" "testing" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/coreos/bbolt" ) // TestDBOpen tests the wallet.openDB method. func
(t *testing.T) { if testing.Short() { t.SkipNow() } w := new(Wallet) err := w.openDB("") if err == nil { t.Fatal("expected error, got nil") } testdir := build.TempDir(modules.WalletDir, "TestDBOpen") os.MkdirAll(testdir, 0700) err = w.openDB(filepath.Join(testdir, dbFile)) if err != nil { t.Fatal(err) } w.db.View(func(tx *bolt.Tx) error { for _, b := range dbBuckets { if tx.Bucket(b) == nil { t.Error("bucket", string(b), "does not exist") } } return nil }) w.db.Close() }
TestDBOpen
partition.rs
//! Holds one or more Chunks. use arrow_deps::arrow::record_batch::RecordBatch; use generated_types::wal as wb; use std::{collections::BTreeMap, sync::Arc}; use crate::chunk::{Chunk, Error as ChunkError}; use snafu::{ResultExt, Snafu}; #[derive(Debug, Snafu)] pub enum Error { #[snafu(display( "Error writing to open chunk of partition with key '{}': {}", partition_key, source ))] WritingChunkData { partition_key: String, source: ChunkError, }, #[snafu(display( "Can not drop open chunk '{}' of partition with key '{}'", chunk_id, partition_key, ))] DropOpenChunk { partition_key: String, chunk_id: u64, }, #[snafu(display( "Can not drop unknown chunk '{}' of partition with key '{}'. Valid chunk ids: {:?}", chunk_id, partition_key, valid_chunk_ids, ))] DropUnknownChunk { partition_key: String, chunk_id: u64, valid_chunk_ids: Vec<u64>, }, } pub type Result<T, E = Error> = std::result::Result<T, E>; #[derive(Debug)] pub struct Partition { /// The partition key that is shared by all Chunks in this Partition key: String, /// The currently active, open Chunk; All new writes go to this chunk open_chunk: Chunk, /// Closed chunks which can no longer be written /// key: chunk_id, value: Chunk /// /// List of chunks, ordered by chunk id (and thus creation time). /// The ordereing is achieved with a BTreeMap. The ordering is /// used when `iter()` is used to iterate over chunks in their /// creation order closed_chunks: BTreeMap<u64, Arc<Chunk>>, /// Responsible for assigning ids to chunks. Eventually, this might /// need to start at a number other than 0. id_generator: u64, } impl Partition { pub fn new(key: impl Into<String>) -> Self { // TODO: for existing partitions, does this need to pick up at preexisting ID? let mut id_generator = 0; let key: String = key.into(); let open_chunk = Chunk::new(id_generator); id_generator += 1; Self { key, open_chunk, closed_chunks: BTreeMap::new(), id_generator, } } /// write data to the open chunk pub fn write_entry(&mut self, entry: &wb::WriteBufferEntry<'_>) -> Result<()> { assert_eq!( entry .partition_key() .expect("partition key should be present"), self.key ); self.open_chunk .write_entry(entry) .with_context(|| WritingChunkData { partition_key: entry.partition_key().unwrap(), }) } /// Convert the table specified in this chunk into some number of /// record batches, appended to dst pub fn table_to_arrow( &self, dst: &mut Vec<RecordBatch>, table_name: &str, columns: &[&str], ) -> crate::chunk::Result<()> { for chunk in self.iter() { chunk.table_to_arrow(dst, table_name, columns)? } Ok(()) } /// Return information about the chunks held in this partition #[allow(dead_code)] pub fn chunk_info(&self) -> PartitionChunkInfo { PartitionChunkInfo { num_closed_chunks: self.closed_chunks.len(), } } /// Close the currently open chunk and create a new open /// chunk. The newly closed chunk is adding to the list of closed /// chunks if it had data, and is returned. /// /// Any new writes to this partition will go to a new chunk. /// /// Queries will continue to see data in the specified chunk until /// it is dropped. pub fn rollover_chunk(&mut self) -> Arc<Chunk> { let chunk_id = self.id_generator; self.id_generator += 1; let mut chunk = Chunk::new(chunk_id); std::mem::swap(&mut chunk, &mut self.open_chunk); chunk.mark_closed(); let chunk = Arc::new(chunk); if !chunk.is_empty() { let existing_value = self.closed_chunks.insert(chunk.id(), chunk.clone()); assert!(existing_value.is_none()); } chunk } /// Drop the specified chunk for the partition, returning a reference to the /// chunk #[allow(dead_code)] pub fn drop_chunk(&mut self, chunk_id: u64) -> Result<Arc<Chunk>> { self.closed_chunks.remove(&chunk_id).ok_or_else(|| { let partition_key = self.key.clone(); if self.open_chunk.id() == chunk_id { Error::DropOpenChunk { partition_key, chunk_id, } } else { let valid_chunk_ids: Vec<u64> = self.iter().map(|c| c.id()).collect(); Error::DropUnknownChunk { partition_key, chunk_id, valid_chunk_ids, } } }) } /// Return the partition key shared by all data stored in this /// partition pub fn
(&self) -> &str { &self.key } /// in Return an iterator over each Chunk in this partition pub fn iter(&self) -> ChunkIter<'_> { ChunkIter::new(self) } } /// information on chunks for this partition #[derive(Debug, Default, PartialEq)] pub struct PartitionChunkInfo { pub num_closed_chunks: usize, } /// Iterates over chunks in a partition. Always iterates over chunks /// in their creation (id) order: Closed chunks first, followed by the /// open chunk, if any. This allows data to be read out in the same order it /// was written in pub struct ChunkIter<'a> { partition: &'a Partition, visited_open: bool, closed_iter: std::collections::btree_map::Iter<'a, u64, Arc<Chunk>>, } impl<'a> ChunkIter<'a> { fn new(partition: &'a Partition) -> Self { let closed_iter = partition.closed_chunks.iter(); Self { partition, visited_open: false, closed_iter, } } } impl<'a> Iterator for ChunkIter<'a> { type Item = &'a Chunk; fn next(&mut self) -> Option<Self::Item> { let partition = self.partition; self.closed_iter .next() .map(|(_k, v)| v.as_ref()) .or_else(|| { if !self.visited_open { self.visited_open = true; Some(&partition.open_chunk) } else { None } }) } } #[cfg(test)] mod tests { use super::*; use chrono::Utc; use data_types::data::split_lines_into_write_entry_partitions; use arrow_deps::{ arrow::record_batch::RecordBatch, assert_table_eq, test_util::sort_record_batch, }; use influxdb_line_protocol::parse_lines; #[tokio::test] async fn test_rollover_chunk() { let mut partition = Partition::new("a_key"); load_data( &mut partition, &[ "h2o,state=MA,city=Boston temp=70.4 100", "h2o,state=MA,city=Boston temp=71.4 200", ], ) .await; let expected = &[ "+--------+-------+------+------+", "| city | state | temp | time |", "+--------+-------+------+------+", "| Boston | MA | 70.4 | 100 |", "| Boston | MA | 71.4 | 200 |", "+--------+-------+------+------+", ]; assert_eq!( partition.chunk_info(), PartitionChunkInfo { num_closed_chunks: 0 } ); assert_table_eq!(expected, &dump_table(&partition, "h2o")); println!("rolling over chunk"); // now rollover chunk, and expected results should be the same let chunk = partition.rollover_chunk(); assert_eq!( partition.chunk_info(), PartitionChunkInfo { num_closed_chunks: 1 } ); assert_table_eq!(expected, &dump_table(&partition, "h2o")); assert_eq!(row_count("h2o", &chunk), 2); // calling rollover chunk again is ok; It is returned but not added to the // closed chunk list let chunk = partition.rollover_chunk(); assert_eq!( partition.chunk_info(), PartitionChunkInfo { num_closed_chunks: 1 } ); assert_table_eq!(expected, &dump_table(&partition, "h2o")); assert_eq!(row_count("h2o", &chunk), 0); } #[tokio::test] async fn test_rollover_chunk_new_data_visible() { let mut partition = Partition::new("a_key"); load_data( &mut partition, &[ "h2o,state=MA,city=Boston temp=70.4 100", "h2o,state=MA,city=Boston temp=71.4 200", ], ) .await; // now rollover chunk let chunk = partition.rollover_chunk(); assert_eq!( partition.chunk_info(), PartitionChunkInfo { num_closed_chunks: 1 } ); assert_eq!(row_count("h2o", &chunk), 2); load_data( &mut partition, &[ "h2o,state=MA,city=Boston temp=69.0 50", "h2o,state=MA,city=Boston temp=72.3 300", "h2o,state=MA,city=Boston temp=73.2 400", ], ) .await; // note the rows come out in the order they were written in let expected = &[ "+--------+-------+------+------+", "| city | state | temp | time |", "+--------+-------+------+------+", "| Boston | MA | 70.4 | 100 |", "| Boston | MA | 71.4 | 200 |", "| Boston | MA | 69 | 50 |", "| Boston | MA | 72.3 | 300 |", "| Boston | MA | 73.2 | 400 |", "+--------+-------+------+------+", ]; assert_table_eq!(expected, &dump_table(&partition, "h2o")); // now rollover chunk again let chunk = partition.rollover_chunk(); assert_eq!( partition.chunk_info(), PartitionChunkInfo { num_closed_chunks: 2 } ); assert_eq!(row_count("h2o", &chunk), 3); assert_table_eq!(expected, &dump_table(&partition, "h2o")); } #[tokio::test] async fn test_rollover_chunk_multiple_tables() { let mut partition = Partition::new("a_key"); load_data( &mut partition, &[ "h2o,state=MA,city=Boston temp=70.4 100", "o2,state=MA,city=Boston temp=71.4 100", "o2,state=MA,city=Boston temp=72.4 200", ], ) .await; let expected_h2o = &[ "+--------+-------+------+------+", "| city | state | temp | time |", "+--------+-------+------+------+", "| Boston | MA | 70.4 | 100 |", "+--------+-------+------+------+", ]; let expected_o2 = &[ "+--------+-------+------+------+", "| city | state | temp | time |", "+--------+-------+------+------+", "| Boston | MA | 71.4 | 100 |", "| Boston | MA | 72.4 | 200 |", "+--------+-------+------+------+", ]; assert_eq!( partition.chunk_info(), PartitionChunkInfo { num_closed_chunks: 0 } ); assert_table_eq!(expected_h2o, &dump_table(&partition, "h2o")); assert_table_eq!(expected_o2, &dump_table(&partition, "o2")); // now rollover chunk again let chunk = partition.rollover_chunk(); assert_eq!( partition.chunk_info(), PartitionChunkInfo { num_closed_chunks: 1 } ); assert_eq!(row_count("h2o", &chunk), 1); assert_eq!(row_count("o2", &chunk), 2); assert_table_eq!(expected_h2o, &dump_table(&partition, "h2o")); assert_table_eq!(expected_o2, &dump_table(&partition, "o2")); } #[tokio::test] async fn test_rollover_chunk_ids() { let mut partition = Partition::new("a_key"); // When the chunk is rolled over, it gets id 0 let chunk = partition.rollover_chunk(); assert_eq!(chunk.id(), 0); load_data(&mut partition, &["h2o,state=MA,city=Boston temp=70.4 100"]).await; let chunk = partition.rollover_chunk(); assert_eq!(chunk.id(), 1); load_data(&mut partition, &["h2o,state=MA,city=Boston temp=71.4 200"]).await; let chunk = partition.rollover_chunk(); assert_eq!(chunk.id(), 2); assert_eq!(all_ids_with_data(&partition), vec![1, 2]); } #[tokio::test] async fn test_rollover_chunk_drop_data_is_gone() { let mut partition = Partition::new("a_key"); // Given data loaded into two chunks (one closed) load_data( &mut partition, &[ "h2o,state=MA,city=Boston temp=70.4 100", "h2o,state=MA,city=Boston temp=72.4 200", ], ) .await; // When the chunk is rolled over partition.rollover_chunk(); load_data(&mut partition, &["h2o,state=MA,city=Boston temp=71.4 100"]).await; // Initially, data from both chunks appear in queries let expected = &[ "+--------+-------+------+------+", "| city | state | temp | time |", "+--------+-------+------+------+", "| Boston | MA | 70.4 | 100 |", "| Boston | MA | 72.4 | 200 |", "| Boston | MA | 71.4 | 100 |", "+--------+-------+------+------+", ]; assert_table_eq!(expected, &dump_table(&partition, "h2o")); assert_eq!(all_ids_with_data(&partition), vec![0, 1]); // When the first chunk is dropped partition.drop_chunk(0).unwrap(); // then it no longer is in the // partitions is left let expected = &[ "+--------+-------+------+------+", "| city | state | temp | time |", "+--------+-------+------+------+", "| Boston | MA | 71.4 | 100 |", "+--------+-------+------+------+", ]; assert_table_eq!(expected, &dump_table(&partition, "h2o")); assert_eq!(all_ids_with_data(&partition), vec![1]); } #[tokio::test] async fn test_write_after_drop_chunk() { let mut partition = Partition::new("a_key"); // Given data loaded into three chunks (two closed) load_data(&mut partition, &["h2o,state=MA,city=Boston temp=70.4 100"]).await; partition.rollover_chunk(); load_data(&mut partition, &["h2o,state=MA,city=Boston temp=72.4 200"]).await; partition.rollover_chunk(); load_data(&mut partition, &["h2o,state=MA,city=Boston temp=71.4 300"]).await; partition.rollover_chunk(); let expected = &[ "+--------+-------+------+------+", "| city | state | temp | time |", "+--------+-------+------+------+", "| Boston | MA | 70.4 | 100 |", "| Boston | MA | 72.4 | 200 |", "| Boston | MA | 71.4 | 300 |", "+--------+-------+------+------+", ]; assert_table_eq!(expected, &dump_table(&partition, "h2o")); assert_eq!(all_ids_with_data(&partition), vec![0, 1, 2]); // when one chunk is dropped and new data is added partition.drop_chunk(1).unwrap(); load_data(&mut partition, &["h2o,state=MA,city=Boston temp=73.0 400"]).await; // then the results reflect that let expected = &[ "+--------+-------+------+------+", "| city | state | temp | time |", "+--------+-------+------+------+", "| Boston | MA | 70.4 | 100 |", "| Boston | MA | 71.4 | 300 |", "| Boston | MA | 73 | 400 |", "+--------+-------+------+------+", ]; assert_table_eq!(expected, &dump_table(&partition, "h2o")); assert_eq!(all_ids_with_data(&partition), vec![0, 2, 3]); } #[tokio::test] async fn test_drop_chunk_invalid() { let mut partition = Partition::new("a_key"); let e = partition.drop_chunk(0).unwrap_err(); assert_eq!( "Can not drop open chunk '0' of partition with key 'a_key'", format!("{}", e) ); load_data(&mut partition, &["h2o,state=MA,city=Boston temp=70.4 100"]).await; partition.rollover_chunk(); partition.drop_chunk(0).unwrap(); // drop is ok // can't drop again let e = partition.drop_chunk(0).unwrap_err(); assert_eq!( "Can not drop unknown chunk '0' of partition with key 'a_key'. Valid chunk ids: [1]", format!("{}", e) ); } #[tokio::test] async fn test_chunk_timestamps() { let start = Utc::now(); let mut partition = Partition::new("a_key"); let after_partition_creation = Utc::now(); // Given data loaded into two chunks load_data( &mut partition, &[ "h2o,state=MA,city=Boston temp=70.4 100", "o2,state=MA,city=Boston temp=71.4 100", "o2,state=MA,city=Boston temp=72.4 200", ], ) .await; let after_data_load = Utc::now(); // When the chunk is rolled over let chunk = partition.rollover_chunk(); let after_rollover = Utc::now(); println!("start: {:?}, after_partition_creation: {:?}, after_data_load: {:?}, after_rollover: {:?}", start, after_partition_creation, after_data_load, after_rollover); println!("Chunk: {:#?}", chunk); // then the chunk creation and rollover times are as expected assert!(start < chunk.time_of_first_write.unwrap()); assert!(after_partition_creation < chunk.time_of_first_write.unwrap()); assert!(chunk.time_of_first_write.unwrap() < after_data_load); assert!(chunk.time_of_first_write.unwrap() == chunk.time_of_last_write.unwrap()); assert!(after_data_load < chunk.time_closed.unwrap()); assert!(chunk.time_closed.unwrap() < after_rollover); } #[tokio::test] async fn test_chunk_timestamps_last_write() { let mut partition = Partition::new("a_key"); // Given data loaded into two chunks load_data(&mut partition, &["o2,state=MA,city=Boston temp=71.4 100"]).await; let after_data_load_1 = Utc::now(); load_data(&mut partition, &["o2,state=MA,city=Boston temp=72.4 200"]).await; let after_data_load_2 = Utc::now(); let chunk = partition.rollover_chunk(); assert!(chunk.time_of_first_write.unwrap() < after_data_load_1); assert!(chunk.time_of_first_write.unwrap() < chunk.time_of_last_write.unwrap()); assert!(chunk.time_of_last_write.unwrap() < after_data_load_2); } #[tokio::test] async fn test_chunk_timestamps_empty() { let mut partition = Partition::new("a_key"); let after_partition_creation = Utc::now(); let chunk = partition.rollover_chunk(); let after_rollover = Utc::now(); assert!(chunk.time_of_first_write.is_none()); assert!(chunk.time_of_last_write.is_none()); assert!(after_partition_creation < chunk.time_closed.unwrap()); assert!(chunk.time_closed.unwrap() < after_rollover); } #[tokio::test] async fn test_chunk_timestamps_empty_write() { let mut partition = Partition::new("a_key"); let after_partition_creation = Utc::now(); // Call load data but don't write any actual data (aka it was an empty write) load_data(&mut partition, &[""]).await; let chunk = partition.rollover_chunk(); let after_rollover = Utc::now(); assert!(chunk.time_of_first_write.is_none()); assert!(chunk.time_of_last_write.is_none()); assert!(after_partition_creation < chunk.time_closed.unwrap()); assert!(chunk.time_closed.unwrap() < after_rollover); } fn row_count(table_name: &str, chunk: &Chunk) -> u32 { let stats = chunk.table_stats().unwrap(); for s in &stats { if s.name == table_name { return s.columns[0].count(); } } 0 } /// Load the specified rows of line protocol data into this partition async fn load_data(partition: &mut Partition, lp_data: &[&str]) { let lp_string = lp_data.to_vec().join("\n"); let lines: Vec<_> = parse_lines(&lp_string).map(|l| l.unwrap()).collect(); let data = split_lines_into_write_entry_partitions(|_| partition.key().into(), &lines); let batch = flatbuffers::get_root::<wb::WriteBufferBatch<'_>>(&data); let entries = batch.entries().unwrap(); for entry in entries { let key = entry .partition_key() .expect("partition key should have been inserted"); assert_eq!(key, partition.key()); partition.write_entry(&entry).unwrap() } } fn dump_table(partition: &Partition, table_name: &str) -> Vec<RecordBatch> { let mut dst = vec![]; let requested_columns = []; // empty ==> request all columns partition .table_to_arrow(&mut dst, table_name, &requested_columns) .unwrap(); // Now, sort dest dst.into_iter().map(sort_record_batch).collect() } /// returns a list of all chunk ids in partition that are not empty fn all_ids_with_data(partition: &Partition) -> Vec<u64> { partition .iter() .filter_map(|c| if c.is_empty() { None } else { Some(c.id()) }) .collect() } }
key
add_locale_test.go
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package add_locale import ( "regexp" "testing" "time" "github.com/stretchr/testify/assert" "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" ) func
(t *testing.T) { testConfig, err := common.NewConfigFrom(map[string]interface{}{ "format": "abbreviation", }) if err != nil { t.Fatal(err) } input := common.MapStr{} zone, _ := time.Now().In(time.Local).Zone() actual := getActualValue(t, testConfig, input) expected := common.MapStr{ "event": map[string]string{ "timezone": zone, }, } assert.Equal(t, expected.String(), actual.String()) } func TestTimezoneFormat(t *testing.T) { // Test positive format posLoc, err := time.LoadLocation("Africa/Asmara") if err != nil { t.Fatal(err) } posZone, posOffset := time.Now().In(posLoc).Zone() posAddLocal := addLocale{TimezoneFormat: Offset} posVal := posAddLocal.Format(posZone, posOffset) assert.Regexp(t, regexp.MustCompile(`\+[\d]{2}\:[\d]{2}`), posVal) // Test negative format negLoc, err := time.LoadLocation("America/Curacao") if err != nil { t.Fatal(err) } negZone, negOffset := time.Now().In(negLoc).Zone() negAddLocal := addLocale{TimezoneFormat: Offset} negVal := negAddLocal.Format(negZone, negOffset) assert.Regexp(t, regexp.MustCompile(`\-[\d]{2}\:[\d]{2}`), negVal) } func getActualValue(t *testing.T, config *common.Config, input common.MapStr) common.MapStr { log := logp.NewLogger("add_locale_test") p, err := New(config) if err != nil { log.Error("Error initializing add_locale") t.Fatal(err) } actual, err := p.Run(&beat.Event{Fields: input}) return actual.Fields } func BenchmarkConstruct(b *testing.B) { var testConfig = common.NewConfig() input := common.MapStr{} p, err := New(testConfig) if err != nil { b.Fatal(err) } for i := 0; i < b.N; i++ { _, err = p.Run(&beat.Event{Fields: input}) } }
TestExportTimezone
nodesnetwork.go
package net import ( "encoding/hex" "encoding/json" "io/ioutil" "net/http" "strings" "sync" "time" "github.com/taincoin/taincoin/lib" "github.com/taincoin/taincoin/lib/utils" ) // INterface for extra storage for a nodes. // TODO // This is not used yet type NodeNetworkStorage interface { GetNodes() ([]NodeAddr, error) AddNodeToKnown(addr NodeAddr) RemoveNodeFromKnown(addr NodeAddr) GetCountOfKnownNodes() (int, error) } // This manages list of known nodes by a node type NodeNetwork struct { Logger *utils.LoggerMan Nodes []NodeAddr Storage NodeNetworkStorage lock *sync.Mutex } type NodesListJSON struct { Nodes []NodeAddr Genesis string } // Init nodes network object func (n *NodeNetwork) Init() { n.lock = &sync.Mutex{} } // Set extra storage for a nodes func (n *NodeNetwork) SetExtraManager(storage NodeNetworkStorage) { n.Storage = storage } // Loads list of nodes from storage func (n *NodeNetwork) LoadNodes() error { if n.Storage == nil { return nil } n.lock.Lock() defer n.lock.Unlock() nodes, err := n.Storage.GetNodes() if err != nil { return err } for _, node := range nodes { n.Nodes = append(n.Nodes, node) } return nil } // Set nodes list. This can be used to do initial nodes loading from config or so func (n *NodeNetwork) SetNodes(nodes []NodeAddr, replace bool) { n.lock.Lock() defer n.lock.Unlock() if replace { n.Nodes = nodes } else { n.Nodes = append(n.Nodes, nodes...) } if n.Storage != nil { // remember what is not yet remembered for _, node := range nodes { n.Storage.AddNodeToKnown(node) } } } // If n any known nodes then it will be loaded from the url on a host // Accepts genesis block hash. It will be compared to the hash in JSON doc func (n *NodeNetwork) LoadInitialNodes(geenesisHash []byte) error { timeout := time.Duration(2 * time.Second) client := http.Client{Timeout: timeout} response, err := client.Get(lib.InitialNodesList) if err != nil { return err } jsondoc, err := ioutil.ReadAll(response.Body) if err != nil { return err } nodes := NodesListJSON{} err = json.Unmarshal(jsondoc, &nodes) if err != nil { return err } if geenesisHash != nil && nodes.Genesis != "" { gh := hex.EncodeToString(geenesisHash) if gh != nodes.Genesis { // don't add return nil } } n.lock.Lock() defer n.lock.Unlock() n.Nodes = append(n.Nodes, nodes.Nodes...) if n.Storage != nil { // remember loaded nodes in local storage for _, node := range nodes.Nodes { node.Host = strings.Trim(node.Host, " ") n.Storage.AddNodeToKnown(node) } } return nil } func (n *NodeNetwork) GetNodes() []NodeAddr { return n.Nodes } // Returns number of known nodes func (n *NodeNetwork) GetCountOfKnownNodes() int { l := len(n.Nodes) return l } // Check if node address is known func (n *NodeNetwork) CheckIsKnown(addr NodeAddr) bool { exists := false
for _, node := range n.Nodes { if node.CompareToAddress(addr) { exists = true break } } return exists } /* * Checks if a node exists in list of known nodes and adds it if no * Returns true if was added */ func (n *NodeNetwork) AddNodeToKnown(addr NodeAddr) bool { n.lock.Lock() defer n.lock.Unlock() exists := false for _, node := range n.Nodes { if node.CompareToAddress(addr) { exists = true break } } if !exists { n.Nodes = append(n.Nodes, addr) } if n.Storage != nil { n.Storage.AddNodeToKnown(addr) } return !exists } // Removes a node from known func (n *NodeNetwork) RemoveNodeFromKnown(addr NodeAddr) { n.lock.Lock() defer n.lock.Unlock() updatedlist := []NodeAddr{} for _, node := range n.Nodes { if !node.CompareToAddress(addr) { updatedlist = append(updatedlist, node) } } n.Nodes = updatedlist if n.Storage != nil { n.Storage.RemoveNodeFromKnown(addr) } }
feature_extraction.py
#!/usr/bin/python # -*- coding: utf-8 -*- """ Created on Fri May 1 16:28:06 2015 @author: ddboline """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import csv import gzip import numpy as np import pandas as pd from dateutil.parser import parse WEATHER_VARS_WITH_M_T = (u'Tmax', u'Tmin', u'Tavg', u'Depart', u'DewPoint', u'WetBulb', u'Heat', u'Cool', u'Snowfall', u'PrecipTotal', u'StnPressure', u'SeaLevel', u'ResultSpeed', u'ResultDir', u'AvgSpeed', u'Water1') WEATHER_PHENOMENA = ('BCFG', 'BLDU', 'BLSN', 'BR', 'DU', 'DZ', 'FG', 'FG+', 'FU', 'FZDZ', 'FZFG', 'FZRA', 'GR', 'GS', 'HZ', 'MIFG', 'PL', 'PRFG', 'RA', 'SG', 'SN', 'SQ', 'TS', 'TSRA', 'TSSN', 'UP', 'VCFG', 'VCTS') def haversine_distance(lat1, lon1, lat2, lon2): r_earth = 6371. dlat = np.abs(lat1-lat2)*np.pi/180. dlon = np.abs(lon1-lon2)*np.pi/180. lat1 *= np.pi/180. lat2 *= np.pi/180. dist = 2. * r_earth * np.arcsin( np.sqrt( np.sin(dlat/2.)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.)**2)) return dist def lat_lon_box(lat, lon, dist): r_earth = 6371. d_2r = dist/(2.*r_earth) dlat = 2. * (d_2r) dlon = 2. * np.arcsin((np.sin(d_2r))/(np.cos(lat))) dlat *= 180./np.pi dlon *= 180./np.pi return abs(dlat), abs(dlon) def feature_extraction(): spray_df = pd.read_csv('spray.csv.gz', compression='gzip') spray_lat_lon_list = [] for idx, row in spray_df.iterrows(): spray_lat_lon_list.append((row['Latitude'], row['Longitude'])) weather_features = [] cumu_labels = ('Tmax', 'Tmin', 'PrecipTotal') cumu_features = {} cumu_total = 0 current_year = -1 with gzip.open('weather.csv.gz', 'r') as wfile: wcsv = csv.reader(wfile) weather_labels = next(wcsv) for row in wcsv: rowdict = dict(zip(weather_labels, row)) rowdict['Date'] = parse(rowdict['Date']) current_date = rowdict['Date'] if current_date.year != current_year: current_year = current_date.year cumu_features = {k: 0 for k in cumu_labels} cumu_total = 0 for k in WEATHER_VARS_WITH_M_T: if k in rowdict: rowdict[k] = rowdict[k].replace('M', 'nan') rowdict[k] = rowdict[k].replace('T', '0.0') for k in rowdict: if rowdict[k] == '-': rowdict[k] = 'nan' if type(rowdict[k]) == str: rowdict[k] = rowdict[k].strip() for ph in WEATHER_PHENOMENA: rowdict['wp%s' % ph] = '0' for ph in rowdict['CodeSum'].split(): if ph in WEATHER_PHENOMENA:
for lab in cumu_labels: _tmp = float(rowdict[lab]) if not np.isnan(_tmp): cumu_features[lab] += _tmp cumu_total += 1 for lab in ('Tmax', 'Tmin', 'PrecipTotal'): rowdict['%s_cumu' % lab] = cumu_features[lab] / cumu_total weather_features.append(rowdict) # print('\n'.join(['%s: %s' % (k, rowdict[k]) for k in rowdict])) # exit(0) for ph in WEATHER_PHENOMENA: weather_labels.append('wp%s' % ph) for lab in cumu_labels: weather_labels.append('%s_cumu' % lab) for prefix in 'train', 'test': with gzip.open('%s.csv.gz' % prefix, 'rb') as csvfile: outfile = gzip.open('%s_full.csv.gz' % prefix, 'wb') csv_reader = csv.reader(csvfile) labels = next(csv_reader) out_labels = labels +\ ['n_spray_%d' % x for x in range(1,11)] for lab in weather_labels: if lab == 'Date': continue out_labels.append(lab) csv_writer = csv.writer(outfile) csv_writer.writerow(out_labels) for idx, row in enumerate(csv_reader): if idx % 1000 == 0: print('processed %d' % idx) # if idx > 100: # exit(0) row_dict = dict(zip(labels, row)) current_date = parse(row_dict['Date']) cur_lat = float(row_dict['Latitude']) cur_lon = float(row_dict['Longitude']) for idx in range(1, 11): row_dict['n_spray_%d' % idx] = 0 dlat, dlon = lat_lon_box(cur_lat, cur_lon, 1.5) for slat, slon in spray_lat_lon_list: # print(dlat, dlon, abs(slat-cur_lat), abs(slon-cur_lon)) if abs(slat-cur_lat) > dlat or abs(slon-cur_lon) > dlon: continue sdist = haversine_distance(cur_lat, cur_lon, slat, slon) for idx in range(1,11): if sdist < idx/10.0: row_dict['n_spray_%d' % idx] += 1 for lab in ['Tmax_cumu', 'Tmin_cumu', 'PrecipTotal_cumu']: row_dict[lab] = 0 most_recent = 1000000 most_recent_w = weather_features[0] for wfeat in weather_features: wdate = wfeat['Date'] if current_date.year != wdate.year: continue wdur = abs((current_date - wdate).days) if wdur < most_recent: most_recent = wdur most_recent_w = wfeat for lab in weather_labels: if lab == 'Date': continue row_dict[lab] = most_recent_w[lab] row_val = [row_dict[col] for col in out_labels] csv_writer.writerow(row_val) # outfile.flush() # print('\n'.join(['%s: %s' % (k, row_dict[k]) for k in row_dict])) # exit(0) return if __name__ == '__main__': feature_extraction()
rowdict['wp%s' % ph] = '1'
index2.js
//Read by using fs module const fs = require("fs"); fs.readFile("user.json", function(err, data) { if (err) throw err; const users = JSON.parse(data);
console.log(users); });
container_inspect.go
package libpod import ( "fmt" "sort" "strings" "github.com/containers/common/pkg/config" "github.com/containers/podman/v3/libpod/define" "github.com/containers/podman/v3/libpod/driver" "github.com/containers/podman/v3/pkg/util" units "github.com/docker/go-units" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/runtime-tools/generate" "github.com/opencontainers/runtime-tools/validate" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/syndtr/gocapability/capability" ) // inspectLocked inspects a container for low-level information. // The caller must held c.lock. func (c *Container) inspectLocked(size bool) (*define.InspectContainerData, error) { storeCtr, err := c.runtime.store.Container(c.ID()) if err != nil { return nil, errors.Wrapf(err, "error getting container from store %q", c.ID()) } layer, err := c.runtime.store.Layer(storeCtr.LayerID) if err != nil { return nil, errors.Wrapf(err, "error reading information about layer %q", storeCtr.LayerID) } driverData, err := driver.GetDriverData(c.runtime.store, layer.ID) if err != nil { return nil, errors.Wrapf(err, "error getting graph driver info %q", c.ID()) } return c.getContainerInspectData(size, driverData) } // Inspect a container for low-level information func (c *Container) Inspect(size bool) (*define.InspectContainerData, error) { if !c.batched { c.lock.Lock() defer c.lock.Unlock() if err := c.syncContainer(); err != nil { return nil, err } } return c.inspectLocked(size) } func (c *Container) getContainerInspectData(size bool, driverData *define.DriverData) (*define.InspectContainerData, error) { config := c.config runtimeInfo := c.state ctrSpec, err := c.specFromState() if err != nil { return nil, err } // Process is allowed to be nil in the stateSpec args := []string{} if config.Spec.Process != nil { args = config.Spec.Process.Args } var path string if len(args) > 0 { path = args[0] } if len(args) > 1 { args = args[1:] } execIDs := []string{} for id := range c.state.ExecSessions { execIDs = append(execIDs, id) } resolvPath := "" hostsPath := "" hostnamePath := "" if c.state.BindMounts != nil { if getPath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok { resolvPath = getPath } if getPath, ok := c.state.BindMounts["/etc/hosts"]; ok { hostsPath = getPath } if getPath, ok := c.state.BindMounts["/etc/hostname"]; ok { hostnamePath = getPath } } namedVolumes, mounts := c.sortUserVolumes(ctrSpec) inspectMounts, err := c.GetInspectMounts(namedVolumes, c.config.ImageVolumes, mounts) if err != nil { return nil, err } cgroupPath, err := c.cGroupPath() if err != nil { // Handle the case where the container is not running or has no cgroup. if errors.Is(err, define.ErrNoCgroups) || errors.Is(err, define.ErrCtrStopped) { cgroupPath = "" } else { return nil, err } } data := &define.InspectContainerData{ ID: config.ID, Created: config.CreatedTime, Path: path, Args: args, State: &define.InspectContainerState{ OciVersion: ctrSpec.Version, Status: runtimeInfo.State.String(), Running: runtimeInfo.State == define.ContainerStateRunning, Paused: runtimeInfo.State == define.ContainerStatePaused, OOMKilled: runtimeInfo.OOMKilled, Dead: runtimeInfo.State.String() == "bad state", Pid: runtimeInfo.PID, ConmonPid: runtimeInfo.ConmonPID, ExitCode: runtimeInfo.ExitCode, Error: "", // can't get yet StartedAt: runtimeInfo.StartedTime, FinishedAt: runtimeInfo.FinishedTime, Checkpointed: runtimeInfo.Checkpointed, CgroupPath: cgroupPath, RestoredAt: runtimeInfo.RestoredTime, CheckpointedAt: runtimeInfo.CheckpointedTime, Restored: runtimeInfo.Restored, CheckpointPath: runtimeInfo.CheckpointPath, CheckpointLog: runtimeInfo.CheckpointLog, RestoreLog: runtimeInfo.RestoreLog, }, Image: config.RootfsImageID, ImageName: config.RootfsImageName, Namespace: config.Namespace, Rootfs: config.Rootfs, Pod: config.Pod, ResolvConfPath: resolvPath, HostnamePath: hostnamePath, HostsPath: hostsPath, StaticDir: config.StaticDir, OCIRuntime: config.OCIRuntime, ConmonPidFile: config.ConmonPidFile, PidFile: config.PidFile, Name: config.Name, RestartCount: int32(runtimeInfo.RestartCount), Driver: driverData.Name, MountLabel: config.MountLabel, ProcessLabel: config.ProcessLabel, EffectiveCaps: ctrSpec.Process.Capabilities.Effective, BoundingCaps: ctrSpec.Process.Capabilities.Bounding, AppArmorProfile: ctrSpec.Process.ApparmorProfile, ExecIDs: execIDs, GraphDriver: driverData, Mounts: inspectMounts, Dependencies: c.Dependencies(), IsInfra: c.IsInfra(), } if c.state.ConfigPath != "" { data.OCIConfigPath = c.state.ConfigPath } if c.config.HealthCheckConfig != nil { // This container has a healthcheck defined in it; we need to add it's state healthCheckState, err := c.getHealthCheckLog() if err != nil { // An error here is not considered fatal; no health state will be displayed logrus.Error(err) } else { data.State.Health = healthCheckState } } networkConfig, err := c.getContainerNetworkInfo() if err != nil { return nil, err } data.NetworkSettings = networkConfig inspectConfig := c.generateInspectContainerConfig(ctrSpec) data.Config = inspectConfig hostConfig, err := c.generateInspectContainerHostConfig(ctrSpec, namedVolumes, mounts) if err != nil { return nil, err } data.HostConfig = hostConfig if size { rootFsSize, err := c.rootFsSize() if err != nil { logrus.Errorf("Getting rootfs size %q: %v", config.ID, err) } data.SizeRootFs = rootFsSize rwSize, err := c.rwSize() if err != nil { logrus.Errorf("Getting rw size %q: %v", config.ID, err) } data.SizeRw = &rwSize } return data, nil } // Get inspect-formatted mounts list. // Only includes user-specified mounts. Only includes bind mounts and named // volumes, not tmpfs volumes. func (c *Container) GetInspectMounts(namedVolumes []*ContainerNamedVolume, imageVolumes []*ContainerImageVolume, mounts []spec.Mount) ([]define.InspectMount, error) { inspectMounts := []define.InspectMount{} // No mounts, return early if len(c.config.UserVolumes) == 0 { return inspectMounts, nil } for _, volume := range namedVolumes { mountStruct := define.InspectMount{} mountStruct.Type = "volume" mountStruct.Destination = volume.Dest mountStruct.Name = volume.Name // For src and driver, we need to look up the named // volume. volFromDB, err := c.runtime.state.Volume(volume.Name) if err != nil { return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID()) } mountStruct.Driver = volFromDB.Driver() mountPoint, err := volFromDB.MountPoint() if err != nil { return nil, err } mountStruct.Source = mountPoint parseMountOptionsForInspect(volume.Options, &mountStruct) inspectMounts = append(inspectMounts, mountStruct) } for _, volume := range imageVolumes { mountStruct := define.InspectMount{} mountStruct.Type = "image" mountStruct.Destination = volume.Dest mountStruct.Source = volume.Source mountStruct.RW = volume.ReadWrite inspectMounts = append(inspectMounts, mountStruct) } for _, mount := range mounts { // It's a mount. // Is it a tmpfs? If so, discard. if mount.Type == "tmpfs" { continue } mountStruct := define.InspectMount{} mountStruct.Type = "bind" mountStruct.Source = mount.Source mountStruct.Destination = mount.Destination parseMountOptionsForInspect(mount.Options, &mountStruct)
} // Parse mount options so we can populate them in the mount structure. // The mount passed in will be modified. func parseMountOptionsForInspect(options []string, mount *define.InspectMount) { isRW := true mountProp := "" zZ := "" otherOpts := []string{} // Some of these may be overwritten if the user passes us garbage opts // (for example, [ro,rw]) // We catch these on the Podman side, so not a problem there, but other // users of libpod who do not properly validate mount options may see // this. // Not really worth dealing with on our end - garbage in, garbage out. for _, opt := range options { switch opt { case "ro": isRW = false case "rw": // Do nothing, silently discard case "shared", "slave", "private", "rshared", "rslave", "rprivate", "unbindable", "runbindable": mountProp = opt case "z", "Z": zZ = opt default: otherOpts = append(otherOpts, opt) } } mount.RW = isRW mount.Propagation = mountProp mount.Mode = zZ mount.Options = otherOpts } // Generate the InspectContainerConfig struct for the Config field of Inspect. func (c *Container) generateInspectContainerConfig(spec *spec.Spec) *define.InspectContainerConfig { ctrConfig := new(define.InspectContainerConfig) ctrConfig.Hostname = c.Hostname() ctrConfig.User = c.config.User if spec.Process != nil { ctrConfig.Tty = spec.Process.Terminal ctrConfig.Env = append([]string{}, spec.Process.Env...) ctrConfig.WorkingDir = spec.Process.Cwd } ctrConfig.StopTimeout = c.config.StopTimeout ctrConfig.Timeout = c.config.Timeout ctrConfig.OpenStdin = c.config.Stdin ctrConfig.Image = c.config.RootfsImageName ctrConfig.SystemdMode = c.config.Systemd // Leave empty is not explicitly overwritten by user if len(c.config.Command) != 0 { ctrConfig.Cmd = []string{} ctrConfig.Cmd = append(ctrConfig.Cmd, c.config.Command...) } // Leave empty if not explicitly overwritten by user if len(c.config.Entrypoint) != 0 { ctrConfig.Entrypoint = strings.Join(c.config.Entrypoint, " ") } if len(c.config.Labels) != 0 { ctrConfig.Labels = make(map[string]string) for k, v := range c.config.Labels { ctrConfig.Labels[k] = v } } if len(spec.Annotations) != 0 { ctrConfig.Annotations = make(map[string]string) for k, v := range spec.Annotations { ctrConfig.Annotations[k] = v } } ctrConfig.StopSignal = c.config.StopSignal // TODO: should JSON deep copy this to ensure internal pointers don't // leak. ctrConfig.Healthcheck = c.config.HealthCheckConfig ctrConfig.CreateCommand = c.config.CreateCommand ctrConfig.Timezone = c.config.Timezone for _, secret := range c.config.Secrets { newSec := define.InspectSecret{} newSec.Name = secret.Name newSec.ID = secret.ID newSec.UID = secret.UID newSec.GID = secret.GID newSec.Mode = secret.Mode ctrConfig.Secrets = append(ctrConfig.Secrets, &newSec) } // Pad Umask to 4 characters if len(c.config.Umask) < 4 { pad := strings.Repeat("0", 4-len(c.config.Umask)) ctrConfig.Umask = pad + c.config.Umask } else { ctrConfig.Umask = c.config.Umask } ctrConfig.Passwd = c.config.Passwd return ctrConfig } // Generate the InspectContainerHostConfig struct for the HostConfig field of // Inspect. func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, namedVolumes []*ContainerNamedVolume, mounts []spec.Mount) (*define.InspectContainerHostConfig, error) { hostConfig := new(define.InspectContainerHostConfig) logConfig := new(define.InspectLogConfig) logConfig.Type = c.config.LogDriver logConfig.Path = c.config.LogPath logConfig.Size = units.HumanSize(float64(c.config.LogSize)) logConfig.Tag = c.config.LogTag hostConfig.LogConfig = logConfig restartPolicy := new(define.InspectRestartPolicy) restartPolicy.Name = c.config.RestartPolicy restartPolicy.MaximumRetryCount = c.config.RestartRetries hostConfig.RestartPolicy = restartPolicy if c.config.NoCgroups { hostConfig.Cgroups = "disabled" } else { hostConfig.Cgroups = "default" } hostConfig.Dns = make([]string, 0, len(c.config.DNSServer)) for _, dns := range c.config.DNSServer { hostConfig.Dns = append(hostConfig.Dns, dns.String()) } hostConfig.DnsOptions = make([]string, 0, len(c.config.DNSOption)) hostConfig.DnsOptions = append(hostConfig.DnsOptions, c.config.DNSOption...) hostConfig.DnsSearch = make([]string, 0, len(c.config.DNSSearch)) hostConfig.DnsSearch = append(hostConfig.DnsSearch, c.config.DNSSearch...) hostConfig.ExtraHosts = make([]string, 0, len(c.config.HostAdd)) hostConfig.ExtraHosts = append(hostConfig.ExtraHosts, c.config.HostAdd...) hostConfig.GroupAdd = make([]string, 0, len(c.config.Groups)) hostConfig.GroupAdd = append(hostConfig.GroupAdd, c.config.Groups...) hostConfig.SecurityOpt = []string{} if ctrSpec.Process != nil { if ctrSpec.Process.OOMScoreAdj != nil { hostConfig.OomScoreAdj = *ctrSpec.Process.OOMScoreAdj } if ctrSpec.Process.NoNewPrivileges { hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, "no-new-privileges") } } hostConfig.ReadonlyRootfs = ctrSpec.Root.Readonly hostConfig.ShmSize = c.config.ShmSize hostConfig.Runtime = "oci" // This is very expensive to initialize. // So we don't want to initialize it unless we absolutely have to - IE, // there are things that require a major:minor to path translation. var deviceNodes map[string]string // Annotations if ctrSpec.Annotations != nil { hostConfig.ContainerIDFile = ctrSpec.Annotations[define.InspectAnnotationCIDFile] if ctrSpec.Annotations[define.InspectAnnotationAutoremove] == define.InspectResponseTrue { hostConfig.AutoRemove = true } if ctrs, ok := ctrSpec.Annotations[define.InspectAnnotationVolumesFrom]; ok { hostConfig.VolumesFrom = strings.Split(ctrs, ",") } if ctrSpec.Annotations[define.InspectAnnotationPrivileged] == define.InspectResponseTrue { hostConfig.Privileged = true } if ctrSpec.Annotations[define.InspectAnnotationInit] == define.InspectResponseTrue { hostConfig.Init = true } if label, ok := ctrSpec.Annotations[define.InspectAnnotationLabel]; ok { hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, fmt.Sprintf("label=%s", label)) } if seccomp, ok := ctrSpec.Annotations[define.InspectAnnotationSeccomp]; ok { hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, fmt.Sprintf("seccomp=%s", seccomp)) } if apparmor, ok := ctrSpec.Annotations[define.InspectAnnotationApparmor]; ok { hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, fmt.Sprintf("apparmor=%s", apparmor)) } } // Resource limits if ctrSpec.Linux != nil { if ctrSpec.Linux.Resources != nil { if ctrSpec.Linux.Resources.CPU != nil { if ctrSpec.Linux.Resources.CPU.Shares != nil { hostConfig.CpuShares = *ctrSpec.Linux.Resources.CPU.Shares } if ctrSpec.Linux.Resources.CPU.Period != nil { hostConfig.CpuPeriod = *ctrSpec.Linux.Resources.CPU.Period } if ctrSpec.Linux.Resources.CPU.Quota != nil { hostConfig.CpuQuota = *ctrSpec.Linux.Resources.CPU.Quota } if ctrSpec.Linux.Resources.CPU.RealtimePeriod != nil { hostConfig.CpuRealtimePeriod = *ctrSpec.Linux.Resources.CPU.RealtimePeriod } if ctrSpec.Linux.Resources.CPU.RealtimeRuntime != nil { hostConfig.CpuRealtimeRuntime = *ctrSpec.Linux.Resources.CPU.RealtimeRuntime } hostConfig.CpusetCpus = ctrSpec.Linux.Resources.CPU.Cpus hostConfig.CpusetMems = ctrSpec.Linux.Resources.CPU.Mems } if ctrSpec.Linux.Resources.Memory != nil { if ctrSpec.Linux.Resources.Memory.Limit != nil { hostConfig.Memory = *ctrSpec.Linux.Resources.Memory.Limit } if ctrSpec.Linux.Resources.Memory.Reservation != nil { hostConfig.MemoryReservation = *ctrSpec.Linux.Resources.Memory.Reservation } if ctrSpec.Linux.Resources.Memory.Swap != nil { hostConfig.MemorySwap = *ctrSpec.Linux.Resources.Memory.Swap } if ctrSpec.Linux.Resources.Memory.Swappiness != nil { hostConfig.MemorySwappiness = int64(*ctrSpec.Linux.Resources.Memory.Swappiness) } else { // Swappiness has a default of -1 hostConfig.MemorySwappiness = -1 } if ctrSpec.Linux.Resources.Memory.DisableOOMKiller != nil { hostConfig.OomKillDisable = *ctrSpec.Linux.Resources.Memory.DisableOOMKiller } } if ctrSpec.Linux.Resources.Pids != nil { hostConfig.PidsLimit = ctrSpec.Linux.Resources.Pids.Limit } hostConfig.CgroupConf = ctrSpec.Linux.Resources.Unified if ctrSpec.Linux.Resources.BlockIO != nil { if ctrSpec.Linux.Resources.BlockIO.Weight != nil { hostConfig.BlkioWeight = *ctrSpec.Linux.Resources.BlockIO.Weight } hostConfig.BlkioWeightDevice = []define.InspectBlkioWeightDevice{} for _, dev := range ctrSpec.Linux.Resources.BlockIO.WeightDevice { key := fmt.Sprintf("%d:%d", dev.Major, dev.Minor) // TODO: how do we handle LeafWeight vs // Weight? For now, ignore anything // without Weight set. if dev.Weight == nil { logrus.Infof("Ignoring weight device %s as it lacks a weight", key) continue } if deviceNodes == nil { nodes, err := util.FindDeviceNodes() if err != nil { return nil, err } deviceNodes = nodes } path, ok := deviceNodes[key] if !ok { logrus.Infof("Could not locate weight device %s in system devices", key) continue } weightDev := define.InspectBlkioWeightDevice{} weightDev.Path = path weightDev.Weight = *dev.Weight hostConfig.BlkioWeightDevice = append(hostConfig.BlkioWeightDevice, weightDev) } readBps, err := blkioDeviceThrottle(deviceNodes, ctrSpec.Linux.Resources.BlockIO.ThrottleReadBpsDevice) if err != nil { return nil, err } hostConfig.BlkioDeviceReadBps = readBps writeBps, err := blkioDeviceThrottle(deviceNodes, ctrSpec.Linux.Resources.BlockIO.ThrottleWriteBpsDevice) if err != nil { return nil, err } hostConfig.BlkioDeviceWriteBps = writeBps readIops, err := blkioDeviceThrottle(deviceNodes, ctrSpec.Linux.Resources.BlockIO.ThrottleReadIOPSDevice) if err != nil { return nil, err } hostConfig.BlkioDeviceReadIOps = readIops writeIops, err := blkioDeviceThrottle(deviceNodes, ctrSpec.Linux.Resources.BlockIO.ThrottleWriteIOPSDevice) if err != nil { return nil, err } hostConfig.BlkioDeviceWriteIOps = writeIops } } } // NanoCPUs. // This is only calculated if CpuPeriod == 100000. // It is given in nanoseconds, versus the microseconds used elsewhere - // so multiply by 10000 (not sure why, but 1000 is off by 10). if hostConfig.CpuPeriod == 100000 { hostConfig.NanoCpus = 10000 * hostConfig.CpuQuota } // Bind mounts, formatted as src:dst. // We'll be appending some options that aren't necessarily in the // original command line... but no helping that from inside libpod. binds := []string{} tmpfs := make(map[string]string) for _, namedVol := range namedVolumes { if len(namedVol.Options) > 0 { binds = append(binds, fmt.Sprintf("%s:%s:%s", namedVol.Name, namedVol.Dest, strings.Join(namedVol.Options, ","))) } else { binds = append(binds, fmt.Sprintf("%s:%s", namedVol.Name, namedVol.Dest)) } } for _, mount := range mounts { if mount.Type == "tmpfs" { tmpfs[mount.Destination] = strings.Join(mount.Options, ",") } else { // TODO - maybe we should parse for empty source/destination // here. Would be confusing if we print just a bare colon. if len(mount.Options) > 0 { binds = append(binds, fmt.Sprintf("%s:%s:%s", mount.Source, mount.Destination, strings.Join(mount.Options, ","))) } else { binds = append(binds, fmt.Sprintf("%s:%s", mount.Source, mount.Destination)) } } } hostConfig.Binds = binds hostConfig.Tmpfs = tmpfs // Network mode parsing. networkMode := c.NetworkMode() hostConfig.NetworkMode = networkMode // Port bindings. // Only populate if we're using CNI to configure the network. if c.config.CreateNetNS { hostConfig.PortBindings = makeInspectPortBindings(c.config.PortMappings, c.config.ExposedPorts) } else { hostConfig.PortBindings = make(map[string][]define.InspectHostPort) } // Cap add and cap drop. // We need a default set of capabilities to compare against. // The OCI generate package has one, and is commonly used, so we'll // use it. // Problem: there are 5 sets of capabilities. // Use the bounding set for this computation, it's the most encompassing // (but still not perfect). capAdd := []string{} capDrop := []string{} // No point in continuing if we got a spec without a Process block... if ctrSpec.Process != nil { // Max an O(1) lookup table for default bounding caps. boundingCaps := make(map[string]bool) g, err := generate.New("linux") if err != nil { return nil, err } if !hostConfig.Privileged { for _, cap := range g.Config.Process.Capabilities.Bounding { boundingCaps[cap] = true } } else { // If we are privileged, use all caps. for _, cap := range capability.List() { if g.HostSpecific && cap > validate.LastCap() { continue } boundingCaps[fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String()))] = true } } // Iterate through spec caps. // If it's not in default bounding caps, it was added. // If it is, delete from the default set. Whatever remains after // we finish are the dropped caps. for _, cap := range ctrSpec.Process.Capabilities.Bounding { if _, ok := boundingCaps[cap]; ok { delete(boundingCaps, cap) } else { capAdd = append(capAdd, cap) } } for cap := range boundingCaps { capDrop = append(capDrop, cap) } // Sort CapDrop so it displays in consistent order (GH #9490) sort.Strings(capDrop) } hostConfig.CapAdd = capAdd hostConfig.CapDrop = capDrop // IPC Namespace mode ipcMode := "" if c.config.IPCNsCtr != "" { ipcMode = fmt.Sprintf("container:%s", c.config.IPCNsCtr) } else if ctrSpec.Linux != nil { // Locate the spec's IPC namespace. // If there is none, it's ipc=host. // If there is one and it has a path, it's "ns:". // If no path, it's default - the empty string. for _, ns := range ctrSpec.Linux.Namespaces { if ns.Type == spec.IPCNamespace { if ns.Path != "" { ipcMode = fmt.Sprintf("ns:%s", ns.Path) } else { ipcMode = "private" } break } } if ipcMode == "" { ipcMode = "host" } } hostConfig.IpcMode = ipcMode // Cgroup namespace mode cgroupMode := "" if c.config.CgroupNsCtr != "" { cgroupMode = fmt.Sprintf("container:%s", c.config.CgroupNsCtr) } else if ctrSpec.Linux != nil { // Locate the spec's cgroup namespace // If there is none, it's cgroup=host. // If there is one and it has a path, it's "ns:". // If there is no path, it's private. for _, ns := range ctrSpec.Linux.Namespaces { if ns.Type == spec.CgroupNamespace { if ns.Path != "" { cgroupMode = fmt.Sprintf("ns:%s", ns.Path) } else { cgroupMode = "private" } } } if cgroupMode == "" { cgroupMode = "host" } } hostConfig.CgroupMode = cgroupMode // CGroup parent // Need to check if it's the default, and not print if so. defaultCgroupParent := "" switch c.CgroupManager() { case config.CgroupfsCgroupsManager: defaultCgroupParent = CgroupfsDefaultCgroupParent case config.SystemdCgroupsManager: defaultCgroupParent = SystemdDefaultCgroupParent } if c.config.CgroupParent != defaultCgroupParent { hostConfig.CgroupParent = c.config.CgroupParent } hostConfig.CgroupManager = c.CgroupManager() // PID namespace mode pidMode := "" if c.config.PIDNsCtr != "" { pidMode = fmt.Sprintf("container:%s", c.config.PIDNsCtr) } else if ctrSpec.Linux != nil { // Locate the spec's PID namespace. // If there is none, it's pid=host. // If there is one and it has a path, it's "ns:". // If there is no path, it's default - the empty string. for _, ns := range ctrSpec.Linux.Namespaces { if ns.Type == spec.PIDNamespace { if ns.Path != "" { pidMode = fmt.Sprintf("ns:%s", ns.Path) } else { pidMode = "private" } break } } if pidMode == "" { pidMode = "host" } } hostConfig.PidMode = pidMode // UTS namespace mode utsMode := "" if c.config.UTSNsCtr != "" { utsMode = fmt.Sprintf("container:%s", c.config.UTSNsCtr) } else if ctrSpec.Linux != nil { // Locate the spec's UTS namespace. // If there is none, it's uts=host. // If there is one and it has a path, it's "ns:". // If there is no path, it's default - the empty string. for _, ns := range ctrSpec.Linux.Namespaces { if ns.Type == spec.UTSNamespace { if ns.Path != "" { utsMode = fmt.Sprintf("ns:%s", ns.Path) } else { utsMode = "private" } break } } if utsMode == "" { utsMode = "host" } } hostConfig.UTSMode = utsMode // User namespace mode usernsMode := "" if c.config.UserNsCtr != "" { usernsMode = fmt.Sprintf("container:%s", c.config.UserNsCtr) } else if ctrSpec.Linux != nil { // Locate the spec's user namespace. // If there is none, it's default - the empty string. // If there is one, it's "private" if no path, or "ns:" if // there's a path. for _, ns := range ctrSpec.Linux.Namespaces { if ns.Type == spec.UserNamespace { if ns.Path != "" { usernsMode = fmt.Sprintf("ns:%s", ns.Path) } else { usernsMode = "private" } } } } hostConfig.UsernsMode = usernsMode // Devices // Do not include if privileged - assumed that all devices will be // included. var err error hostConfig.Devices, err = c.GetDevices(*&hostConfig.Privileged, *ctrSpec, deviceNodes) if err != nil { return nil, err } // Ulimits hostConfig.Ulimits = []define.InspectUlimit{} if ctrSpec.Process != nil { for _, limit := range ctrSpec.Process.Rlimits { newLimit := define.InspectUlimit{} newLimit.Name = limit.Type newLimit.Soft = int64(limit.Soft) newLimit.Hard = int64(limit.Hard) hostConfig.Ulimits = append(hostConfig.Ulimits, newLimit) } } // Terminal size // We can't actually get this for now... // So default to something sane. // TODO: Populate this. hostConfig.ConsoleSize = []uint{0, 0} return hostConfig, nil } // Return true if the container is running in the host's PID NS. func (c *Container) inHostPidNS() (bool, error) { if c.config.PIDNsCtr != "" { return false, nil } ctrSpec, err := c.specFromState() if err != nil { return false, err } if ctrSpec.Linux != nil { // Locate the spec's PID namespace. // If there is none, it's pid=host. // If there is one and it has a path, it's "ns:". // If there is no path, it's default - the empty string. for _, ns := range ctrSpec.Linux.Namespaces { if ns.Type == spec.PIDNamespace { return false, nil } } } return true, nil } func (c *Container) GetDevices(priv bool, ctrSpec spec.Spec, deviceNodes map[string]string) ([]define.InspectDevice, error) { devices := []define.InspectDevice{} if ctrSpec.Linux != nil && !priv { for _, dev := range ctrSpec.Linux.Devices { key := fmt.Sprintf("%d:%d", dev.Major, dev.Minor) if deviceNodes == nil { nodes, err := util.FindDeviceNodes() if err != nil { return nil, err } deviceNodes = nodes } path, ok := deviceNodes[key] if !ok { logrus.Warnf("Could not locate device %s on host", key) continue } newDev := define.InspectDevice{} newDev.PathOnHost = path newDev.PathInContainer = dev.Path devices = append(devices, newDev) } } return devices, nil } func blkioDeviceThrottle(deviceNodes map[string]string, devs []spec.LinuxThrottleDevice) ([]define.InspectBlkioThrottleDevice, error) { out := []define.InspectBlkioThrottleDevice{} for _, dev := range devs { key := fmt.Sprintf("%d:%d", dev.Major, dev.Minor) if deviceNodes == nil { nodes, err := util.FindDeviceNodes() if err != nil { return nil, err } deviceNodes = nodes } path, ok := deviceNodes[key] if !ok { logrus.Infof("Could not locate throttle device %s in system devices", key) continue } throttleDev := define.InspectBlkioThrottleDevice{} throttleDev.Path = path throttleDev.Rate = dev.Rate out = append(out, throttleDev) } return out, nil }
inspectMounts = append(inspectMounts, mountStruct) } return inspectMounts, nil
panic-arg.rs
// run-fail // error-pattern:woe // ignore-emscripten no processes fn f(a: isize) { println!("{}", a); } fn main()
{ f(panic!("woe")); }
export_FaceEmbedding.py
""" Exports the embeddings of a directory of images as numpy arrays. Following structure: D:\images: folder1: img_0 ... img_74 folder2: img_0 ... img_74 Output: embeddings.npy -- Embeddings as np array (with names "folder1", "folder2", etc.) Use --is_aligned False, if your images aren't already pre-aligned Use --image_batch to dictacte how many images to load in memory at a time. Started with export_embeddings.py from Charles Jekel, and modified the program to export the face embeddings for the audio-visual speech separation model. The pretrained model is from David Sandberg's facenet repository: https://github.com/davidsandberg/facenet export_embedding.py from same project: https://github.com/davidsandberg/facenet/tree/master/contributed Ensure you have set the PYTHONPATH for the pretrained facenet (3.): https://github.com/davidsandberg/facenet/wiki/Validate-on-LFW Execution: python export_FaceEmbedding.py models\20180402-114759\20180402-114759.pb D:\images --is_aligned False --image_size 160 --gpu_memory_fraction 0.5 --image_batch 75 Sereina Scherrer 2019 """ # MIT License # # Copyright (c) 2016 David Sandberg # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import absolute_import from __future__ import division from __future__ import print_function import time from scipy import misc import tensorflow as tf import numpy as np import sys import os import argparse import facenet import align.detect_face import re import glob from six.moves import xrange def atoi(text): return int(text) if text.isdigit() else text def natural_keys(text): return [atoi(c) for c in re.split(r'(\d+)', text)] def
(args): train_set = facenet.get_dataset(args.data_dir) image_list, label_list = facenet.get_image_paths_and_labels(train_set) # sort the image:s img_0 ... img_74 image_list.sort(key=natural_keys) # fetch the classes (labels as strings) exactly as it's done in get_dataset path_exp = os.path.expanduser(args.data_dir) classes = [path for path in os.listdir(path_exp) \ if os.path.isdir(os.path.join(path_exp, path))] classes.sort() # get the label strings label_strings = [name for name in classes if \ os.path.isdir(os.path.join(path_exp, name))] # define path to save the embeddings dirs = ["./emb/embeddings_AVspeech/"] for d in dirs: if not os.path.exists(d): os.makedirs(d) print("Folder created:", d) with tf.Graph().as_default(): with tf.Session() as sess: # Load the model facenet.load_model(args.model_dir) # Get input and output tensors images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0") embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0") phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0") # Run forward pass to calculate embeddings nrof_images = len(image_list) print('Number of images: ', nrof_images) batch_size = args.image_batch if nrof_images % batch_size == 0: nrof_batches = nrof_images // batch_size else: nrof_batches = (nrof_images // batch_size) + 1 print('Number of batches: ', nrof_batches) embedding_size = embeddings.get_shape()[1] emb_array = np.zeros((nrof_images, embedding_size)) start_time = time.time() for i in range(nrof_batches): if i == nrof_batches -1: n = nrof_images else: n = i*batch_size + batch_size # Get images for the batch if args.is_aligned is True: images = facenet.load_data(image_list[i*batch_size:n], False, False, args.image_size) else: images = load_and_align_data(image_list[i*batch_size:n], args.image_size, args.margin, args.gpu_memory_fraction) feed_dict = { images_placeholder: images, phase_train_placeholder:False } # Use the facenet model to calcualte embeddings embed = sess.run(embeddings, feed_dict=feed_dict) emb_array[i*batch_size:n, :] = embed # export the embedding s = dirs[0] + label_strings[i] + ".npy" np.save(s, embed) print('Completed batch', i+1, 'of', nrof_batches) run_time = time.time() - start_time print('Run time: ', run_time) print('Time per video: ',run_time/nrof_batches) def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction): print('Creating networks and loading parameters') with tf.Graph().as_default(): gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction) sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False)) with sess.as_default(): pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None) nrof_samples = len(image_paths) img_list = [None] * nrof_samples for i in xrange(nrof_samples): print(image_paths[i]) img = misc.imread(os.path.expanduser(image_paths[i])) aligned = misc.imresize(img, (image_size, image_size), interp='bilinear') prewhitened = facenet.prewhiten(aligned) img_list[i] = prewhitened # uncomment if you want to save the aligned images '''f = os.path.basename(image_paths[i]) #print(f) tmp_folder = re.split(r'\\', image_paths[i]) tmp_f = tmp_folder[-2] d = "./aligned/" + tmp_f + "/" if not os.path.exists(d): os.makedirs(d) print("Folder created:", d) misc.imsave(d + f, aligned)''' images = np.stack(img_list) return images def parse_arguments(argv): parser = argparse.ArgumentParser() parser.add_argument('model_dir', type=str, help='Directory containing the meta_file and ckpt_file') parser.add_argument('data_dir', type=str, help='Directory containing images. If images are not already aligned and cropped include --is_aligned False.') parser.add_argument('--is_aligned', type=str, help='Is the data directory already aligned and cropped?', default=True) parser.add_argument('--image_size', type=int, help='Image size (height, width) in pixels.', default=160) parser.add_argument('--margin', type=int, help='Margin for the crop around the bounding box (height, width) in pixels.', default=44) parser.add_argument('--gpu_memory_fraction', type=float, help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0) parser.add_argument('--image_batch', type=int, help='Number of images stored in memory at a time. Default 75.', default=75) return parser.parse_args(argv) if __name__ == '__main__': main(parse_arguments(sys.argv[1:]))
main
adam.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import math from collections.abc import Collection from dataclasses import dataclass, field from typing import List import torch import torch.distributed as dist import torch.optim from fairseq.dataclass import FairseqDataclass from fairseq.optim import FairseqOptimizer, register_optimizer from fairseq.optim.fused_adam import get_fused_adam_class from omegaconf import II, DictConfig logger = logging.getLogger(__name__) @dataclass class FairseqAdamConfig(FairseqDataclass): adam_betas: str = field( default="(0.9, 0.999)", metadata={"help": "betas for Adam optimizer"} ) adam_eps: float = field( default=1e-8, metadata={"help": "epsilon for Adam optimizer"} ) weight_decay: float = field(default=0.0, metadata={"help": "weight decay"}) use_old_adam: bool = field( default=False, metadata={"help": "Use fairseq.optim.adam.Adam"} ) # TODO common vars below in parent tpu: bool = II("common.tpu") lr: List[float] = II("optimization.lr") @register_optimizer("adam", dataclass=FairseqAdamConfig) class FairseqAdam(FairseqOptimizer): """Adam optimizer for fairseq. Important note: this optimizer corresponds to the "AdamW" variant of Adam in its weight decay behavior. As such, it is most closely analogous to torch.optim.AdamW from PyTorch. """ def __init__(self, cfg: DictConfig, params):
@property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.cfg.lr[0] if isinstance(self.cfg.lr, Collection) else self.cfg.lr, "betas": eval(self.cfg.adam_betas), "eps": self.cfg.adam_eps, "weight_decay": self.cfg.weight_decay, } def average_params(self): """Reduce Params is only used during BMUF distributed training.""" state_dict = self.optimizer.state_dict() total_gpus = float(dist.get_world_size()) for _, value in state_dict["state"].items(): value["exp_avg"] /= total_gpus value["exp_avg_sq"] /= total_gpus dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM) dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM) class Adam(torch.optim.Optimizer): r"""Implements Adam algorithm. This implementation is modified from torch.optim.Adam based on: `Fixed Weight Decay Regularization in Adam` (see https://arxiv.org/abs/1711.05101) It has been proposed in `Adam: A Method for Stochastic Optimization`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, ): defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad ) super(Adam, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError( "Adam does not support sparse gradients, please consider SparseAdam instead" ) amsgrad = group.get("amsgrad", False) p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p_data_fp32) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p_data_fp32) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32) else: state["exp_avg"] = state["exp_avg"].to(p_data_fp32) state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32) if amsgrad: state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to( p_data_fp32 ) exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] if amsgrad: max_exp_avg_sq = state["max_exp_avg_sq"] beta1, beta2 = group["betas"] state["step"] += 1 # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) # Use the max. for normalizing running avg. of gradient denom = max_exp_avg_sq.sqrt().add_(group["eps"]) else: denom = exp_avg_sq.sqrt().add_(group["eps"]) bias_correction1 = 1 - beta1 ** state["step"] bias_correction2 = 1 - beta2 ** state["step"] step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1 if group["weight_decay"] != 0: p_data_fp32.add_( p_data_fp32, alpha=-group["weight_decay"] * group["lr"] ) p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) return loss
super().__init__(cfg) fused_adam_cls = get_fused_adam_class() use_fused_adam = ( not getattr(cfg, "use_old_adam", False) and fused_adam_cls is not None and torch.cuda.is_available() ) if getattr(cfg, "tpu", False): # on TPUs we use the Adam defined here, since it # automatically casts gradients to FP32 self._optimizer = Adam(params, **self.optimizer_config) elif use_fused_adam: logger.info("using FusedAdam") self._optimizer = fused_adam_cls(params, **self.optimizer_config) else: self._optimizer = Adam(params, **self.optimizer_config)
index.js
var FS = require("fire-fs"); Editor.Panel.extend({ style: FS.readFileSync(Editor.url('packages://creator-chat-room/panel/index.css', 'utf8')) + "", template: FS.readFileSync(Editor.url('packages://creator-chat-room/panel/index.html', 'utf8')) + "", $: {}, ready() { window.plugin = new window.Vue({ el: this.shadowRoot, created() {
}, init() { }, data: { word: "test word", }, methods: { _checkFir(url) { let xhr = new XMLHttpRequest(); xhr.onreadystatechange = function () { if (xhr.readyState === 4 && ((xhr.status >= 200 && xhr.status < 400))) { let text = xhr.responseText; let result = JSON.parse(text); console.log(result); } }; xhr.open('GET', url, true); xhr.send(); }, onTest() { console.log("test"); let url = "https://download.fir.im/game"; this._checkFir(url); return; let wildDog = Editor.require("packages://creator-chat-room/node_modules/wilddog"); let config = { syncURL: "https://wild-hare-4781.wilddogio.com" //输入节点 URL }; wildDog.initializeApp(config); let ref = wildDog.sync().ref(); ref.on('value', function (snapshot) { let str = snapshot.val(); this.word = str; console.log(str); }.bind(this)); }, }, }); }, messages: { 'creator-chat-room:hello'(event) { } } });
soundchip.rs
const SAMPLE_BUFSIZE: usize = 128; // Sound chip example. it generates GB style white noise. pub struct SoundChip { tap_b: u8, register: u16 } impl SoundChip { pub fn new() -> SoundChip
pub fn process(&mut self, out_ptr: *mut f32, _sample_count: u32) { // borrow output buffer from out_ptr pointer. let out_buf: &mut [f32] = unsafe { std::slice::from_raw_parts_mut(out_ptr, SAMPLE_BUFSIZE) }; for i in 0..SAMPLE_BUFSIZE { self.clock(); // Generate hex value and convert to f32 sample let hex = self.hex() as f32; let gain = 0.25_f32; let sample = ((hex / 7.5) - 1.0) * gain; // fill f32 sample to output buffer out_buf[i] = sample; }; } // clock the LFSR fn clock(&mut self) { let bit_a = self.register & 1; let bit_b = (self.register >> self.tap_b) & 1; let feedback = (bit_a ^ bit_b) << 14; self.register >>=1; self.register |= feedback; } // Get current hex value of LFSR fn hex(&self) -> u8 { (self.register & 0xF) as u8 } }
{ SoundChip { tap_b: 1_u8, register: 1_u16 } }
count_request_builder.go
package count import ( i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go" ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors" ) // CountRequestBuilder provides operations to count the resources in the collection. type CountRequestBuilder struct { // Path parameters for the request pathParameters map[string]string // The request adapter to use to execute the requests. requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter // Url template to use to build the URL for the current request builder urlTemplate string } // CountRequestBuilderGetRequestConfiguration configuration for the request such as headers, query parameters, and middleware options. type CountRequestBuilderGetRequestConfiguration struct { // Request headers Headers map[string]string // Request options Options []i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestOption } // NewCountRequestBuilderInternal instantiates a new CountRequestBuilder and sets the default values. func NewCountRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*CountRequestBuilder) { m := &CountRequestBuilder{ } m.urlTemplate = "{+baseurl}/me/joinedTeams/{team%2Did}/schedule/timeOffRequests/$count"; urlTplParams := make(map[string]string) for idx, item := range pathParameters { urlTplParams[idx] = item } m.pathParameters = urlTplParams; m.requestAdapter = requestAdapter; return m } // NewCountRequestBuilder instantiates a new CountRequestBuilder and sets the default values. func NewCountRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*CountRequestBuilder)
// CreateGetRequestInformation get the number of the resource func (m *CountRequestBuilder) CreateGetRequestInformation()(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) { return m.CreateGetRequestInformationWithRequestConfiguration(nil); } // CreateGetRequestInformationWithRequestConfiguration get the number of the resource func (m *CountRequestBuilder) CreateGetRequestInformationWithRequestConfiguration(requestConfiguration *CountRequestBuilderGetRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) { requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation() requestInfo.UrlTemplate = m.urlTemplate requestInfo.PathParameters = m.pathParameters requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.GET if requestConfiguration != nil { requestInfo.AddRequestHeaders(requestConfiguration.Headers) requestInfo.AddRequestOptions(requestConfiguration.Options) } return requestInfo, nil } // Get get the number of the resource func (m *CountRequestBuilder) Get()(*int32, error) { return m.GetWithRequestConfigurationAndResponseHandler(nil, nil); } // GetWithRequestConfigurationAndResponseHandler get the number of the resource func (m *CountRequestBuilder) GetWithRequestConfigurationAndResponseHandler(requestConfiguration *CountRequestBuilderGetRequestConfiguration, responseHandler i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ResponseHandler)(*int32, error) { requestInfo, err := m.CreateGetRequestInformationWithRequestConfiguration(requestConfiguration); if err != nil { return nil, err } errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings { "4XX": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue, "5XX": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue, } res, err := m.requestAdapter.SendPrimitiveAsync(requestInfo, "int32", responseHandler, errorMapping) if err != nil { return nil, err } return res.(*int32), nil }
{ urlParams := make(map[string]string) urlParams["request-raw-url"] = rawUrl return NewCountRequestBuilderInternal(urlParams, requestAdapter) }
accept_member.go
package ledgerdb //Licensed under the Apache License, Version 2.0 (the "License"); //you may not use this file except in compliance with the License. //You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // //Unless required by applicable law or agreed to in writing, software //distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //See the License for the specific language governing permissions and //limitations under the License. // // Code generated by Alibaba Cloud SDK Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" ) // AcceptMember invokes the ledgerdb.AcceptMember API synchronously // api document: https://help.aliyun.com/api/ledgerdb/acceptmember.html func (client *Client) AcceptMember(request *AcceptMemberRequest) (response *AcceptMemberResponse, err error) { response = CreateAcceptMemberResponse() err = client.DoAction(request, response) return } // AcceptMemberWithChan invokes the ledgerdb.AcceptMember API asynchronously // api document: https://help.aliyun.com/api/ledgerdb/acceptmember.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) AcceptMemberWithChan(request *AcceptMemberRequest) (<-chan *AcceptMemberResponse, <-chan error) { responseChan := make(chan *AcceptMemberResponse, 1) errChan := make(chan error, 1) err := client.AddAsyncTask(func() { defer close(responseChan) defer close(errChan) response, err := client.AcceptMember(request) if err != nil { errChan <- err } else { responseChan <- response } }) if err != nil { errChan <- err close(responseChan) close(errChan) } return responseChan, errChan } // AcceptMemberWithCallback invokes the ledgerdb.AcceptMember API asynchronously // api document: https://help.aliyun.com/api/ledgerdb/acceptmember.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) AcceptMemberWithCallback(request *AcceptMemberRequest, callback func(response *AcceptMemberResponse, err error)) <-chan int { result := make(chan int, 1) err := client.AddAsyncTask(func() { var response *AcceptMemberResponse var err error defer close(result) response, err = client.AcceptMember(request) callback(response, err) result <- 1 }) if err != nil { defer close(result) callback(nil, err) result <- 0 } return result } // AcceptMemberRequest is the request struct for api AcceptMember type AcceptMemberRequest struct { *requests.RpcRequest PublicKey string `position:"Body" name:"PublicKey"` KeyType string `position:"Body" name:"KeyType"` LedgerId string `position:"Body" name:"LedgerId"` } // AcceptMemberResponse is the response struct for api AcceptMember type AcceptMemberResponse struct { *responses.BaseResponse MemberId string `json:"MemberId" xml:"MemberId"` RequestId string `json:"RequestId" xml:"RequestId"` } // CreateAcceptMemberRequest creates a request to invoke AcceptMember API func CreateAcceptMemberRequest() (request *AcceptMemberRequest) { request = &AcceptMemberRequest{ RpcRequest: &requests.RpcRequest{}, } request.InitWithApiInfo("ledgerdb", "2019-11-22", "AcceptMember", "ledgerdb", "openAPI") return } // CreateAcceptMemberResponse creates a response to parse from AcceptMember response func CreateAcceptMemberResponse() (response *AcceptMemberResponse)
{ response = &AcceptMemberResponse{ BaseResponse: &responses.BaseResponse{}, } return }
message.rs
use ffi; use glib; use glib::object::IsA; use glib::translate::*; use glib::GString; use std::fmt; use std::ptr; use crate::Message; pub trait MessageExtManual: 'static { #[doc(alias = "g_mime_message_partial_split_message")] fn split(&self, max_size: usize) -> Vec<Message>; } impl<O: IsA<Message>> MessageExtManual for O { fn
(&self, max_size: usize) -> Vec<Message> { unsafe { let mut n_parts = ::std::mem::uninitialized(); let parts = ffi::g_mime_message_partial_split_message( self.as_ref().to_glib_none().0, max_size, &mut n_parts, ); FromGlibContainer::from_glib_full_num(parts, n_parts as usize) } } }
split
model_policy.go
/* * Appgate SDP Controller REST API * * # About This specification documents the REST API calls for the Appgate SDP Controller. Please refer to the Integration chapter in the manual or contact Appgate support with any questions about this functionality. # Getting Started Requirements for API scripting: - Access to the Admin/API TLS Connection (default port 8443) of a Controller appliance. (https://sdphelp.appgate.com/adminguide/appliance-functions-configure.html?anchor=admin-api) - An API user with relevant permissions. (https://sdphelp.appgate.com/adminguide/administrative-roles-configure.html) - In order to use the simple login API, Admin MFA must be disabled or the API user must be excluded. (https://sdphelp.appgate.com/adminguide/mfa-for-admins.html) # Base path HTTPS requests must be sent to the Admin Interface hostname and port, with **_/admin** path. For example: **https://appgate.company.com:8443/admin** All requests must have the **Accept** header as: **application/vnd.appgate.peer-v15+json** # API Conventions API conventions are important to understand and follow strictly. - While updating objects (via PUT), entire object must be sent with all fields. - For example, in order to add a remedy method to the condition below: ``` { \"id\": \"12699e27-b584-464a-81ee-5b4784b6d425\", \"name\": \"Test\", \"notes\": \"Making a point\", \"tags\": [\"test\", \"tag\"], \"expression\": \"return true;\", \"remedyMethods\": [] } ``` - send the entire object with updated and non-updated fields: ``` { \"id\": \"12699e27-b584-464a-81ee-5b4784b6d425\", \"name\": \"Test\", \"notes\": \"Making a point\", \"tags\": [\"test\", \"tag\"], \"expression\": \"return true;\", \"remedyMethods\": [{\"type\": \"DisplayMessage\", \"message\": \"test message\"}] } ``` - In case Controller returns an error (non-2xx HTTP status code), response body is JSON. The \"message\" field contains information about the error. HTTP 422 \"Unprocessable Entity\" has extra `errors` field to list all the issues with specific fields. - Empty string (\"\") is considered a different value than \"null\" or field being omitted from JSON. Omitting the field is recommend if no value is intended. Empty string (\"\") will be almost always rejected as invalid value. - There are common pattern between many objects: - **Configuration Objects**: There are many objects with common fields, namely \"id\", \"name\", \"notes\", \"created\" and \"updated\". These entities are listed, queried, created, updated and deleted in a similar fashion. - **Distinguished Name**: Users and Devices are identified with what is called Distinguished Names, as used in LDAP. The distinguished format that identifies a device and a user combination is \"CN=\\<Device ID\\>,CN=\\<username\\>,OU=\\<Identity Provider Name\\>\". Some objects have the \"userDistinguishedName\" field, which does not include the CN for Device ID. This identifies a user on every device. * * API version: API version 15 * Contact: [email protected] */ // Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. package openapi import ( "encoding/json" "time" ) // Policy struct for Policy type Policy struct { // ID of the object. Id string `json:"id"` // Name of the object. Name string `json:"name"` // Notes for the object. Used for documentation purposes. Notes *string `json:"notes,omitempty"` // Create date. Created *time.Time `json:"created,omitempty"` // Last update date. Updated *time.Time `json:"updated,omitempty"` // Array of tags. Tags *[]string `json:"tags,omitempty"` // If true, the Policy will be disregarded during authorization. Disabled *bool `json:"disabled,omitempty"` // A JavaScript expression that returns boolean. Criteria Scripts may be used by calling them as functions. Expression string `json:"expression"` // List of Entitlement IDs in this Policy. Entitlements *[]string `json:"entitlements,omitempty"` // List of Entitlement tags in this Policy. EntitlementLinks *[]string `json:"entitlementLinks,omitempty"` // List of Ringfence Rule IDs in this Policy. RingfenceRules *[]string `json:"ringfenceRules,omitempty"` // List of Ringfence Rule tags in this Policy. RingfenceRuleLinks *[]string `json:"ringfenceRuleLinks,omitempty"` // Will enable Tamper Proofing on desktop clients which will make sure the routes and ringfence configurations are not changed. TamperProofing *bool `json:"tamperProofing,omitempty"` // Site ID where all the Entitlements of this Policy must be deployed. This overrides Entitlement's own Site and to be used only in specific network layouts. Otherwise the assigned site on individual Entitlements will be used. OverrideSite *string `json:"overrideSite,omitempty"` ProxyAutoConfig *PolicyAllOfProxyAutoConfig `json:"proxyAutoConfig,omitempty"` TrustedNetworkCheck *PolicyAllOfTrustedNetworkCheck `json:"trustedNetworkCheck,omitempty"` ClientSettings *PolicyAllOfClientSettings `json:"clientSettings,omitempty"` // List of Administrative Role IDs in this Policy. AdministrativeRoles *[]string `json:"administrativeRoles,omitempty"` } // NewPolicy instantiates a new Policy object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed func NewPolicy(id string, name string, expression string) *Policy { this := Policy{} this.Id = id this.Name = name var disabled bool = false this.Disabled = &disabled this.Expression = expression var tamperProofing bool = true this.TamperProofing = &tamperProofing return &this } // NewPolicyWithDefaults instantiates a new Policy object // This constructor will only assign default values to properties that have it defined, // but it doesn't guarantee that properties required by API are set func NewPolicyWithDefaults() *Policy { this := Policy{} var disabled bool = false this.Disabled = &disabled var tamperProofing bool = true this.TamperProofing = &tamperProofing return &this } // GetId returns the Id field value func (o *Policy) GetId() string { if o == nil { var ret string return ret } return o.Id } // GetIdOk returns a tuple with the Id field value // and a boolean to check if the value has been set. func (o *Policy) GetIdOk() (*string, bool) { if o == nil { return nil, false } return &o.Id, true } // SetId sets field value func (o *Policy) SetId(v string) { o.Id = v } // GetName returns the Name field value func (o *Policy) GetName() string { if o == nil { var ret string return ret } return o.Name } // GetNameOk returns a tuple with the Name field value // and a boolean to check if the value has been set. func (o *Policy) GetNameOk() (*string, bool) { if o == nil { return nil, false } return &o.Name, true } // SetName sets field value func (o *Policy) SetName(v string) { o.Name = v } // GetNotes returns the Notes field value if set, zero value otherwise. func (o *Policy) GetNotes() string { if o == nil || o.Notes == nil { var ret string return ret } return *o.Notes } // GetNotesOk returns a tuple with the Notes field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetNotesOk() (*string, bool) { if o == nil || o.Notes == nil { return nil, false } return o.Notes, true } // HasNotes returns a boolean if a field has been set. func (o *Policy) HasNotes() bool { if o != nil && o.Notes != nil { return true } return false } // SetNotes gets a reference to the given string and assigns it to the Notes field. func (o *Policy) SetNotes(v string) { o.Notes = &v } // GetCreated returns the Created field value if set, zero value otherwise. func (o *Policy) GetCreated() time.Time { if o == nil || o.Created == nil { var ret time.Time return ret } return *o.Created } // GetCreatedOk returns a tuple with the Created field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetCreatedOk() (*time.Time, bool) { if o == nil || o.Created == nil { return nil, false } return o.Created, true } // HasCreated returns a boolean if a field has been set. func (o *Policy) HasCreated() bool { if o != nil && o.Created != nil { return true } return false } // SetCreated gets a reference to the given time.Time and assigns it to the Created field. func (o *Policy) SetCreated(v time.Time) { o.Created = &v } // GetUpdated returns the Updated field value if set, zero value otherwise. func (o *Policy) GetUpdated() time.Time { if o == nil || o.Updated == nil { var ret time.Time return ret } return *o.Updated } // GetUpdatedOk returns a tuple with the Updated field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetUpdatedOk() (*time.Time, bool) { if o == nil || o.Updated == nil { return nil, false } return o.Updated, true } // HasUpdated returns a boolean if a field has been set. func (o *Policy) HasUpdated() bool { if o != nil && o.Updated != nil { return true } return false } // SetUpdated gets a reference to the given time.Time and assigns it to the Updated field. func (o *Policy) SetUpdated(v time.Time) { o.Updated = &v } // GetTags returns the Tags field value if set, zero value otherwise. func (o *Policy) GetTags() []string { if o == nil || o.Tags == nil { var ret []string return ret } return *o.Tags } // GetTagsOk returns a tuple with the Tags field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetTagsOk() (*[]string, bool) { if o == nil || o.Tags == nil { return nil, false } return o.Tags, true } // HasTags returns a boolean if a field has been set. func (o *Policy) HasTags() bool { if o != nil && o.Tags != nil { return true } return false } // SetTags gets a reference to the given []string and assigns it to the Tags field. func (o *Policy) SetTags(v []string) { o.Tags = &v } // GetDisabled returns the Disabled field value if set, zero value otherwise. func (o *Policy) GetDisabled() bool { if o == nil || o.Disabled == nil { var ret bool return ret } return *o.Disabled } // GetDisabledOk returns a tuple with the Disabled field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetDisabledOk() (*bool, bool) { if o == nil || o.Disabled == nil { return nil, false } return o.Disabled, true } // HasDisabled returns a boolean if a field has been set. func (o *Policy) HasDisabled() bool { if o != nil && o.Disabled != nil { return true } return false } // SetDisabled gets a reference to the given bool and assigns it to the Disabled field. func (o *Policy) SetDisabled(v bool) { o.Disabled = &v } // GetExpression returns the Expression field value func (o *Policy) GetExpression() string { if o == nil { var ret string return ret } return o.Expression } // GetExpressionOk returns a tuple with the Expression field value // and a boolean to check if the value has been set. func (o *Policy) GetExpressionOk() (*string, bool) { if o == nil { return nil, false } return &o.Expression, true } // SetExpression sets field value func (o *Policy) SetExpression(v string) { o.Expression = v } // GetEntitlements returns the Entitlements field value if set, zero value otherwise. func (o *Policy) GetEntitlements() []string { if o == nil || o.Entitlements == nil { var ret []string return ret } return *o.Entitlements } // GetEntitlementsOk returns a tuple with the Entitlements field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetEntitlementsOk() (*[]string, bool) { if o == nil || o.Entitlements == nil { return nil, false } return o.Entitlements, true } // HasEntitlements returns a boolean if a field has been set. func (o *Policy) HasEntitlements() bool { if o != nil && o.Entitlements != nil { return true } return false } // SetEntitlements gets a reference to the given []string and assigns it to the Entitlements field. func (o *Policy) SetEntitlements(v []string) { o.Entitlements = &v } // GetEntitlementLinks returns the EntitlementLinks field value if set, zero value otherwise. func (o *Policy) GetEntitlementLinks() []string { if o == nil || o.EntitlementLinks == nil { var ret []string return ret } return *o.EntitlementLinks } // GetEntitlementLinksOk returns a tuple with the EntitlementLinks field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetEntitlementLinksOk() (*[]string, bool) { if o == nil || o.EntitlementLinks == nil { return nil, false } return o.EntitlementLinks, true } // HasEntitlementLinks returns a boolean if a field has been set. func (o *Policy) HasEntitlementLinks() bool { if o != nil && o.EntitlementLinks != nil { return true } return false } // SetEntitlementLinks gets a reference to the given []string and assigns it to the EntitlementLinks field. func (o *Policy) SetEntitlementLinks(v []string) { o.EntitlementLinks = &v } // GetRingfenceRules returns the RingfenceRules field value if set, zero value otherwise. func (o *Policy) GetRingfenceRules() []string { if o == nil || o.RingfenceRules == nil { var ret []string return ret } return *o.RingfenceRules } // GetRingfenceRulesOk returns a tuple with the RingfenceRules field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetRingfenceRulesOk() (*[]string, bool) { if o == nil || o.RingfenceRules == nil { return nil, false } return o.RingfenceRules, true } // HasRingfenceRules returns a boolean if a field has been set. func (o *Policy) HasRingfenceRules() bool { if o != nil && o.RingfenceRules != nil { return true } return false } // SetRingfenceRules gets a reference to the given []string and assigns it to the RingfenceRules field. func (o *Policy) SetRingfenceRules(v []string) { o.RingfenceRules = &v } // GetRingfenceRuleLinks returns the RingfenceRuleLinks field value if set, zero value otherwise. func (o *Policy) GetRingfenceRuleLinks() []string { if o == nil || o.RingfenceRuleLinks == nil { var ret []string return ret } return *o.RingfenceRuleLinks } // GetRingfenceRuleLinksOk returns a tuple with the RingfenceRuleLinks field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetRingfenceRuleLinksOk() (*[]string, bool) { if o == nil || o.RingfenceRuleLinks == nil { return nil, false } return o.RingfenceRuleLinks, true } // HasRingfenceRuleLinks returns a boolean if a field has been set. func (o *Policy) HasRingfenceRuleLinks() bool { if o != nil && o.RingfenceRuleLinks != nil { return true } return false } // SetRingfenceRuleLinks gets a reference to the given []string and assigns it to the RingfenceRuleLinks field. func (o *Policy) SetRingfenceRuleLinks(v []string) { o.RingfenceRuleLinks = &v } // GetTamperProofing returns the TamperProofing field value if set, zero value otherwise. func (o *Policy) GetTamperProofing() bool { if o == nil || o.TamperProofing == nil { var ret bool return ret } return *o.TamperProofing } // GetTamperProofingOk returns a tuple with the TamperProofing field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetTamperProofingOk() (*bool, bool) { if o == nil || o.TamperProofing == nil { return nil, false } return o.TamperProofing, true } // HasTamperProofing returns a boolean if a field has been set. func (o *Policy) HasTamperProofing() bool { if o != nil && o.TamperProofing != nil { return true } return false } // SetTamperProofing gets a reference to the given bool and assigns it to the TamperProofing field. func (o *Policy) SetTamperProofing(v bool) { o.TamperProofing = &v } // GetOverrideSite returns the OverrideSite field value if set, zero value otherwise. func (o *Policy) GetOverrideSite() string { if o == nil || o.OverrideSite == nil
return *o.OverrideSite } // GetOverrideSiteOk returns a tuple with the OverrideSite field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetOverrideSiteOk() (*string, bool) { if o == nil || o.OverrideSite == nil { return nil, false } return o.OverrideSite, true } // HasOverrideSite returns a boolean if a field has been set. func (o *Policy) HasOverrideSite() bool { if o != nil && o.OverrideSite != nil { return true } return false } // SetOverrideSite gets a reference to the given string and assigns it to the OverrideSite field. func (o *Policy) SetOverrideSite(v string) { o.OverrideSite = &v } // GetProxyAutoConfig returns the ProxyAutoConfig field value if set, zero value otherwise. func (o *Policy) GetProxyAutoConfig() PolicyAllOfProxyAutoConfig { if o == nil || o.ProxyAutoConfig == nil { var ret PolicyAllOfProxyAutoConfig return ret } return *o.ProxyAutoConfig } // GetProxyAutoConfigOk returns a tuple with the ProxyAutoConfig field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetProxyAutoConfigOk() (*PolicyAllOfProxyAutoConfig, bool) { if o == nil || o.ProxyAutoConfig == nil { return nil, false } return o.ProxyAutoConfig, true } // HasProxyAutoConfig returns a boolean if a field has been set. func (o *Policy) HasProxyAutoConfig() bool { if o != nil && o.ProxyAutoConfig != nil { return true } return false } // SetProxyAutoConfig gets a reference to the given PolicyAllOfProxyAutoConfig and assigns it to the ProxyAutoConfig field. func (o *Policy) SetProxyAutoConfig(v PolicyAllOfProxyAutoConfig) { o.ProxyAutoConfig = &v } // GetTrustedNetworkCheck returns the TrustedNetworkCheck field value if set, zero value otherwise. func (o *Policy) GetTrustedNetworkCheck() PolicyAllOfTrustedNetworkCheck { if o == nil || o.TrustedNetworkCheck == nil { var ret PolicyAllOfTrustedNetworkCheck return ret } return *o.TrustedNetworkCheck } // GetTrustedNetworkCheckOk returns a tuple with the TrustedNetworkCheck field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetTrustedNetworkCheckOk() (*PolicyAllOfTrustedNetworkCheck, bool) { if o == nil || o.TrustedNetworkCheck == nil { return nil, false } return o.TrustedNetworkCheck, true } // HasTrustedNetworkCheck returns a boolean if a field has been set. func (o *Policy) HasTrustedNetworkCheck() bool { if o != nil && o.TrustedNetworkCheck != nil { return true } return false } // SetTrustedNetworkCheck gets a reference to the given PolicyAllOfTrustedNetworkCheck and assigns it to the TrustedNetworkCheck field. func (o *Policy) SetTrustedNetworkCheck(v PolicyAllOfTrustedNetworkCheck) { o.TrustedNetworkCheck = &v } // GetClientSettings returns the ClientSettings field value if set, zero value otherwise. func (o *Policy) GetClientSettings() PolicyAllOfClientSettings { if o == nil || o.ClientSettings == nil { var ret PolicyAllOfClientSettings return ret } return *o.ClientSettings } // GetClientSettingsOk returns a tuple with the ClientSettings field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetClientSettingsOk() (*PolicyAllOfClientSettings, bool) { if o == nil || o.ClientSettings == nil { return nil, false } return o.ClientSettings, true } // HasClientSettings returns a boolean if a field has been set. func (o *Policy) HasClientSettings() bool { if o != nil && o.ClientSettings != nil { return true } return false } // SetClientSettings gets a reference to the given PolicyAllOfClientSettings and assigns it to the ClientSettings field. func (o *Policy) SetClientSettings(v PolicyAllOfClientSettings) { o.ClientSettings = &v } // GetAdministrativeRoles returns the AdministrativeRoles field value if set, zero value otherwise. func (o *Policy) GetAdministrativeRoles() []string { if o == nil || o.AdministrativeRoles == nil { var ret []string return ret } return *o.AdministrativeRoles } // GetAdministrativeRolesOk returns a tuple with the AdministrativeRoles field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *Policy) GetAdministrativeRolesOk() (*[]string, bool) { if o == nil || o.AdministrativeRoles == nil { return nil, false } return o.AdministrativeRoles, true } // HasAdministrativeRoles returns a boolean if a field has been set. func (o *Policy) HasAdministrativeRoles() bool { if o != nil && o.AdministrativeRoles != nil { return true } return false } // SetAdministrativeRoles gets a reference to the given []string and assigns it to the AdministrativeRoles field. func (o *Policy) SetAdministrativeRoles(v []string) { o.AdministrativeRoles = &v } func (o Policy) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} if true { toSerialize["id"] = o.Id } if true { toSerialize["name"] = o.Name } if o.Notes != nil { toSerialize["notes"] = o.Notes } if o.Created != nil { toSerialize["created"] = o.Created } if o.Updated != nil { toSerialize["updated"] = o.Updated } if o.Tags != nil { toSerialize["tags"] = o.Tags } if o.Disabled != nil { toSerialize["disabled"] = o.Disabled } if true { toSerialize["expression"] = o.Expression } if o.Entitlements != nil { toSerialize["entitlements"] = o.Entitlements } if o.EntitlementLinks != nil { toSerialize["entitlementLinks"] = o.EntitlementLinks } if o.RingfenceRules != nil { toSerialize["ringfenceRules"] = o.RingfenceRules } if o.RingfenceRuleLinks != nil { toSerialize["ringfenceRuleLinks"] = o.RingfenceRuleLinks } if o.TamperProofing != nil { toSerialize["tamperProofing"] = o.TamperProofing } if o.OverrideSite != nil { toSerialize["overrideSite"] = o.OverrideSite } if o.ProxyAutoConfig != nil { toSerialize["proxyAutoConfig"] = o.ProxyAutoConfig } if o.TrustedNetworkCheck != nil { toSerialize["trustedNetworkCheck"] = o.TrustedNetworkCheck } if o.ClientSettings != nil { toSerialize["clientSettings"] = o.ClientSettings } if o.AdministrativeRoles != nil { toSerialize["administrativeRoles"] = o.AdministrativeRoles } return json.Marshal(toSerialize) } type NullablePolicy struct { value *Policy isSet bool } func (v NullablePolicy) Get() *Policy { return v.value } func (v *NullablePolicy) Set(val *Policy) { v.value = val v.isSet = true } func (v NullablePolicy) IsSet() bool { return v.isSet } func (v *NullablePolicy) Unset() { v.value = nil v.isSet = false } func NewNullablePolicy(val *Policy) *NullablePolicy { return &NullablePolicy{value: val, isSet: true} } func (v NullablePolicy) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } func (v *NullablePolicy) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) }
{ var ret string return ret }
rules.rs
use rustling::*; use rustling_ontology_values::dimension::*; use rustling_ontology_values::helpers; use rustling_ontology_moment::{Weekday, Grain, PeriodComp}; pub fn rules_percentage(b: &mut RuleSetBuilder<Dimension>) -> RustlingResult<()> { b.rule_2("<number> per cent", number_check!(), b.reg(r"(?:%|percento|per cento)")?, |number, _| Ok(PercentageValue(number.value().value())) ); Ok(()) } pub fn rules_finance(b: &mut RuleSetBuilder<Dimension>) -> RustlingResult<()> { b.rule_2("intersect (X cents)", amount_of_money_check!(|money: &AmountOfMoneyValue| money.unit != Some("cent")), amount_of_money_check!(|money: &AmountOfMoneyValue| money.unit == Some("cent")), |a, b| helpers::compose_money(a.value(), b.value())); b.rule_3("intersect (and X cents)", amount_of_money_check!(|money: &AmountOfMoneyValue| money.unit != Some("cent")), b.reg(r#"y"#)?, amount_of_money_check!(|money: &AmountOfMoneyValue| money.unit == Some("cent")), |a, _, b| helpers::compose_money(&a.value(), &b.value())); b.rule_2("intersect", amount_of_money_check!(|money: &AmountOfMoneyValue| money.unit != Some("cent")), number_check!(), |a, b| helpers::compose_money_number(&a.value(), &b.value())); b.rule_1_terminal("$", b.reg(r#"\$|dollar[oi]"#)?, |_| Ok(MoneyUnitValue { unit: Some("$") }) ); b.rule_1_terminal("EUR", b.reg(r#"€|[e€]uro?s?"#)?, |_| Ok(MoneyUnitValue { unit: Some("EUR") }) ); b.rule_1_terminal("£", b.reg(r#"sterlin[ae]|£"#)?, |_| Ok(MoneyUnitValue { unit: Some("£") }) ); b.rule_1_terminal("USD", b.reg(r#"us[d\$]|dollar[oi]? american(o|i)"#)?, |_| Ok(MoneyUnitValue { unit: Some("USD") }) ); b.rule_1_terminal("Bitcoin", b.reg(r#"bitcoins?"#)?, |_| Ok(MoneyUnitValue { unit: Some("฿") }) ); b.rule_1_terminal("GBP", b.reg(r#"gbp|sterlin[ae] "#)?, |_| Ok(MoneyUnitValue { unit: Some("GBP") }) ); b.rule_1_terminal("INR", b.reg(r#"rupi[ae]"#)?, |_| Ok(MoneyUnitValue { unit: Some("INR") }) ); b.rule_1_terminal("cent", b.reg(r#"cent(?:esim[oi]|s)?"#)?, |_| Ok(MoneyUnitValue { unit: Some("cent") }) ); b.rule_2("<amount> <unit>", number_check!(), money_unit!(), |a, b| { Ok(AmountOfMoneyValue { value: a.value().value(), unit: b.value().unit, ..AmountOfMoneyValue::default() }) }); b.rule_2("<unit> <amount>", money_unit!(), number_check!(), |a, b| { Ok(AmountOfMoneyValue { value: b.value().value(), unit: a.value().unit, ..AmountOfMoneyValue::default() }) }); Ok(()) } pub fn rules_duration(b: &mut RuleSetBuilder<Dimension>) -> RustlingResult<()> { b.rule_1_terminal("second (unit-of-duration)", b.reg(r#"sec(?:ond[oi])?"#)?, |_| Ok(UnitOfDurationValue::new(Grain::Second)) ); b.rule_1_terminal("minute (unit-of-duration)", b.reg(r#"min(?:ut[oi])?"#)?, |_| Ok(UnitOfDurationValue::new(Grain::Minute)) ); b.rule_1_terminal("hour (unit-of-duration)", b.reg(r#"or[ae]"#)?, |_| Ok(UnitOfDurationValue::new(Grain::Hour)) ); b.rule_1_terminal("day (unit-of-duration)", b.reg(r#"giorn[oi]"#)?, |_| Ok(UnitOfDurationValue::new(Grain::Day)) ); b.rule_1_terminal("week (unit-of-duration)", b.reg(r#"settiman[ae]"#)?, |_| Ok(UnitOfDurationValue::new(Grain::Week)) ); b.rule_1_terminal("month (unit-of-duration)", b.reg(r#"mes(?:e|i)"#)?, |_| Ok(UnitOfDurationValue::new(Grain::Month)) ); b.rule_1_terminal("year (unit-of-duration)", b.reg(r#"ann[oi]"#)?, |_| Ok(UnitOfDurationValue::new(Grain::Year)) ); b.rule_2("<integer> <unit-of-duration>", integer_check_by_range!(0), unit_of_duration_check!(), |integer, uod| Ok(DurationValue::new(PeriodComp::new(uod.value().grain, integer.value().value).into())) ); b.rule_2("en <duration>", b.reg(r#"en"#)?, duration_check!(), |_, duration| duration.value().in_present() ); b.rule_2("fa <duration>", b.reg(r#"fa"#)?, duration_check!(), |_, duration| duration.value().ago() ); b.rule_2("during <duration>", b.reg(r#"(?:durante|per)"#)?, duration_check!(), |_, duration| Ok(duration.value().clone().prefixed()) ); Ok(()) } pub fn rules_cycle(b: &mut RuleSetBuilder<Dimension>) -> RustlingResult<()> { b.rule_1_terminal("minuti (cycle)", b.reg(r#"minut[oi]"#)?, |_| CycleValue::new(Grain::Minute) ); Ok(()) } pub fn rules_time(b: &mut RuleSetBuilder<Dimension>) -> RustlingResult<()> { b.rule_2("intersect", time_check!(|time: &TimeValue| !time.latent), time_check!(|time: &TimeValue| !time.latent), |a, b| a.value().intersect(b.value()) ); b.rule_1_terminal("named-day", b.reg(r#"lun(?:ed[íi]|\.)?"#)?, |_| helpers::day_of_week(Weekday::Mon) ); b.rule_1_terminal("named-day", b.reg(r#"mar(?:ted[íi]|\.)?"#)?, |_| helpers::day_of_week(Weekday::Tue) ); b.rule_1_terminal("named-day", b.reg(r#"mer(?:coled[íi]|\.)?"#)?, |_| helpers::day_of_week(Weekday::Wed) ); b.rule_1_terminal("named-day", b.reg(r#"gio(?:ved[íi]|v?\.)?"#)?, |_| helpers::day_of_week(Weekday::Thu) ); b.rule_1_terminal("named-day", b.reg(r#"ven(?:erd[íi]|\.)?"#)?, |_| helpers::day_of_week(Weekday::Fri) ); b.rule_1_terminal("named-day", b.reg(r#"sab(?:at[oi]|\.)?"#)?, |_| helpers::day_of_week(Weekday::Sat) ); b.rule_1_terminal("named-day", b.reg(r#"dom(?:enic(?:a|he)|\.)?"#)?, |_| helpers::day_of_week(Weekday::Sun) ); b.rule_1_terminal("right now", b.reg(r#"ora|adesso|oggi|subito"#)?, |_| helpers::cycle_nth(Grain::Second, 0) ); b.rule_1_terminal("tomorrow", b.reg(r#"domani"#)?, |_| helpers::cycle_nth(Grain::Day, 1) ); b.rule_1_terminal("yesterday", b.reg(r#"ieri"#)?, |_| helpers::cycle_nth(Grain::Day, -1) ); b.rule_1_terminal("the day after tomorrow", b.reg(r#"dopodomani"#)?, |_| helpers::cycle_nth(Grain::Day, 2) ); b.rule_1_terminal("tonight", b.reg(r#"stanotte"#)?, |_| { let period = helpers::hour(18, false)?.span_to(&helpers::hour(0, false)?, false)?; Ok(helpers::cycle_nth(Grain::Day, 0)? .intersect(&period)? .form(Form::PartOfDay(PartOfDayForm::Night))) } ); b.rule_2("alle <time-of-day>", b.reg(r#"a(?:ll(?:e|'))?"#)?, time_check!(form!(Form::TimeOfDay(_))), |_, tod| Ok(tod.value().clone().not_latent()) ); b.rule_1_terminal("hh(:|.|h)mm (time-of-day)", b.reg(r#"((?:[01]?\d)|(?:2[0-3]))[:\.]([0-5]\d)"#)?, |text_match| helpers::hour_minute( text_match.group(1).parse()?, text_match.group(2).parse()?, true ) ); b.rule_1_terminal("hh:mm:ss", b.reg(r#"((?:[01]?\d)|(?:2[0-3]))[:.]([0-5]\d)[:.]([0-5]\d)"#)?, |text_match| helpers::hour_minute_second( text_match.group(1).parse()?, text_match.group(2).parse()?, text_match.group(3).parse()?, true ) ); b.rule_1_terminal("morning", b.reg(r#"mattin(?:a|o)"#)?, |_| Ok(helpers::hour(4, false)?.span_to(&helpers::hour(12, false)?, false)? .form(Form::PartOfDay(PartOfDayForm::Morning)) .latent()) );
|_| Ok(helpers::hour(12, false)?.span_to(&helpers::hour(19, false)?, false)? .form(Form::PartOfDay(PartOfDayForm::Afternoon)) .latent()) ); b.rule_1_terminal("noon", b.reg(r#"mezzogiorno"#)?, |_| Ok(helpers::hour(12, false)?.span_to(&helpers::hour(17, false)?, false)? .form(Form::PartOfDay(PartOfDayForm::Afternoon)) .latent()) ); b.rule_1_terminal("evening", b.reg(r#"sera"#)?, |_| Ok(helpers::hour(18, false)?.span_to(&helpers::hour(0, false)?, false)? .form(Form::PartOfDay(PartOfDayForm::Evening)) .latent()) ); Ok(()) } pub fn rules_temperature(b: &mut RuleSetBuilder<Dimension>) -> RustlingResult<()> { b.rule_1("number as temp", number_check!(), |a| { Ok(TemperatureValue { value: a.value().value(), unit: None, latent: true, }) }); b.rule_2("<latent temp> temp", temperature_check!(), b.reg(r#"(?:grad[oi]?)|°"#)?, |a, _| { Ok(TemperatureValue { value: a.value().value, unit: Some("degree"), latent: false, }) }); b.rule_2("<temp> Celsius", temperature_check!(), b.reg(r#"centigrad[oi]|celsius"#)?, |a, _| { Ok(TemperatureValue { value: a.value().value, unit: Some("celsius"), latent: false, }) }); b.rule_2("<temp> Fahrenheit", temperature_check!(), b.reg(r#"f(?:ah?reh?n(?:h?eit)?)?\.?"#)?, |a, _| { Ok(TemperatureValue { value: a.value().value, unit: Some("fahrenheit"), latent: false, }) }); b.rule_2("<latent temp> temp bajo cero", temperature_check!(), b.reg(r#"(?:(?:grad[oi]?)|°)?(?: sotto (lo) zero)"#)?, |a, _| { Ok(TemperatureValue { value: -1.0 * a.value().value, latent: false, ..*a.value() }) }); Ok(()) } pub fn rules_numbers(b: &mut RuleSetBuilder<Dimension>) -> RustlingResult<()> { b.rule_1_terminal("number (0..19)", b.reg(r#"(zero|un[oa']?|due|tre|quattro|cinque|sei|sette|otto|nove|dieci|(?:undici|dodici|tredici|quattordici|quindici|sedici)|(?:dici(?:assette|otto|annove)))"#)?, |text_match| { let value = match text_match.group(1).as_ref() { "zero" => 0, "un" => 1, "un'" => 1, "uno" => 1, "una" => 1, "due" => 2, "tre" => 3, "quattro" => 4, "cinque" => 5, "sei" => 6, "sette" => 7, "otto" => 8, "nove" => 9, "dieci" => 10, "undici" => 11, "dodici" => 12, "tredici" => 13, "quattordici" => 14, "quindici" => 15, "sedici" => 16, "diciassette" => 17, "diciotto" => 18, "diciannove" => 19, _ => return Err(RuleError::Invalid.into()), }; IntegerValue::new(value) } ); b.rule_1_terminal("number (20..90)", b.reg(r#"(venti|trenta|(?:(?:quar|cinqu|sess|sett|ott|nov)anta))"#)?, |text_match| { let value = match text_match.group(1).as_ref() { "venti" => 20, "trenta" => 30, "quaranta" => 40, "cinquanta" => 50, "sessanta" => 60, "settanta" => 70, "ottanta" => 80, "novanta" => 90, _ => return Err(RuleError::Invalid.into()), }; IntegerValue::new(value) }); b.rule_2("number (21..29 31..39 41..49 51..59 61..69 71..79 81..89 91..99)", b.reg(r#"(venti?|trenta?|(?:(?:quar|cinqu|sess|sett|ott|nov)anta?))"#)?, integer_check_by_range!(1, 9), |text_match, b| { let value = match text_match.group(1).as_ref() { "venti" => 20, "trenta" => 30, "quaranta" => 40, "cinquanta" => 50, "sessanta" => 60, "settanta" => 70, "ottanta" => 80, "novanta" => 90, "vent" => 20, "trent" => 30, "quarant" => 40, "cinquant" => 50, "sessant" => 60, "settant" => 70, "ottant" => 80, "novant" => 90, _ => return Err(RuleError::Invalid.into()) }; IntegerValue::new(value + b.value().value) }); b.rule_1_terminal("number 100..1000", b.reg(r#"(cento?|duecento|trecento|quattrocento|cinquecento|seicento|settecento|ottocento|novecento|mil)"#)?, |text_match| { let value = match text_match.group(1).as_ref() { "cent" => 100, "cento" => 100, "duecento" => 200, "trecento" => 300, "quattrocento" => 400, "cinquecento" => 500, "seicento" => 600, "settecento" => 700, "ottocento" => 800, "novecento" => 900, "mil" => 1000, _ => return Err(RuleError::Invalid.into()) }; IntegerValue::new_with_grain(value, 2) }); b.rule_2("numbers 100..199", integer_check_by_range!(100, 100), integer_check_by_range!(0, 99), |_, b| IntegerValue::new(b.value().value + 100)); b.rule_3("numbers 200..999", integer_check_by_range!(2, 9), integer_check_by_range!(100, 100), integer_check_by_range!(0, 99), |a, b, c| IntegerValue::new(a.value().value * b.value().value + c.value().value)); b.rule_1_terminal("integer (numeric)", b.reg(r#"(\d{1,18})"#)?, |text_match| IntegerValue::new(text_match.group(0).parse()?)); b.rule_1_terminal("integer with thousands separator .", b.reg(r#"(\d{1,3}(\.\d\d\d){1,5})"#)?, |text_match| { let reformatted_string = text_match.group(1).replace(".", ""); let value: i64 = reformatted_string.parse()?; IntegerValue::new(value) }); b.rule_1_terminal("decimal number", b.reg(r#"(\d*,\d+)"#)?, |text_match| { let reformatted_string = text_match.group(1).replace(",", "."); let value: f32 = reformatted_string.parse()?; FloatValue::new(value) }); b.rule_3("number dot number", number_check!(|number: &NumberValue| !number.prefixed()), b.reg(r#"punto"#)?, number_check!(|number: &NumberValue| !number.suffixed()), |a, _, b| { Ok(FloatValue { value: b.value().value() * 0.1 + a.value().value(), ..FloatValue::default() }) }); b.rule_1_terminal("decimal with thousands separator", b.reg(r#"(\d+(\.\d\d\d)+,\d+)"#)?, |text_match| { let reformatted_string = text_match.group(1).replace(".", "").replace(",", "."); let value: f32 = reformatted_string.parse()?; FloatValue::new(value) }); b.rule_2("numbers prefix with -, negative or minus", b.reg(r#"-|meno"#)?, number_check!(|number: &NumberValue| !number.prefixed()), |_, a| -> RuleResult<NumberValue> { Ok(match a.value().clone() { // checked NumberValue::Integer(integer) => { IntegerValue { value: integer.value * -1, prefixed: true, ..integer } .into() } NumberValue::Float(float) => { FloatValue { value: float.value * -1.0, prefixed: true, ..float } .into() } }) }); b.rule_1_terminal("ordinals (primo..10)", b.reg(r#"(prim|second|terz|quart|quint|sest|settim|ottav|non|decim)[oiae]"#)?, |text_match| { let value = match text_match.group(1).as_ref() { "prim" => 1, "second" => 2, "terz" => 3, "quart" => 3, "quint" => 4, "sest" => 5, "settim" => 7, "ottav" => 8, "non" => 9, "decim" => 10, _ => return Err(RuleError::Invalid.into()) }; Ok(OrdinalValue::new(value)) }); Ok(()) }
b.rule_1_terminal("afternoon", b.reg(r#"pomeriggio"#)?,
run_glue.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa).""" import dataclasses import logging import os import sys from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, EvalPrediction, GlueDataset from transformers import GlueDataTrainingArguments as DataTrainingArguments from transformers import ( HfArgumentParser, Trainer, TrainingArguments, glue_compute_metrics, glue_output_modes, glue_tasks_num_labels, set_seed, ) logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, ) logger.info("Training/evaluation parameters %s", training_args) # Set seed set_seed(training_args.seed) try: num_labels = glue_tasks_num_labels[data_args.task_name] output_mode = glue_output_modes[data_args.task_name] except KeyError: raise ValueError("Task not found: %s" % (data_args.task_name)) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, ) # Get datasets train_dataset = GlueDataset(data_args, tokenizer=tokenizer) if training_args.do_train else None eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev") if training_args.do_eval else None test_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="test") if training_args.do_predict else None def compute_metrics(p: EvalPrediction) -> Dict:
# Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir) # Evaluation eval_results = {} if training_args.do_eval: logger.info("*** Evaluate ***") # Loop to handle MNLI double evaluation (matched, mis-matched) eval_datasets = [eval_dataset] if data_args.task_name == "mnli": mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm") eval_datasets.append(GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="dev")) for eval_dataset in eval_datasets: eval_result = trainer.evaluate(eval_dataset=eval_dataset) output_eval_file = os.path.join( training_args.output_dir, f"eval_results_{eval_dataset.args.task_name}.txt" ) if trainer.is_world_master(): with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(eval_dataset.args.task_name)) for key, value in eval_result.items(): logger.info(" %s = %s", key, value) writer.write("%s = %s\n" % (key, value)) eval_results.update(eval_result) if training_args.do_predict: logging.info("*** Test ***") test_datasets = [test_dataset] if data_args.task_name == "mnli": mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm") test_datasets.append(GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="test")) for test_dataset in test_datasets: predictions = trainer.predict(test_dataset=test_dataset).predictions if output_mode == "classification": predictions = np.argmax(predictions, axis=1) output_test_file = os.path.join( training_args.output_dir, f"test_results_{test_dataset.args.task_name}.txt" ) if trainer.is_world_master(): with open(output_test_file, "w") as writer: logger.info("***** Test results {} *****".format(test_dataset.args.task_name)) writer.write("index\tprediction\n") for index, item in enumerate(predictions): if output_mode == "regression": writer.write("%d\t%3.3f\n" % (index, item)) else: item = test_dataset.get_labels()[item] writer.write("%d\t%s\n" % (index, item)) return eval_results def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
if output_mode == "classification": preds = np.argmax(p.predictions, axis=1) elif output_mode == "regression": preds = np.squeeze(p.predictions) return glue_compute_metrics(data_args.task_name, preds, p.label_ids)
cog_settings.py
from redbot.core import data_manager from .json_utils import * class CogSettings(object): SETTINGS_FILE_NAME = "legacy_settings.json" def __init__(self, cog_name, bot=None): self.folder = str(data_manager.cog_data_path(raw_name=cog_name)) self.file_path = os.path.join(self.folder, CogSettings.SETTINGS_FILE_NAME) self.bot = bot self.check_folder() self.default_settings = self.make_default_settings() if not os.path.isfile(self.file_path): logger.warning("CogSettings config for {} not found. Creating default...".format(self.file_path)) self.bot_settings = self.default_settings self.save_settings() else: current = self.intify(read_json_file(self.file_path)) updated = False for key in self.default_settings.keys(): if key not in current.keys(): current[key] = self.default_settings[key] updated = True self.bot_settings = current if updated: self.save_settings() def check_folder(self): if not os.path.exists(self.folder): logger.info("Creating {}".format(self.folder)) os.makedirs(self.folder) def save_settings(self): write_json_file(self.file_path, self.bot_settings) def make_default_settings(self): return {} @classmethod def
(cls, key): if isinstance(key, dict): return {cls.intify(k): cls.intify(v) for k, v in key.items()} elif isinstance(key, (list, tuple)): return [cls.intify(x) for x in key] elif isinstance(key, str) and key.isdigit(): return int(key) elif isinstance(key, str) and key.replace('.', '', 1).isdigit(): return float(key) else: return key
intify
680. Valid Palindrome II_medium.py
class
: def validPalindrome(self, s: str) -> bool: deleted = 0 left, right = 0, len(s) - 1 while left < right: if s[left] == s[right]: left += 1 right -= 1 else: deleted += 1 if deleted >= 2: return False checkpoint = (left, right) left += 1 while left < right: if s[left] == s[right]: left += 1 right -= 1 if left >= right: return True else: break left, right = checkpoint right -= 1 while left < right: if s[left] == s[right]: left += 1 right -= 1 if left >= right: return True else: break return True
Solution
doc.go
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated by protoc-gen-go_gapic. DO NOT EDIT. // Package spanner is an auto-generated package for the // Cloud Spanner API. // // Cloud Spanner is a managed, mission-critical, globally consistent and // scalable relational database service. // // Use of Context // // The ctx passed to NewClient is used for authentication requests and // for creating the underlying connection, but is not used for subsequent calls. // Individual methods on the client use the ctx given to them. // // To close the open connection, use the Close() method. // // For information about setting deadlines, reusing contexts, and more // please visit godoc.org/cloud.google.com/go. package spanner // import "cloud.google.com/go/spanner/apiv1" import ( "context" "runtime" "strings" "unicode" "google.golang.org/grpc/metadata" ) const versionClient = "20200331" func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { out, _ := metadata.FromOutgoingContext(ctx) out = out.Copy() for _, md := range mds { for k, v := range md { out[k] = append(out[k], v...) } } return metadata.NewOutgoingContext(ctx, out) } // DefaultAuthScopes reports the default set of authentication scopes to use with this package. func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/spanner.data", } } // versionGo returns the Go runtime version. The returned string // has no whitespace, suitable for reporting in header. func versionGo() string { const develPrefix = "devel +" s := runtime.Version() if strings.HasPrefix(s, develPrefix) { s = s[len(develPrefix):] if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0
return s } notSemverRune := func(r rune) bool { return !strings.ContainsRune("0123456789.", r) } if strings.HasPrefix(s, "go1") { s = s[2:] var prerelease string if p := strings.IndexFunc(s, notSemverRune); p >= 0 { s, prerelease = s[:p], s[p:] } if strings.HasSuffix(s, ".") { s += "0" } else if strings.Count(s, ".") < 2 { s += ".0" } if prerelease != "" { s += "-" + prerelease } return s } return "UNKNOWN" }
{ s = s[:p] }
addresses.py
from typing import TYPE_CHECKING from kivy.app import App from kivy.clock import Clock from kivy.factory import Factory from kivy.properties import ObjectProperty from kivy.lang import Builder from decimal import Decimal from kivy.uix.popup import Popup from electrum_ltc.gui.kivy.i18n import _ from ...util import address_colors if TYPE_CHECKING: from ...main_window import ElectrumWindow Builder.load_string(''' <AddressLabel@Label> text_size: self.width, None halign: 'left' valign: 'top' <AddressItem@CardItem> address: '' memo: '' amount: '' status: '' BoxLayout: spacing: '8dp' height: '32dp' orientation: 'vertical' Widget AddressLabel: text: root.address shorten: True Widget AddressLabel: text: (root.amount if root.status == 'Funded' else root.status) + ' ' + root.memo color: .699, .699, .699, 1 font_size: '13sp' shorten: True Widget <AddressButton@Button>: background_color: 1, .585, .878, 0 halign: 'center' text_size: (self.width, None) shorten: True size_hint: 0.5, None default_text: '' text: self.default_text padding: '5dp', '5dp' height: '40dp' text_color: self.foreground_color disabled_color: 1, 1, 1, 1 foreground_color: 1, 1, 1, 1 canvas.before: Color: rgba: (0.9, .498, 0.745, 1) if self.state == 'down' else self.background_color Rectangle: size: self.size pos: self.pos <AddressesDialog@Popup> id: popup title: _('Addresses') message: '' pr_status: 'Pending' show_change: 0 show_used: 0 on_message: self.update() BoxLayout: id:box padding: '12dp', '12dp', '12dp', '12dp' spacing: '12dp' orientation: 'vertical' BoxLayout: spacing: '6dp' height: self.minimum_height size_hint: 1, None orientation: 'horizontal' AddressFilter: opacity: 1 size_hint: 1, None height: self.minimum_height spacing: '5dp' AddressButton: id: search text: {0:_('Receiving'), 1:_('Change'), 2:_('All')}[root.show_change] on_release: root.show_change = (root.show_change + 1) % 3 Clock.schedule_once(lambda dt: root.update()) AddressFilter: opacity: 1 size_hint: 1, None height: self.minimum_height spacing: '5dp' AddressButton: id: search text: {0:_('All'), 1:_('Unused'), 2:_('Funded'), 3:_('Used')}[root.show_used] on_release: root.show_used = (root.show_used + 1) % 4 Clock.schedule_once(lambda dt: root.update()) AddressFilter: opacity: 1 size_hint: 1, None height: self.minimum_height spacing: '5dp' canvas.before: Color: rgba: 0.9, 0.9, 0.9, 1 AddressButton: id: change
on_release: Clock.schedule_once(lambda dt: app.description_dialog(popup)) RecycleView: scroll_type: ['bars', 'content'] bar_width: '15dp' viewclass: 'AddressItem' id: search_container RecycleBoxLayout: orientation: 'vertical' default_size: None, dp(56) default_size_hint: 1, None size_hint_y: None height: self.minimum_height <AddressPopup@Popup>: address: '' balance: '' status: '' script_type: '' pk: '' address_color: 1, 1, 1, 1 address_background_color: 0.3, 0.3, 0.3, 1 BoxLayout: orientation: 'vertical' ScrollView: GridLayout: cols: 1 height: self.minimum_height size_hint_y: None padding: '10dp' spacing: '10dp' TopLabel: text: _('Address') RefLabel: color: root.address_color background_color: root.address_background_color data: root.address name: _('Address') GridLayout: cols: 1 size_hint_y: None height: self.minimum_height spacing: '10dp' BoxLabel: text: _('Balance') value: root.balance BoxLabel: text: _('Script type') value: root.script_type BoxLabel: text: _('Status') value: root.status TopLabel: text: _('Private Key') RefLabel: data: root.pk name: _('Private key') on_touched: if not self.data: root.do_export(self) Widget: size_hint: 1, 0.1 BoxLayout: size_hint: 1, None height: '48dp' Button: size_hint: 0.5, None height: '48dp' text: _('Receive') on_release: root.receive_at() Button: size_hint: 0.5, None height: '48dp' text: _('Close') on_release: root.dismiss() ''') class AddressPopup(Popup): def __init__(self, parent, address, balance, status, **kwargs): super(AddressPopup, self).__init__(**kwargs) self.title = _('Address Details') self.parent_dialog = parent self.app = parent.app self.address = address self.status = status self.script_type = self.app.wallet.get_txin_type(self.address) self.balance = self.app.format_amount_and_units(balance) self.address_color, self.address_background_color = address_colors(self.app.wallet, address) def receive_at(self): self.dismiss() self.parent_dialog.dismiss() self.app.switch_to('receive') # retry until receive_screen is set Clock.schedule_interval(lambda dt: bool(self.app.receive_screen.set_address(self.address) and False) if self.app.receive_screen else True, 0.1) def do_export(self, pk_label): self.app.export_private_keys(pk_label, self.address) class AddressesDialog(Factory.Popup): def __init__(self, app): Factory.Popup.__init__(self) self.app = app # type: ElectrumWindow def get_card(self, addr, balance, is_used, label): ci = {} ci['screen'] = self ci['address'] = addr ci['memo'] = label ci['amount'] = self.app.format_amount_and_units(balance) ci['status'] = _('Used') if is_used else _('Funded') if balance > 0 else _('Unused') return ci def update(self): wallet = self.app.wallet if self.show_change == 0: _list = wallet.get_receiving_addresses() elif self.show_change == 1: _list = wallet.get_change_addresses() else: _list = wallet.get_addresses() search = self.message container = self.ids.search_container n = 0 cards = [] for address in _list: label = wallet.get_label(address) balance = sum(wallet.get_addr_balance(address)) is_used_and_empty = wallet.is_used(address) and balance == 0 if self.show_used == 1 and (balance or is_used_and_empty): continue if self.show_used == 2 and balance == 0: continue if self.show_used == 3 and not is_used_and_empty: continue card = self.get_card(address, balance, is_used_and_empty, label) if search and not self.ext_search(card, search): continue cards.append(card) n += 1 container.data = cards if not n: self.app.show_error('No address matching your search') def show_item(self, obj): address = obj.address c, u, x = self.app.wallet.get_addr_balance(address) balance = c + u + x d = AddressPopup(self, address, balance, obj.status) d.open() def ext_search(self, card, search): return card['memo'].find(search) >= 0 or card['amount'].find(search) >= 0
text: root.message if root.message else _('Search')