file_name (string, 3-137 chars) | prefix (string, 0-918k chars) | suffix (string, 0-962k chars) | middle (string, 0-812k chars)
---|---|---|---|
test_authx_diagnoses.py | from test_helpers import helper_get_katsu_response
from test_helpers import helper_get_user_token
import pytest
"""
This test suite covers the manual tests for KATSU in README.md, ensuring that
authorization happens correctly
- beacon permissions
- registered/controlled access
- modified but live token
"""
KATSU_URL="http://localhost:8001"
OIDC1_NAME="oidc1"
OIDC2_NAME="oidc2"
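# For reference, a minimal sketch of what the imported helpers could look like
# (hypothetical: the real implementations live in test_helpers.py; the token
# endpoint URL and client_id below are placeholders, not taken from this repo):
#
#   import requests
#
#   def helper_get_user_token(username, password, oidc_name=OIDC1_NAME):
#       # Password-grant login against the named OIDC provider; returns an access token.
#       resp = requests.post(
#           f"http://localhost:8080/auth/realms/{oidc_name}/protocol/openid-connect/token",
#           data={"grant_type": "password", "username": username,
#                 "password": password, "client_id": "example-client"},
#       )
#       return resp.json()["access_token"]
#
#   def helper_get_katsu_response(token, url):
#       # GET a Katsu endpoint with the bearer token attached.
#       return requests.get(url, headers={"Authorization": f"Bearer {token}"})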
@pytest.fixture(scope="session")
def user1_token():
"""
Return the token for user1
"""
return helper_get_user_token("user1", "pass1")
def test_user1_diagnoses_access(user1_token):
|
def test_user1_diagnoses_invalid(user1_token):
"""
Make sure an invalid token does not grant access to datasets other than open datasets
"""
invalid_token = 'A' + user1_token[1:]
response = helper_get_katsu_response(invalid_token, f"{KATSU_URL}/api/diagnoses")
assert response.status_code == 200
response_json = response.json()
assert response_json["count"] == 2
diagnoses_dscps = set()
for diagnosis in response_json["results"]:
diagnoses_dscps.add(diagnosis["extra_properties"]["description"])
assert "open1" in diagnoses_dscps
assert "open2" in diagnoses_dscps
assert "registered3" not in diagnoses_dscps
assert "controlled4" not in diagnoses_dscps
@pytest.fixture(scope="session")
def user2_token():
"""
Return the token for user2
"""
return helper_get_user_token("user2", "pass2")
def test_user2_diagnoses_access(user2_token):
""""
Make sure user2 has access to open1, open2, registered3 and controlled 4
"""
response = helper_get_katsu_response(user2_token, f"{KATSU_URL}/api/diagnoses")
assert response.status_code == 200
response_json = response.json()
assert response_json["count"] == 3
diagnoses_ids = list()
diagnoses_dscps = list()
for diagnosis in response_json["results"]:
diagnoses_ids.append(diagnosis["id"])
diagnoses_dscps.append(diagnosis["extra_properties"]["description"])
""""
Make sure user2 has access to open1, open2, and controlled5
"""
for id in diagnoses_ids:
response = helper_get_katsu_response(user2_token, f"{KATSU_URL}/api/diagnoses/{id}")
assert response.status_code == 200
assert "id" in response.json().keys()
def test_user2_diagnoses_invalid(user2_token):
"""
Make sure an invalid token does not grant access to datasets other than open datasets
"""
invalid_token = 'A' + user2_token[1:]
response = helper_get_katsu_response(invalid_token, f"{KATSU_URL}/api/diagnoses")
assert response.status_code == 200
response_json = response.json()
assert response_json["count"] == 2
diagnoses_dscps = set()
for diagnosis in response_json["results"]:
diagnoses_dscps.add(diagnosis["extra_properties"]["description"])
assert "open1" in diagnoses_dscps
assert "open2" in diagnoses_dscps
assert "registered3" not in diagnoses_dscps
assert "controlled5" not in diagnoses_dscps
@pytest.fixture(scope="session")
def user3_token():
"""
Return the token for user3
"""
return helper_get_user_token("user3", "pass3", OIDC2_NAME)
def test_user3_diagnoses_access(user3_token):
""""
Make sure user3 has access to open1, open2, registered3, controlled4, and controlled6
"""
response = helper_get_katsu_response(user3_token, f"{KATSU_URL}/api/diagnoses")
assert response.status_code == 200
response_json = response.json()
assert response_json["count"] == 5
diagnoses_ids = list()
diagnoses_dscps = list()
for diagnosis in response_json["results"]:
diagnoses_ids.append(diagnosis["id"])
diagnoses_dscps.append(diagnosis["extra_properties"]["description"])
assert "open1" in diagnoses_dscps
assert "open2" in diagnoses_dscps
assert "registered3" in diagnoses_dscps
assert "controlled4" in diagnoses_dscps
assert "controlled6" in diagnoses_dscps
""""
Make sure user3 has access to open1, open2, registered3, controlled4, and controlled6 by id
"""
for id in diagnoses_ids:
response = helper_get_katsu_response(user3_token, f"{KATSU_URL}/api/diagnoses/{id}")
assert response.status_code == 200
assert "id" in response.json().keys()
def test_user3_diagnoses_invalid(user3_token):
"""
Make sure an invalid token does not grant access to datasets other than open datasets
"""
invalid_token = 'A' + user3_token[1:]
response = helper_get_katsu_response(invalid_token, f"{KATSU_URL}/api/diagnoses")
assert response.status_code == 200
response_json = response.json()
assert response_json["count"] == 2
diagnoses_dscps = set()
for diagnosis in response_json["results"]:
diagnoses_dscps.add(diagnosis["extra_properties"]["description"])
assert "open1" in diagnoses_dscps
assert "open2" in diagnoses_dscps
assert "registered3" not in diagnoses_dscps
assert "controlled4" not in diagnoses_dscps
assert "controlled6" not in diagnoses_dscps
@pytest.fixture(scope="session")
def user4_token():
"""
Return the token for user4
"""
return helper_get_user_token("user4", "pass4", OIDC2_NAME)
def test_user4_diagnoses_access(user4_token):
""""
Make sure user3 has access to open1, open2, and controlled5
"""
response = helper_get_katsu_response(user4_token, f"{KATSU_URL}/api/diagnoses")
assert response.status_code == 200
response_json = response.json()
assert response_json["count"] == 3
diagnoses_ids = list()
diagnoses_dscps = list()
for diagnosis in response_json["results"]:
diagnoses_ids.append(diagnosis["id"])
diagnoses_dscps.append(diagnosis["extra_properties"]["description"])
assert "open1" in diagnoses_dscps
assert "open2" in diagnoses_dscps
assert "controlled5" in diagnoses_dscps
""""
Make sure user4 has access to open1, open2, and controlled4 by id
"""
for id in diagnoses_ids:
response = helper_get_katsu_response(user4_token, f"{KATSU_URL}/api/diagnoses/{id}")
assert response.status_code == 200
assert "id" in response.json().keys()
def test_user4_diagnoses_invalid(user4_token):
"""
Make sure an invalid token does not grant access to datasets other than open datasets
"""
invalid_token = 'A' + user4_token[1:]
response = helper_get_katsu_response(invalid_token, f"{KATSU_URL}/api/diagnoses")
assert response.status_code == 200
response_json = response.json()
assert response_json["count"] == 2
diagnoses_dscps = set()
for diagnosis in response_json["results"]:
diagnoses_dscps.add(diagnosis["extra_properties"]["description"])
assert "open1" in diagnoses_dscps
assert "open2" in diagnoses_dscps
assert "controlled5" not in diagnoses_dscps | """"
Make sure user1 has access to open1, open2, registered3 and controlled4
"""
response = helper_get_katsu_response(user1_token, f"{KATSU_URL}/api/diagnoses")
assert response.status_code == 200
response_json = response.json()
assert response_json["count"] == 4
diagnoses_ids = list()
diagnoses_dscps = list()
for diagnosis in response_json["results"]:
diagnoses_ids.append(diagnosis["id"])
diagnoses_dscps.append(diagnosis["extra_properties"]["description"])
assert "open1" in diagnoses_dscps
assert "open2" in diagnoses_dscps
assert "registered3" in diagnoses_dscps
assert "controlled4" in diagnoses_dscps
'''
Make sure user1 has access to open1, open2, registered3 and controlled4 by id
'''
for id in diagnoses_ids:
response = helper_get_katsu_response(user1_token, f"{KATSU_URL}/api/diagnoses/{id}")
assert response.status_code == 200
assert "id" in response.json().keys() |
0005_auto_20200505_1038.py | # Generated by Django 3.0.5 on 2020-05-05 05:08
from django.db import migrations, models
class | (migrations.Migration):
dependencies = [
('school', '0004_auto_20200504_1753'),
]
operations = [
migrations.RemoveField(
model_name='attendance',
name='student_id',
),
migrations.AddField(
model_name='attendance',
name='roll',
field=models.CharField(max_length=10, null=True),
),
migrations.AlterField(
model_name='studentextra',
name='roll',
field=models.CharField(max_length=10),
),
]
| Migration |
service.rs | // Copyright 2017-2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Main entry point of the sc-network crate.
//!
//! There are two main structs in this module: [`NetworkWorker`] and [`NetworkService`].
//! The [`NetworkWorker`] *is* the network and implements the `Future` trait. It must be polled in
//! order for the network to advance.
//! The [`NetworkService`] is merely a shared version of the [`NetworkWorker`]. You can obtain an
//! `Arc<NetworkService>` by calling [`NetworkWorker::service`].
//!
//! The methods of the [`NetworkService`] are implemented by sending a message over a channel,
//! which is then processed by [`NetworkWorker::poll`].
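//!
//! A minimal usage sketch (hedged: assumes a fully populated `Params` and some executor able
//! to drive the worker; `executor`, `peer_id` and `reputation_change` are placeholders, not
//! taken from this crate's documentation):
//!
//! ```ignore
//! let worker = NetworkWorker::new(params)?;
//! let service = worker.service().clone();
//! // The worker *is* the network: it must be polled (here, spawned) for anything to happen.
//! executor.spawn(worker);
//! // The shared service handle can now be used from anywhere in the codebase.
//! service.report_peer(peer_id, reputation_change);
//! ```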
use std::pin::Pin;
use std::sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
Arc,
};
use std::task::Poll;
use std::{borrow::Cow, collections::HashSet, io};
use crate::config::{Params, TransportConfig};
use crate::discovery::DiscoveryConfig;
use crate::errors::Error;
use crate::metrics::Metrics;
use crate::network_state::{
NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer,
};
use crate::protocol::event::Event;
use crate::protocol::generic_proto::{NotificationsSink, Ready};
use crate::protocol::Protocol;
use crate::request_responses::{
InboundFailure, OutboundFailure, RequestFailure, RequestId, ResponseFailure, SendRequestError,
};
use crate::Multiaddr;
use crate::{
behaviour::{Behaviour, BehaviourOut},
errors, out_events, DhtEvent,
};
use crate::{
config::{parse_addr, parse_str_addr, NonReservedPeerMode},
transport,
};
use bitflags::_core::time::Duration;
use futures::channel::oneshot::Canceled;
use futures::{
channel::{mpsc, oneshot},
prelude::*,
};
use libp2p::core::network::ConnectionLimits;
use libp2p::core::{
connection::{ConnectionError, PendingConnectionError},
ConnectedPoint,
};
use libp2p::swarm::{
protocols_handler::NodeHandlerWrapperError, AddressScore, NetworkBehaviour, SwarmBuilder,
SwarmEvent,
};
use libp2p::{kad::record, PeerId};
use log::{debug, error, info, trace, warn};
use parking_lot::Mutex;
use sc_peerset::{PeersetHandle, ReputationChange};
use starcoin_metrics::{Histogram, HistogramVec};
use starcoin_types::startup_info::ChainStatus;
use std::collections::HashMap;
use std::num::NonZeroUsize;
use std::time::Instant;
/// Minimum Requirements for a Hash within Networking
pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {}
impl<T> ExHashT for T where T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static
{}
/// A cloneable handle for reporting cost/benefits of peers.
#[derive(Clone)]
pub struct ReportHandle {
inner: PeersetHandle, // wraps it so we don't have to worry about breaking API.
}
impl From<PeersetHandle> for ReportHandle {
fn from(peerset_handle: PeersetHandle) -> Self {
ReportHandle {
inner: peerset_handle,
}
}
}
/// Substrate network service. Handles network IO and manages connectivity.
pub struct NetworkService {
/// Number of peers we're connected to.
num_connected: Arc<AtomicUsize>,
/// The local external addresses.
external_addresses: Arc<Mutex<Vec<Multiaddr>>>,
/// Are we actively catching up with the chain?
is_major_syncing: Arc<AtomicBool>,
/// Local copy of the `PeerId` of the local node.
local_peer_id: PeerId,
/// Bandwidth logging system. Can be queried to know the average bandwidth consumed.
bandwidth: Arc<transport::BandwidthSinks>,
/// Peerset manager (PSM); manages the reputation of nodes and indicates the network which
/// nodes it should be connected to or not.
peerset: PeersetHandle,
/// For each peer, an object that allows sending notifications to
/// that peer. Updated by the [`NetworkWorker`].
peers_notifications_sinks: Arc<Mutex<HashMap<PeerId, NotificationsSink>>>,
/// Channel that sends messages to the actual worker.
to_worker: mpsc::UnboundedSender<ServiceToWorkerMsg>,
/// Field extracted from the [`Metrics`] struct and necessary to report the
/// notifications-related metrics.
notifications_sizes_metric: Option<HistogramVec>,
}
impl NetworkWorker {
/// Creates the network service.
///
/// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order
/// for the network processing to advance. From it, you can extract a `NetworkService` using
/// `worker.service()`. The `NetworkService` can be shared through the codebase.
pub fn new(params: Params) -> errors::Result<NetworkWorker> {
// Ensure the listen addresses are consistent with the transport.
ensure_addresses_consistent_with_transport(
params.network_config.listen_addresses.iter(),
&params.network_config.transport,
)?;
ensure_addresses_consistent_with_transport(
params
.network_config
.boot_nodes
.iter()
.map(|x| &x.multiaddr),
&params.network_config.transport,
)?;
ensure_addresses_consistent_with_transport(
params
.network_config
.reserved_nodes
.iter()
.map(|x| &x.multiaddr),
&params.network_config.transport,
)?;
ensure_addresses_consistent_with_transport(
params.network_config.public_addresses.iter(),
&params.network_config.transport,
)?;
let (to_worker, from_worker) = mpsc::unbounded();
// List of multiaddresses that we know in the network.
let mut known_addresses = Vec::new();
let mut bootnodes = Vec::new();
let mut boot_node_ids = HashSet::new();
// Process the bootnodes.
for bootnode in params.network_config.boot_nodes.iter() {
bootnodes.push(bootnode.peer_id);
boot_node_ids.insert(bootnode.peer_id);
known_addresses.push((bootnode.peer_id, bootnode.multiaddr.clone()));
}
let boot_node_ids = Arc::new(boot_node_ids);
// Check for duplicate bootnodes.
known_addresses.iter().try_for_each(|(peer_id, addr)| {
if let Some(other) = known_addresses
.iter()
.find(|o| o.1 == *addr && o.0 != *peer_id)
{
Err(Error::DuplicateBootnode {
address: addr.clone(),
first_id: *peer_id,
second_id: other.0,
})
} else {
Ok(())
}
})?;
let priority_groups = {
let mut reserved_nodes = HashSet::new();
for reserved in params.network_config.reserved_nodes.iter() {
reserved_nodes.insert(reserved.peer_id);
known_addresses.push((reserved.peer_id, reserved.multiaddr.clone()));
}
vec![("reserved".to_owned(), reserved_nodes)]
};
let peerset_config = sc_peerset::PeersetConfig {
in_peers: params.network_config.in_peers,
out_peers: params.network_config.out_peers,
bootnodes,
reserved_only: params.network_config.non_reserved_mode == NonReservedPeerMode::Deny,
priority_groups,
};
// Private and public keys configuration.
let local_identity = params.network_config.node_key.clone().into_keypair()?;
let local_public = local_identity.public();
let local_peer_id = local_public.clone().into_peer_id();
info!(target: "sub-libp2p", "Local node identity is: {}", local_peer_id.to_base58());
let num_connected = Arc::new(AtomicUsize::new(0));
let is_major_syncing = Arc::new(AtomicBool::new(false));
let notif_protocols = params.network_config.notifications_protocols.clone();
let (protocol, peerset_handle) = Protocol::new(
peerset_config,
local_peer_id,
params.protocol_id.clone(),
params.chain_info,
boot_node_ids.clone(),
notif_protocols,
)?;
// Build the swarm.
let (mut swarm, bandwidth): (Swarm, _) = {
let user_agent = format!(
"{} ({})",
params.network_config.client_version, params.network_config.node_name
);
let discovery_config = {
let mut config = DiscoveryConfig::new(local_public.clone());
config.with_user_defined(known_addresses);
config.discovery_limit(u64::from(params.network_config.out_peers) + 15);
config.add_protocol(params.protocol_id.clone());
config.allow_non_globals_in_dht(params.network_config.allow_non_globals_in_dht);
match params.network_config.transport {
TransportConfig::MemoryOnly => {
config.with_mdns(false);
config.allow_private_ipv4(false);
} | ..
} => {
config.with_mdns(enable_mdns);
config.allow_private_ipv4(allow_private_ipv4);
}
}
config
};
let behaviour = match Behaviour::new(
protocol,
user_agent,
local_public,
discovery_config,
params.network_config.request_response_protocols,
) {
Ok(behaviour) => behaviour,
Err(crate::request_responses::RegisterError::DuplicateProtocol(proto)) => {
return Err(Error::DuplicateRequestResponseProtocol { protocol: proto });
}
};
let (transport, bandwidth) = {
let (config_mem, config_wasm) = match params.network_config.transport {
TransportConfig::MemoryOnly => (true, None),
TransportConfig::Normal {
wasm_external_transport,
..
} => (false, wasm_external_transport),
};
transport::build_transport(local_identity, config_mem, config_wasm)
};
let builder = SwarmBuilder::new(transport, behaviour, local_peer_id)
.connection_limits(
ConnectionLimits::default()
.with_max_established_per_peer(Some(crate::MAX_CONNECTIONS_PER_PEER as u32))
.with_max_established_incoming(Some(
crate::MAX_CONNECTIONS_ESTABLISHED_INCOMING,
)),
)
.notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed"))
.connection_event_buffer_size(1024);
(builder.build(), bandwidth)
};
// Listen on multiaddresses.
for addr in &params.network_config.listen_addresses {
if let Err(err) = Swarm::listen_on(&mut swarm, addr.clone()) {
warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err)
}
}
// Add external addresses.
for addr in &params.network_config.public_addresses {
Swarm::add_external_address(&mut swarm, addr.clone(), AddressScore::Infinite);
}
let external_addresses = Arc::new(Mutex::new(Vec::new()));
let peers_notifications_sinks = Arc::new(Mutex::new(HashMap::new()));
let metrics = params
.metrics_registry
.as_ref()
.and_then(|registry| Metrics::register(&registry).ok());
let service = Arc::new(NetworkService {
bandwidth,
external_addresses,
num_connected,
is_major_syncing,
peerset: peerset_handle,
local_peer_id,
peers_notifications_sinks: peers_notifications_sinks.clone(),
to_worker,
notifications_sizes_metric: metrics
.as_ref()
.map(|metrics| metrics.notifications_sizes.clone()),
});
Ok(NetworkWorker {
network_service: swarm,
service,
from_worker,
event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?,
metrics,
boot_node_ids,
pending_requests: HashMap::with_capacity(128),
peers_notifications_sinks,
})
}
/// Returns the total number of bytes received so far.
pub fn total_bytes_inbound(&self) -> u64 {
self.service.bandwidth.total_inbound()
}
/// Returns the total number of bytes sent so far.
pub fn total_bytes_outbound(&self) -> u64 {
self.service.bandwidth.total_outbound()
}
/// Returns the number of peers we're connected to.
pub fn num_connected_peers(&self) -> usize {
self.network_service.user_protocol().num_connected_peers()
}
// /// Returns the number of peers we're connected to and that are being queried.
// pub fn num_active_peers(&self) -> usize {
// self.network_service.user_protocol().num_active_peers()
// }
/// Adds an address for a node.
pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) {
self.network_service.add_known_address(peer_id, addr);
}
/// Return a `NetworkService` that can be shared through the code base and can be used to
/// manipulate the worker.
pub fn service(&self) -> &Arc<NetworkService> {
&self.service
}
/// Get network state.
///
/// **Note**: Use this only for debugging. This API is unstable. There are warnings literally
/// everywhere about this. Please don't use this function to retrieve actual information.
pub fn network_state(&mut self) -> NetworkState {
let swarm = &mut self.network_service;
let open = swarm
.user_protocol()
.open_peers()
.cloned()
.collect::<Vec<_>>();
let connected_peers = {
let swarm = &mut *swarm;
open.iter().filter_map(move |peer_id| {
let known_addresses = NetworkBehaviour::addresses_of_peer(&mut **swarm, peer_id)
.into_iter().collect();
let endpoint = if let Some(e) = swarm.node(peer_id).map(|i| i.endpoint()) {
e.clone().into()
} else {
error!(target: "sub-libp2p", "Found state inconsistency between custom protocol \
and debug information about {:?}", peer_id);
return None;
};
Some((peer_id.to_base58(), NetworkStatePeer {
endpoint,
version_string: swarm.node(peer_id)
.and_then(|i| i.client_version().map(|s| s.to_owned())),
latest_ping_time: swarm.node(peer_id).and_then(|i| i.latest_ping()),
enabled: swarm.user_protocol().is_enabled(&peer_id),
open: swarm.user_protocol().is_open(&peer_id),
known_addresses,
}))
}).collect()
};
let not_connected_peers = {
let swarm = &mut *swarm;
swarm
.known_peers()
.into_iter()
.filter(|p| open.iter().all(|n| n != p))
.map(move |peer_id| {
(
peer_id.to_base58(),
NetworkStateNotConnectedPeer {
version_string: swarm
.node(&peer_id)
.and_then(|i| i.client_version().map(|s| s.to_owned())),
latest_ping_time: swarm.node(&peer_id).and_then(|i| i.latest_ping()),
known_addresses: NetworkBehaviour::addresses_of_peer(
&mut **swarm,
&peer_id,
)
.into_iter()
.collect(),
},
)
})
.collect()
};
NetworkState {
peer_id: Swarm::local_peer_id(&swarm).to_base58(),
listened_addresses: Swarm::listeners(&swarm).cloned().collect(),
external_addresses: Swarm::external_addresses(&swarm)
.map(|r| &r.addr)
.cloned()
.collect(),
connected_peers,
not_connected_peers,
peerset: swarm.user_protocol_mut().peerset_debug_info(),
}
}
/// Removes a `PeerId` from the list of reserved peers.
pub fn remove_reserved_peer(&self, peer: PeerId) {
self.service.remove_reserved_peer(peer);
}
/// Adds a `PeerId` and its address as reserved. The string should encode the address
/// and peer ID of the remote node.
pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> {
self.service.add_reserved_peer(peer)
}
/// Returns the list of all the peers we know of.
pub fn known_peers(&mut self) -> HashSet<PeerId> {
self.network_service.known_peers()
}
pub fn is_open(&self, peer_id: &PeerId) -> bool {
self.network_service.is_open(peer_id)
}
}
impl NetworkService {
/// Writes a message on an open notifications channel. Has no effect if the notifications
/// channel with this protocol name is closed.
///
/// > **Note**: The reason why this is a no-op in the situation where we have no channel is
/// > that we don't guarantee message delivery anyway. Networking issues can cause
/// > connections to drop at any time, and higher-level logic shouldn't differentiate
/// > between the remote voluntarily closing a substream or a network error
/// > preventing the message from being delivered.
///
/// The protocol must have been registered with `register_notifications_protocol`.
///
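/// A minimal sketch (hedged: `service`, `peer_id`, the protocol name and `payload` are
/// placeholders):
///
/// ```ignore
/// service.write_notification(peer_id, Cow::Borrowed("/example/notifications/1"), payload);
/// ```
///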
pub fn write_notification(
&self,
target: PeerId,
protocol_name: Cow<'static, str>,
message: Vec<u8>,
) {
// We clone the `NotificationsSink` in order to be able to unlock the network-wide
// `peers_notifications_sinks` mutex as soon as possible.
let sink = {
let peers_notifications_sinks = self.peers_notifications_sinks.lock();
if let Some(sink) = peers_notifications_sinks.get(&target) {
sink.clone()
} else {
// Notification silently discarded, as documented.
return;
}
};
// Used later for the metrics report.
let message_len = message.len();
sink.send_sync_notification(protocol_name.clone(), message);
if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() {
notifications_sizes_metric
.with_label_values(&["out", &protocol_name])
.observe(message_len as f64);
}
}
/// Obtains a [`NotificationSender`] for a connected peer, if it exists.
///
/// A `NotificationSender` is scoped to a particular connection to the peer that holds
/// a receiver. With a `NotificationSender` at hand, sending a notification is done in two steps:
///
/// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready
/// for another notification, yielding a [`NotificationSenderReady`] token.
/// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. This operation
/// can only fail if the underlying notification substream or connection has suddenly closed.
///
/// An error is returned by [`NotificationSenderReady::send`] if there exists no open
/// notifications substream with that combination of peer and protocol, or if the remote
/// has asked to close the notifications substream. If that happens, it is guaranteed that an
/// [`Event::NotificationStreamClosed`] has been generated on the stream returned by
/// [`NetworkService::event_stream`].
///
/// If the remote requests to close the notifications substream, all notifications successfully
/// enqueued using [`NotificationSenderReady::send`] will finish being sent out before the
/// substream actually gets closed, but attempting to enqueue more notifications will now
/// return an error. It is however possible for the entire connection to be abruptly closed,
/// in which case enqueued notifications will be lost.
///
/// The protocol must have been registered with `register_notifications_protocol` or
/// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols).
///
/// # Usage
///
/// This method returns a struct that allows waiting until there is space available in the
/// buffer of messages towards the given peer. If the peer processes notifications at a slower
/// rate than we send them, this buffer will quickly fill up.
///
/// As such, you should never do something like this:
///
/// ```ignore
/// // Do NOT do this
/// for peer in peers {
/// if let Ok(n) = network.notification_sender(peer, ...) {
/// if let Ok(s) = n.ready().await {
/// let _ = s.send(...);
/// }
/// }
/// }
/// ```
///
/// Doing so would slow down all peers to the rate of the slowest one. A malicious or
/// malfunctioning peer could intentionally process notifications at a very slow rate.
///
/// Instead, you are encouraged to maintain your own buffer of notifications on top of the one
/// maintained by `sc-network`, and use `notification_sender` to progressively send out
/// elements from your buffer. If this additional buffer is full (which will happen at some
/// point if the peer is too slow to process notifications), appropriate measures can be taken,
/// such as removing non-critical notifications from the buffer or disconnecting the peer
/// using [`NetworkService::disconnect_peer`].
///
///
/// Notifications Per-peer buffer
/// broadcast +-------> of notifications +--> `notification_sender` +--> Internet
/// ^ (not covered by
/// | sc-network)
/// +
/// Notifications should be dropped
/// if buffer is full
///
///
/// See also the [`gossip`](crate::gossip) module for a higher-level way to send
/// notifications.
///
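/// A hedged sketch of the intended two-step flow, using the types defined in this module
/// (`peer_id`, `protocol_name` and `message` are placeholders):
///
/// ```ignore
/// let sender = service.notification_sender(peer_id, protocol_name)?;
/// // Step 1: wait until the peer's buffer has room for one more notification.
/// let ready = sender.ready().await?;
/// // Step 2: enqueue the message; this only fails if the substream closed in the meantime.
/// ready.send(message)?;
/// ```
///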
pub fn notification_sender(
&self,
target: PeerId,
protocol_name: Cow<'static, str>,
) -> Result<NotificationSender, NotificationSenderError> {
// We clone the `NotificationsSink` in order to be able to unlock the network-wide
// `peers_notifications_sinks` mutex as soon as possible.
let sink = {
let peers_notifications_sinks = self.peers_notifications_sinks.lock();
if let Some(sink) = peers_notifications_sinks.get(&target) {
sink.clone()
} else {
return Err(NotificationSenderError::Closed);
}
};
Ok(NotificationSender {
sink,
protocol_name: protocol_name.clone(),
notification_size_metric: self
.notifications_sizes_metric
.as_ref()
.map(|histogram| histogram.with_label_values(&["out", &protocol_name])),
})
}
pub async fn broadcast_message(&self, protocol_name: Cow<'static, str>, message: Vec<u8>) {
debug!("start send broadcast message");
let peers = self.known_peers().await;
for peer_id in peers {
self.write_notification(peer_id, protocol_name.clone(), message.clone());
}
debug!("finish send broadcast message");
}
pub async fn is_connected(&self, address: PeerId) -> bool {
let (tx, rx) = oneshot::channel();
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::IsConnected(address, tx));
match rx.await {
Ok(t) => t,
Err(e) => {
warn!("sth wrong {}", e);
false
}
}
}
pub async fn network_state(&self) -> Result<NetworkState, Canceled> {
let (tx, rx) = oneshot::channel();
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::NetworkState(tx));
rx.await
}
pub async fn known_peers(&self) -> HashSet<PeerId> {
let (tx, rx) = oneshot::channel();
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::KnownPeers(tx));
match rx.await {
Ok(t) => t,
Err(e) => {
debug!("sth wrong {}", e);
HashSet::new()
}
}
}
pub async fn get_address(&self, peer_id: PeerId) -> Vec<Multiaddr> {
let (tx, rx) = oneshot::channel();
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::AddressByPeerID(peer_id, tx));
match rx.await {
Ok(t) => t,
Err(e) => {
debug!("sth wrong {}", e);
Vec::new()
}
}
}
pub fn update_chain_status(&self, chain_status: ChainStatus) {
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::UpdateChainStatus(Box::new(
chain_status,
)));
}
/// Returns a stream containing the events that happen on the network.
///
/// If this method is called multiple times, the events are duplicated.
///
/// The stream never ends (unless the `NetworkWorker` gets shut down).
///
/// The name passed is used to identify the channel in the Prometheus metrics. Note that the
/// parameter is a `&'static str`, and not a `String`, in order to avoid accidentally having
/// an unbounded set of Prometheus metrics, which would be quite bad in terms of memory.
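///
/// A minimal consumption sketch (hedged: assumes an async context holding the service handle,
/// with `futures::StreamExt` in scope):
///
/// ```ignore
/// let mut events = service.event_stream("my-subsystem");
/// while let Some(event) = events.next().await {
///     // React to `Event::NotificationsReceived`, `Event::Dht`, ...
/// }
/// ```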
pub fn event_stream(&self, name: &'static str) -> impl Stream<Item = Event> {
let (tx, rx) = out_events::channel(name);
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::EventStream(tx));
rx
}
/// Sends a single targeted request to a specific peer. On success, returns the response of
/// the peer.
///
/// Request-response protocols are a way to complement notifications protocols, but
/// notifications should remain the default ways of communicating information. For example, a
/// peer can announce something through a notification, after which the recipient can obtain
/// more information by performing a request.
/// As such, this function is meant to be called only with peers we are already connected to.
/// Calling this method with a `target` we are not connected to will *not* attempt to connect
/// to said peer.
///
/// No limit or throttling of concurrent outbound requests per peer and protocol are enforced.
/// Such restrictions, if desired, need to be enforced at the call site(s).
///
/// The protocol must have been registered through
/// [`NetworkConfiguration::request_response_protocols`](
/// crate::config::NetworkConfiguration::request_response_protocols).
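///
/// A hedged sketch (the protocol name is a placeholder and must match a protocol registered
/// through `request_response_protocols`; assumes an async context):
///
/// ```ignore
/// let response: Vec<u8> = service
///     .request(peer_id, "/example/sync/1", request_bytes)
///     .await?;
/// ```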
pub async fn request(
&self,
target: PeerId,
protocol: impl Into<Cow<'static, str>>,
request: Vec<u8>,
) -> Result<Vec<u8>, RequestFailure> {
let (tx, rx) = oneshot::channel();
let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request {
target,
protocol: protocol.into(),
request,
pending_response: tx,
});
match rx.await {
Ok(v) => v,
// The channel can only be closed if the network worker no longer exists. If the
// network worker no longer exists, then all connections to `target` are necessarily
// closed, and we legitimately report this situation as a "ConnectionClosed".
Err(_) => Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)),
}
}
/// Report a given peer as either beneficial (+) or costly (-) according to the
/// given scalar.
pub fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) {
self.peerset.report_peer(who, cost_benefit);
}
/// Disconnect from a node as soon as possible.
///
/// This triggers the same effects as if the connection had closed itself spontaneously.
pub fn disconnect_peer(&self, who: PeerId) {
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who));
}
/// Are we in the process of downloading the chain?
pub fn is_major_syncing(&self) -> bool {
self.is_major_syncing.load(Ordering::Relaxed)
}
/// Start getting a value from the DHT.
///
/// This will generate either a `ValueFound` or a `ValueNotFound` event and pass it as an
/// item on the [`NetworkWorker`] stream.
pub fn get_value(&self, key: &record::Key) {
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::GetValue(key.clone()));
}
/// Start putting a value in the DHT.
///
/// This will generate either a `ValuePut` or a `ValuePutFailed` event and pass it as an
/// item on the [`NetworkWorker`] stream.
pub fn put_value(&self, key: record::Key, value: Vec<u8>) {
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::PutValue(key, value));
}
/// Connect to unreserved peers and allow unreserved peers to connect.
pub fn accept_unreserved_peers(&self) {
self.peerset.set_reserved_only(false);
}
/// Disconnect from unreserved peers and deny new unreserved peers to connect.
pub fn deny_unreserved_peers(&self) {
self.peerset.set_reserved_only(true);
}
/// Removes a `PeerId` from the list of reserved peers.
pub fn remove_reserved_peer(&self, peer: PeerId) {
self.peerset.remove_reserved_peer(peer);
}
/// Adds a `PeerId` and its address as reserved. The string should encode the address
/// and peer ID of the remote node.
pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> {
let (peer_id, addr) = parse_str_addr(&peer).map_err(|e| format!("{:?}", e))?;
self.peerset.add_reserved_peer(peer_id);
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr));
Ok(())
}
/// Modify a peerset priority group.
pub fn set_priority_group(
&self,
group_id: String,
peers: HashSet<Multiaddr>,
) -> Result<(), String> {
let peers = peers
.into_iter()
.map(|p| parse_addr(p).map_err(|e| format!("{:?}", e)))
.collect::<Result<Vec<(PeerId, Multiaddr)>, String>>()?;
let peer_ids = peers.iter().map(|(peer_id, _addr)| *peer_id).collect();
self.peerset.set_priority_group(group_id, peer_ids);
for (peer_id, addr) in peers.into_iter() {
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr));
}
Ok(())
}
/// Returns the number of peers we're connected to.
pub fn num_connected(&self) -> usize {
self.num_connected.load(Ordering::Relaxed)
}
pub fn peer_id(&self) -> &PeerId {
&self.local_peer_id
}
}
/// Trait for providing information about the local network state
pub trait NetworkStateInfo {
/// Returns the local external addresses.
fn external_addresses(&self) -> Vec<Multiaddr>;
/// Returns the local Peer ID.
fn local_peer_id(&self) -> PeerId;
}
impl NetworkStateInfo for NetworkService {
/// Returns the local external addresses.
fn external_addresses(&self) -> Vec<Multiaddr> {
self.external_addresses.lock().clone()
}
/// Returns the local Peer ID.
fn local_peer_id(&self) -> PeerId {
self.local_peer_id
}
}
/// A `NotificationSender` allows for sending notifications to a peer with a chosen protocol.
#[must_use]
pub struct NotificationSender {
sink: NotificationsSink,
/// Name of the protocol on the wire.
protocol_name: Cow<'static, str>,
/// Field extracted from the [`Metrics`] struct and necessary to report the
/// notifications-related metrics.
notification_size_metric: Option<Histogram>,
}
impl NotificationSender {
/// Returns a future that resolves when the `NotificationSender` is ready to send a notification.
pub async fn ready(&self) -> Result<NotificationSenderReady<'_>, NotificationSenderError> {
Ok(NotificationSenderReady {
ready: match self
.sink
.reserve_notification(self.protocol_name.clone())
.await
{
Ok(r) => r,
Err(()) => return Err(NotificationSenderError::Closed),
},
notification_size_metric: self.notification_size_metric.clone(),
})
}
}
/// Reserved slot in the notifications buffer, ready to accept data.
#[must_use]
pub struct NotificationSenderReady<'a> {
ready: Ready<'a>,
/// Field extracted from the [`Metrics`] struct and necessary to report the
/// notifications-related metrics.
notification_size_metric: Option<Histogram>,
}
impl<'a> NotificationSenderReady<'a> {
/// Consumes this slot's reservation and actually queues the notification.
pub fn send(self, notification: impl Into<Vec<u8>>) -> Result<(), NotificationSenderError> {
let notification = notification.into();
if let Some(notification_size_metric) = &self.notification_size_metric {
notification_size_metric.observe(notification.len() as f64);
}
self.ready
.send(notification)
.map_err(|()| NotificationSenderError::Closed)
}
}
/// Error returned by [`NetworkService::send_notification`].
#[derive(Debug, derive_more::Display, derive_more::Error)]
pub enum NotificationSenderError {
/// The notification receiver has been closed, usually because the underlying connection closed.
///
/// Some of the notifications most recently sent may not have been received. However,
/// the peer may still be connected and a new `NotificationSender` for the same
/// protocol can be obtained from [`NetworkService::notification_sender`].
Closed,
/// Protocol name hasn't been registered.
BadProtocol,
}
/// Messages sent from the `NetworkService` to the `NetworkWorker`.
///
/// Each entry corresponds to a method of `NetworkService`.
enum ServiceToWorkerMsg {
GetValue(record::Key),
PutValue(record::Key, Vec<u8>),
AddKnownAddress(PeerId, Multiaddr),
EventStream(out_events::Sender),
Request {
target: PeerId,
protocol: Cow<'static, str>,
request: Vec<u8>,
pending_response: oneshot::Sender<Result<Vec<u8>, RequestFailure>>,
},
DisconnectPeer(PeerId),
IsConnected(PeerId, oneshot::Sender<bool>),
NetworkState(oneshot::Sender<NetworkState>),
KnownPeers(oneshot::Sender<HashSet<PeerId>>),
UpdateChainStatus(Box<ChainStatus>),
AddressByPeerID(PeerId, oneshot::Sender<Vec<Multiaddr>>),
}
/// Main network worker. Must be polled in order for the network to advance.
///
/// You are encouraged to poll this in a separate background thread or task.
#[must_use = "The NetworkWorker must be polled in order for the network to work"]
pub struct NetworkWorker {
/// The network service that can be extracted and shared through the codebase.
service: Arc<NetworkService>,
/// The *actual* network.
network_service: Swarm,
/// Messages from the `NetworkService` and that must be processed.
from_worker: mpsc::UnboundedReceiver<ServiceToWorkerMsg>,
/// Senders for events that happen on the network.
event_streams: out_events::OutChannels,
/// Prometheus network metrics.
metrics: Option<Metrics>,
/// The `PeerId`'s of all boot nodes.
boot_node_ids: Arc<HashSet<PeerId>>,
/// Requests started using [`NetworkService::request`]. Includes the channel to send back the
/// response, when the request has started, and the name of the protocol for diagnostic
/// purposes.
pending_requests: HashMap<
RequestId,
(
oneshot::Sender<Result<Vec<u8>, RequestFailure>>,
Instant,
String,
),
>,
/// For each peer, an object that allows sending notifications to
/// that peer. Shared with the [`NetworkService`].
peers_notifications_sinks: Arc<Mutex<HashMap<PeerId, NotificationsSink>>>,
}
impl Future for NetworkWorker {
type Output = Result<(), io::Error>;
fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context) -> Poll<Self::Output> {
let this = &mut *self;
loop {
// Process the next message coming from the `NetworkService`.
let msg = match this.from_worker.poll_next_unpin(cx) {
Poll::Ready(Some(msg)) => msg,
Poll::Ready(None) => return Poll::Ready(Ok(())),
Poll::Pending => break,
};
match msg {
ServiceToWorkerMsg::GetValue(key) => this.network_service.get_value(&key),
ServiceToWorkerMsg::PutValue(key, value) => {
this.network_service.put_value(key, value)
}
ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => {
this.network_service.add_known_address(peer_id, addr)
}
ServiceToWorkerMsg::EventStream(sender) => this.event_streams.push(sender),
ServiceToWorkerMsg::Request {
target,
protocol,
request,
pending_response,
} => {
// Calling `send_request` can fail immediately in some circumstances.
// This is handled by sending back an error on the channel.
match this
.network_service
.send_request(&target, &protocol, request)
{
Ok(request_id) => {
if let Some(metrics) = this.metrics.as_ref() {
metrics
.requests_out_started_total
.with_label_values(&[&protocol])
.inc();
}
this.pending_requests.insert(
request_id,
(pending_response, Instant::now(), protocol.to_string()),
);
}
Err(SendRequestError::NotConnected) => {
let err = RequestFailure::Network(OutboundFailure::ConnectionClosed);
let _ = pending_response.send(Err(err));
}
Err(SendRequestError::UnknownProtocol) => {
let err =
RequestFailure::Network(OutboundFailure::UnsupportedProtocols);
let _ = pending_response.send(Err(err));
}
}
}
ServiceToWorkerMsg::DisconnectPeer(who) => this
.network_service
.user_protocol_mut()
.disconnect_peer(&who),
ServiceToWorkerMsg::IsConnected(who, tx) => {
let _ = tx.send(this.is_open(&who));
}
ServiceToWorkerMsg::NetworkState(tx) => {
let _ = tx.send(this.network_state());
}
ServiceToWorkerMsg::KnownPeers(tx) => {
let peers = this.known_peers();
let mut result = HashSet::new();
for peer in peers {
result.insert(peer);
}
let _ = tx.send(result);
}
ServiceToWorkerMsg::UpdateChainStatus(status) => {
this.network_service
.user_protocol_mut()
.update_chain_status(*status);
}
ServiceToWorkerMsg::AddressByPeerID(peer_id, tx) => {
let _ = tx.send(this.network_service.get_address(&peer_id));
}
}
}
// `num_iterations` serves the same purpose as in the previous loop.
// See the previous loop for explanations.
let mut num_iterations = 0;
loop {
num_iterations += 1;
if num_iterations >= 1000 {
cx.waker().wake_by_ref();
break;
}
// Process the next action coming from the network.
let next_event = this.network_service.next_event();
futures::pin_mut!(next_event);
let poll_value = next_event.poll_unpin(cx);
match poll_value {
Poll::Pending => break,
Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::InboundRequest {
protocol,
result,
..
})) => {
if let Some(metrics) = this.metrics.as_ref() {
match result {
Ok(serve_time) => {
metrics
.requests_in_success_total
.with_label_values(&[&protocol])
.observe(
serve_time
.unwrap_or_else(|| Duration::from_secs(0))
.as_secs_f64(),
);
}
Err(err) => {
let reason = match err {
ResponseFailure::Network(InboundFailure::Timeout) => "timeout",
ResponseFailure::Network(
InboundFailure::UnsupportedProtocols,
) => "unsupported",
ResponseFailure::Network(InboundFailure::ConnectionClosed) => {
"connection-closed"
}
ResponseFailure::Network(InboundFailure::ResponseOmission) => {
"busy-omitted"
}
};
metrics
.requests_in_failure_total
.with_label_values(&[&protocol, reason])
.inc();
}
}
}
}
Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RequestFinished {
request_id,
result,
})) => {
if let Some((send_back, started, protocol)) =
this.pending_requests.remove(&request_id)
{
if let Some(metrics) = this.metrics.as_ref() {
match &result {
Ok(_) => {
metrics
.requests_out_success_total
.with_label_values(&[&protocol])
.observe(started.elapsed().as_secs_f64());
}
Err(err) => {
let reason = match err {
RequestFailure::Refused => "refused",
RequestFailure::Network(OutboundFailure::DialFailure) => {
"dial-failure"
}
RequestFailure::Network(OutboundFailure::Timeout) => {
"timeout"
}
RequestFailure::Network(
OutboundFailure::ConnectionClosed,
) => "connection-closed",
RequestFailure::Network(
OutboundFailure::UnsupportedProtocols,
) => "unsupported",
};
metrics
.requests_out_failure_total
.with_label_values(&[&protocol, reason])
.inc();
}
}
}
let _ = send_back.send(result);
} else {
error!("Request not in pending_requests");
}
}
Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted(_))) => {}
Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened {
remote,
notifications_sink,
info,
})) => {
if let Some(metrics) = this.metrics.as_ref() {
metrics.notifications_streams_opened_total.inc();
}
{
let mut peers_notifications_sinks = this.peers_notifications_sinks.lock();
peers_notifications_sinks.insert(remote, notifications_sink);
}
this.event_streams
.send(Event::NotificationStreamOpened { remote, info });
}
Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced {
remote,
notifications_sink,
})) => {
let mut peers_notifications_sinks = this.peers_notifications_sinks.lock();
if let Some(s) = peers_notifications_sinks.get_mut(&remote) {
*s = notifications_sink;
} else {
log::error!(
target: "sub-libp2p",
"NotificationStreamReplaced for non-existing substream"
);
}
// TODO: Notifications might have been lost as a result of the previous
// connection being dropped, and as a result it would be preferable to notify
// the users of this fact by simulating the substream being closed then
// reopened.
// The code below doesn't compile because `role` is unknown. Propagating the
// handshake of the secondary connections is quite an invasive change and
// would conflict with https://github.com/paritytech/substrate/issues/6403.
// Considering that dropping notifications is generally regarded as
// acceptable, this bug is at the moment intentionally left there and is
// intended to be fixed at the same time as
// https://github.com/paritytech/substrate/issues/6403.
/*this.event_streams.send(Event::NotificationStreamClosed {
remote,
engine_id,
});
this.event_streams.send(Event::NotificationStreamOpened {
remote,
engine_id,
role,
});*/
}
Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed {
remote,
})) => {
if let Some(metrics) = this.metrics.as_ref() {
metrics.notifications_streams_closed_total.inc();
}
this.event_streams
.send(Event::NotificationStreamClosed { remote });
{
let mut peers_notifications_sinks = this.peers_notifications_sinks.lock();
peers_notifications_sinks.remove(&remote);
}
}
Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived {
remote,
protocol,
messages,
})) => {
if let Some(metrics) = this.metrics.as_ref() {
for message in &messages {
metrics
.notifications_sizes
.with_label_values(&["in", &protocol])
.observe(message.len() as f64);
}
}
this.event_streams.send(Event::NotificationsReceived {
remote,
protocol,
messages,
});
}
Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Dht(event, duration))) => {
if let Some(metrics) = this.metrics.as_ref() {
let query_type = match event {
DhtEvent::ValueFound(_) => "value-found",
DhtEvent::ValueNotFound(_) => "value-not-found",
DhtEvent::ValuePut(_) => "value-put",
DhtEvent::ValuePutFailed(_) => "value-put-failed",
};
metrics
.kademlia_query_duration
.with_label_values(&[query_type])
.observe(duration.as_secs_f64());
}
this.event_streams.send(Event::Dht(event));
}
Poll::Ready(SwarmEvent::ConnectionEstablished {
peer_id, endpoint, ..
}) => {
trace!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id);
if let Some(metrics) = this.metrics.as_ref() {
match endpoint {
ConnectedPoint::Dialer { .. } => metrics
.connections_opened_total
.with_label_values(&["out"])
.inc(),
ConnectedPoint::Listener { .. } => metrics
.connections_opened_total
.with_label_values(&["in"])
.inc(),
}
}
}
Poll::Ready(SwarmEvent::ConnectionClosed {
peer_id,
cause,
endpoint,
num_established,
..
}) => {
trace!(target: "sub-libp2p", "Libp2p => Disconnected({:?}, {:?})", peer_id, cause);
if let Some(metrics) = this.metrics.as_ref() {
let direction = match endpoint {
ConnectedPoint::Dialer { .. } => "out",
ConnectedPoint::Listener { .. } => "in",
};
let reason = match cause {
Some(ConnectionError::IO(_)) => "transport-error",
// Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(
// EitherError::A(EitherError::A(EitherError::B(EitherError::A(
// PingFailure::Timeout,
// )))),
// ))) => "ping-timeout",
// Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(
// EitherError::A(EitherError::A(EitherError::A(EitherError::A(
// EitherError::A(EitherError::A(NotifsHandlerError::Legacy(
// LegacyConnectionKillError,
// ))),
// )))),
// ))) => "force-closed",
// Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(
// EitherError::A(EitherError::A(EitherError::A(EitherError::A(
// EitherError::A(EitherError::A(
// NotifsHandlerError::SyncNotificationsClogged,
// )),
// )))),
// ))) => "sync-notifications-clogged",
Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(_))) => {
"protocol-error"
}
Some(ConnectionError::Handler(
NodeHandlerWrapperError::KeepAliveTimeout,
)) => "keep-alive-timeout",
None => "actively-closed",
};
metrics
.connections_closed_total
.with_label_values(&[direction, reason])
.inc();
// `num_established` represents the number of *remaining* connections.
if num_established == 0 {
metrics.distinct_peers_connections_closed_total.inc();
}
}
}
Poll::Ready(SwarmEvent::NewListenAddr(addr)) => {
trace!(target: "sub-libp2p", "Libp2p => NewListenAddr({})", addr)
}
Poll::Ready(SwarmEvent::ExpiredListenAddr(addr)) => {
trace!(target: "sub-libp2p", "Libp2p => ExpiredListenAddr({})", addr)
}
Poll::Ready(SwarmEvent::UnreachableAddr {
peer_id,
address,
error,
..
}) => {
trace!(
target: "sub-libp2p", "Libp2p => Failed to reach {:?} through {:?}: {}",
peer_id,
address,
error,
);
if this.boot_node_ids.contains(&peer_id) {
if let PendingConnectionError::InvalidPeerId = error {
error!(
"💔 The bootnode you want to connect to at `{}` provided a different peer ID than the one you expect: `{}`.",
address,
peer_id,
);
}
}
if let Some(metrics) = this.metrics.as_ref() {
match error {
PendingConnectionError::ConnectionLimit(_) => metrics
.pending_connections_errors_total
.with_label_values(&["limit-reached"])
.inc(),
PendingConnectionError::InvalidPeerId => metrics
.pending_connections_errors_total
.with_label_values(&["invalid-peer-id"])
.inc(),
PendingConnectionError::Transport(_)
| PendingConnectionError::IO(_) => metrics
.pending_connections_errors_total
.with_label_values(&["transport-error"])
.inc(),
}
}
}
Poll::Ready(SwarmEvent::Dialing(peer_id)) => {
trace!(target: "sub-libp2p", "Libp2p => Dialing({:?})", peer_id)
}
Poll::Ready(SwarmEvent::IncomingConnection {
local_addr,
send_back_addr,
}) => {
trace!(target: "sub-libp2p", "Libp2p => IncomingConnection({},{}))",
local_addr, send_back_addr);
}
Poll::Ready(SwarmEvent::IncomingConnectionError {
local_addr,
send_back_addr,
error,
}) => {
trace!(target: "sub-libp2p", "Libp2p => IncomingConnectionError({},{}): {}",
local_addr, send_back_addr, error);
}
Poll::Ready(SwarmEvent::BannedPeer { peer_id, endpoint }) => {
trace!(target: "sub-libp2p", "Libp2p => BannedPeer({}). Connected via {:?}.",
peer_id, endpoint);
}
Poll::Ready(SwarmEvent::UnknownPeerUnreachableAddr { address, error }) => {
trace!(target: "sub-libp2p", "Libp2p => UnknownPeerUnreachableAddr({}): {}",
address, error)
}
Poll::Ready(SwarmEvent::ListenerClosed {
reason,
addresses: _,
}) => {
warn!(target: "sub-libp2p", "Libp2p => ListenerClosed: {:?}", reason);
}
Poll::Ready(SwarmEvent::ListenerError { error }) => {
trace!(target: "sub-libp2p", "Libp2p => ListenerError: {}", error);
}
};
}
if let Some(metrics) = this.metrics.as_ref() {
for (proto, buckets) in this.network_service.num_entries_per_kbucket() {
for (lower_ilog2_bucket_bound, num_entries) in buckets {
metrics
.kbuckets_num_nodes
.with_label_values(&[
&proto.as_ref(),
&lower_ilog2_bucket_bound.to_string(),
])
.set(num_entries as u64);
}
}
for (proto, num_entries) in this.network_service.num_kademlia_records() {
metrics
.kademlia_records_count
.with_label_values(&[&proto.as_ref()])
.set(num_entries as u64);
}
for (proto, num_entries) in this.network_service.kademlia_records_total_size() {
metrics
.kademlia_records_sizes_total
.with_label_values(&[&proto.as_ref()])
.set(num_entries as u64);
}
metrics
.peerset_num_discovered
.set(this.network_service.user_protocol().num_discovered_peers() as u64);
metrics.peerset_num_requested.set(
this.network_service
.user_protocol()
.requested_peers()
.count() as u64,
);
metrics.pending_connections.set(
Swarm::network_info(&this.network_service)
.connection_counters()
.num_pending() as u64,
);
}
Poll::Pending
}
}
impl Unpin for NetworkWorker {}
/// The libp2p swarm, customized for our needs.
type Swarm = libp2p::swarm::Swarm<Behaviour>;
fn ensure_addresses_consistent_with_transport<'a>(
addresses: impl Iterator<Item = &'a Multiaddr>,
transport: &TransportConfig,
) -> Result<(), Error> {
if matches!(transport, TransportConfig::MemoryOnly) {
let addresses: Vec<_> = addresses
.filter(|x| {
x.iter()
.any(|y| !matches!(y, libp2p::core::multiaddr::Protocol::Memory(_)))
})
.cloned()
.collect();
if !addresses.is_empty() {
return Err(Error::AddressesForAnotherTransport {
transport: transport.clone(),
addresses,
});
}
} else {
let addresses: Vec<_> = addresses
.filter(|x| {
x.iter()
.any(|y| matches!(y, libp2p::core::multiaddr::Protocol::Memory(_)))
})
.cloned()
.collect();
if !addresses.is_empty() {
return Err(Error::AddressesForAnotherTransport {
transport: transport.clone(),
addresses,
});
}
}
Ok(())
} | TransportConfig::Normal {
enable_mdns,
allow_private_ipv4, |
setup.py | # coding=utf-8
# Copyright 2020 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF.Text is a TensorFlow library of text related ops, modules, and subgraphs.
TF.Text is a TensorFlow library of text related ops, modules, and subgraphs. The
library can perform the preprocessing regularly required by text-based models,
and includes other features useful for sequence modeling not provided by core
TensorFlow.
See the README on GitHub for further documentation.
http://github.com/tensorflow/text
"""
import os
from setuptools import find_packages
from setuptools import setup
from setuptools.command.install import install
from setuptools.dist import Distribution
project_name = 'tensorflow-text'
project_version = '2.3.0-rc0'
class BinaryDistribution(Distribution):
"""This class is needed in order to create OS specific wheels."""
def is_pure(self):
return False
def has_ext_modules(self):
return True
class InstallPlatlib(install):
"""This is needed to set the library to platlib compliant."""
def finalize_options(self):
|
DOCLINES = __doc__.split('\n')
setup(
name=project_name,
version=project_version.replace('-', ''),
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
author='Google Inc.',
author_email='[email protected]',
url='http://github.com/tensorflow/text',
license='Apache 2.0',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
cmdclass={'install': InstallPlatlib},
distclass=BinaryDistribution,
install_requires=[
'tensorflow>=2.3.0rc0, <2.4',
'tensorflow_hub>=0.8.0',
],
extras_require={
'tensorflow_gpu': ['tensorflow-gpu>=2.1.0, <2.2',],
'tests': [
'absl-py',
'pytest',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='tensorflow text machine learning',
)
| """For more info; see http://github.com/google/or-tools/issues/616 ."""
install.finalize_options(self)
self.install_lib = self.install_platlib
self.install_libbase = self.install_lib
self.install_lib = os.path.join(self.install_lib, self.extra_dirs) |
gui.py | import tkinter as tk
class GUI(tk.Frame):
def __init__(self,master=None):
super().__init__(master)
self.pack()
self.create_widgets()
def create_widgets(self):
|
def say_hi(self):
print("Hi there,everyone")
root = tk.Tk()
app = GUI(master=root)
root.title("This is a test")
app.mainloop()
| self.hi_there = tk.Button(self)
self.hi_there["width"] = 15
self.hi_there["height"] = 10
self.hi_there["text"] = "Hello World\n(Click me)"
self.hi_there["command"] = self.say_hi
self.hi_there.pack(side="left")
self.quit = tk.Button(self,text="QUIT",fg="red",command=root.destroy)
self.quit.pack(side="right") |
url-util.js | 'use strict';
/**
* Replace parameters in a URL template with values from a `params` object.
*
* Returns an object containing the expanded URL and a dictionary of unused
* parameters.
*
* replaceURLParams('/things/:id', {id: 'foo', q: 'bar'}) =>
* {url: '/things/foo', params: {q: 'bar'}}
*/
function replaceURLParams(url, params) {
const unusedParams = {};
for (const param in params) {
if (params.hasOwnProperty(param)) {
const value = params[param];
const urlParam = ':' + param;
if (url.indexOf(urlParam) !== -1) {
url = url.replace(urlParam, encodeURIComponent(value));
} else {
unusedParams[param] = value;
}
}
}
return {url: url, params: unusedParams};
}
/**
* Resolve a relative URL against a base URL to get an absolute URL.
*
* @param {string} relativeURL
* @param {string} baseURL
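 *
 * Example (hypothetical values):
 *   resolve('search', 'https://example.com/app/') => 'https://example.com/app/search'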
*/
function | (relativeURL, baseURL) {
return new URL(relativeURL, baseURL).href;
}
module.exports = {
replaceURLParams: replaceURLParams,
resolve: resolve,
};
| resolve |
address-test.js | 'use strict';
var should = require('chai').should();
var ip = require('..');
var Address4 = ip.Address4;
var Address6 = ip.Address6;
function | (addressString, descriptors) {
var address4 = new Address4(addressString);
var address6 = new Address6(addressString);
describe(addressString, function () {
descriptors.forEach(function (descriptor) {
if (descriptor === 'valid-ipv4') {
it('is valid', function () {
address4.should.be.an('object');
address4.parsedAddress.should.be.an.instanceOf(Array);
address4.parsedAddress.length.should.equal(4);
address4.subnetMask.should.be.a('number');
address4.subnetMask.should.be.at.least(0);
address4.subnetMask.should.be.at.most(128);
should.not.exist(address4.error);
should.not.exist(address4.parseError);
address4.isValid().should.equal(true);
});
}
if (descriptor === 'valid-ipv6') {
it('is valid', function () {
address6.should.be.an('object');
address6.zone.should.be.a('string');
address6.subnet.should.be.a('string');
address6.subnetMask.should.be.a('number');
address6.subnetMask.should.be.at.least(0);
address6.subnetMask.should.be.at.most(128);
address6.parsedAddress.should.be.an.instanceOf(Array);
address6.parsedAddress.length.should.equal(8);
should.not.exist(address6.error);
should.not.exist(address6.parseError);
address6.isValid().should.equal(true);
});
var re = address6.regularExpression();
var reSubstring = address6.regularExpression(true);
it('matches the correct form via regex', function () {
re.test(address6.correctForm()).should.equal(true);
reSubstring.test('abc ' + address6.correctForm() + ' def')
.should.equal(true);
});
it('matches the canonical form via regex', function () {
re.test(address6.canonicalForm()).should.equal(true);
reSubstring.test('abc ' + address6.canonicalForm() + ' def')
.should.equal(true);
});
it('matches the given form via regex', function () {
// We can't match addresses like ::192.168.0.1 yet
if (address6.is4()) {
return;
}
re.test(addressString).should.equal(true);
reSubstring.test('abc ' + addressString + ' def')
.should.equal(true);
});
}
if (descriptor === 'invalid-ipv4') {
it('is invalid as parsed by v4', function () {
address4.error.should.be.a('string');
address4.isValid().should.equal(false);
});
}
if (descriptor === 'invalid-ipv6') {
it('is invalid as parsed by v6', function () {
address6.error.should.be.a('string');
address6.isValid().should.equal(false);
should.not.exist(address6.correctForm());
});
}
if (descriptor === 'canonical') {
it('is canonical', function () {
address6.isCanonical().should.equal(true);
should.equal(address6.addressMinusSuffix.length, 39);
});
}
if (descriptor === 'correct') {
it('is correct', function () {
address6.isCorrect().should.equal(true);
});
}
if (descriptor === 'correct-ipv4') {
it('is correct', function () {
address4.isCorrect().should.equal(true);
});
}
if (descriptor === 'incorrect') {
it('is incorrect', function () {
address6.isCorrect().should.equal(false);
});
}
if (descriptor === 'incorrect-ipv4') {
it('is incorrect', function () {
address4.isCorrect().should.equal(false);
});
}
if (descriptor === 'has-subnet') {
it('parses the subnet', function () {
address6.subnet.should.match(/^\/\d{1,3}/);
});
}
if (descriptor === 'v4-in-v6') {
it('is an ipv4-in-ipv6 address', function () {
address6.is4().should.equal(true);
});
}
});
});
}
function loadJsonBatch(file, classes, noMerge) {
// Load the list of test addresses
var addresses = require(file);
addresses.forEach(function (address) {
if (address.conditions === undefined ||
!address.conditions.length || noMerge) {
address.conditions = classes;
} else {
address.conditions = address.conditions.concat(classes);
}
addressIs(address.address, address.conditions);
});
}
describe('Valid IPv4 addresses', function () {
loadJsonBatch('./data/valid-ipv4-addresses.json', ['valid-ipv4']);
loadJsonBatch('./data/valid-ipv4-addresses.json', ['invalid-ipv6'], true);
});
describe('Valid IPv6 addresses', function () {
loadJsonBatch('./data/valid-ipv6-addresses.json', ['valid-ipv6']);
loadJsonBatch('./data/valid-ipv6-addresses.json', ['invalid-ipv4'], true);
});
describe('Invalid IPv4 addresses', function () {
loadJsonBatch('./data/invalid-ipv4-addresses.json', ['invalid-ipv4']);
});
describe('Invalid IPv6 addresses', function () {
loadJsonBatch('./data/invalid-ipv6-addresses.json', ['invalid-ipv6']);
});
| addressIs |
index.ts | export { getModelToken } from './dynamoose.utils'; | export * from './dynamoose.decorators'; |
|
polonius.rs | use crate::def_use::{self, DefUse};
use crate::location::{LocationIndex, LocationTable};
use rustc_middle::mir::visit::{MutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::{Body, Local, Location, Place};
use rustc_middle::ty::subst::GenericArg;
use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
use super::TypeChecker;
type VarPointRelation = Vec<(Local, LocationIndex)>;
type PathPointRelation = Vec<(MovePathIndex, LocationIndex)>;
struct UseFactsExtractor<'me> {
var_defined_at: &'me mut VarPointRelation,
var_used_at: &'me mut VarPointRelation,
location_table: &'me LocationTable,
var_dropped_at: &'me mut VarPointRelation,
move_data: &'me MoveData<'me>,
path_accessed_at_base: &'me mut PathPointRelation,
}
// A Visitor to walk through the MIR and extract point-wise facts
impl UseFactsExtractor<'_> {
fn location_to_index(&self, location: Location) -> LocationIndex {
self.location_table.mid_index(location)
}
fn insert_def(&mut self, local: Local, location: Location) {
debug!("UseFactsExtractor::insert_def()");
self.var_defined_at.push((local, self.location_to_index(location)));
}
fn insert_use(&mut self, local: Local, location: Location) {
debug!("UseFactsExtractor::insert_use()");
self.var_used_at.push((local, self.location_to_index(location)));
}
fn insert_drop_use(&mut self, local: Local, location: Location) {
debug!("UseFactsExtractor::insert_drop_use()");
self.var_dropped_at.push((local, self.location_to_index(location)));
}
fn insert_path_access(&mut self, path: MovePathIndex, location: Location) {
debug!("UseFactsExtractor::insert_path_access({:?}, {:?})", path, location);
self.path_accessed_at_base.push((path, self.location_to_index(location)));
}
fn place_to_mpi(&self, place: &Place<'_>) -> Option<MovePathIndex> {
match self.move_data.rev_lookup.find(place.as_ref()) {
LookupResult::Exact(mpi) => Some(mpi),
LookupResult::Parent(mmpi) => mmpi,
}
}
}
impl Visitor<'tcx> for UseFactsExtractor<'_> {
fn visit_local(&mut self, &local: &Local, context: PlaceContext, location: Location) {
match def_use::categorize(context) {
Some(DefUse::Def) => self.insert_def(local, location),
Some(DefUse::Use) => self.insert_use(local, location),
Some(DefUse::Drop) => self.insert_drop_use(local, location),
_ => (),
}
}
fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) {
self.super_place(place, context, location);
match context {
PlaceContext::NonMutatingUse(_) => {
if let Some(mpi) = self.place_to_mpi(place) {
self.insert_path_access(mpi, location);
}
}
PlaceContext::MutatingUse(MutatingUseContext::Borrow) => |
_ => (),
}
}
}
pub(super) fn populate_access_facts(
typeck: &mut TypeChecker<'_, 'tcx>,
body: &Body<'tcx>,
location_table: &LocationTable,
move_data: &MoveData<'_>,
dropped_at: &mut Vec<(Local, Location)>,
) {
debug!("populate_access_facts()");
if let Some(facts) = typeck.borrowck_context.all_facts.as_mut() {
let mut extractor = UseFactsExtractor {
var_defined_at: &mut facts.var_defined_at,
var_used_at: &mut facts.var_used_at,
var_dropped_at: &mut facts.var_dropped_at,
path_accessed_at_base: &mut facts.path_accessed_at_base,
location_table,
move_data,
};
extractor.visit_body(&body);
facts.var_dropped_at.extend(
dropped_at.iter().map(|&(local, location)| (local, location_table.mid_index(location))),
);
for (local, local_decl) in body.local_decls.iter_enumerated() {
debug!(
"add use_of_var_derefs_origin facts - local={:?}, type={:?}",
local, local_decl.ty
);
let _prof_timer = typeck.infcx.tcx.prof.generic_activity("polonius_fact_generation");
let universal_regions = &typeck.borrowck_context.universal_regions;
typeck.infcx.tcx.for_each_free_region(&local_decl.ty, |region| {
let region_vid = universal_regions.to_region_vid(region);
facts.use_of_var_derefs_origin.push((local, region_vid));
});
}
}
}
// For every potentially drop()-touched region `region` in `local`'s type
// (`kind`), emit a Polonius `drop_of_var_derefs_origin(local, origin)` fact.
pub(super) fn add_drop_of_var_derefs_origin(
typeck: &mut TypeChecker<'_, 'tcx>,
local: Local,
kind: &GenericArg<'tcx>,
) {
debug!("add_drop_of_var_derefs_origin(local={:?}, kind={:?}", local, kind);
if let Some(facts) = typeck.borrowck_context.all_facts.as_mut() {
let _prof_timer = typeck.infcx.tcx.prof.generic_activity("polonius_fact_generation");
let universal_regions = &typeck.borrowck_context.universal_regions;
typeck.infcx.tcx.for_each_free_region(kind, |drop_live_region| {
let region_vid = universal_regions.to_region_vid(drop_live_region);
facts.drop_of_var_derefs_origin.push((local, region_vid));
});
}
}
| {
if let Some(mpi) = self.place_to_mpi(place) {
self.insert_path_access(mpi, location);
}
} |
pipeline_execution__embedded.rs | /*
* Cloud Manager API
*
* This API allows access to Cloud Manager programs, pipelines, and environments by an authorized technical account created through the Adobe I/O Console. The base url for this API is https://cloudmanager.adobe.io, e.g. to get the list of programs for an organization, you would make a GET request to https://cloudmanager.adobe.io/api/programs (with the correct set of headers as described below). This swagger file can be downloaded from https://raw.githubusercontent.com/AdobeDocs/cloudmanager-api-docs/master/swagger-specs/api.yaml.
*
* The version of the OpenAPI document: 1.0.0
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
| pub step_states: Option<Vec<crate::models::PipelineExecutionStepState>>,
}
impl PipelineExecutionEmbedded {
pub fn new() -> PipelineExecutionEmbedded {
PipelineExecutionEmbedded {
step_states: None,
}
}
} | #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PipelineExecutionEmbedded {
#[serde(rename = "stepStates", skip_serializing_if = "Option::is_none")] |
project.go | package project
var (
description = "The release-operator manages chart configs for new releases."
gitSHA = "n/a"
name = "release-operator"
source = "https://github.com/giantswarm/release-operator"
version = "2.4.1-dev"
)
func Description() string {
return description
}
func GitSHA() string {
return gitSHA
}
func Name() string {
return name
}
func Source() string {
return source
}
func Version() string | {
return version
} |
|
JobProduct.py | import time
import traceback
from datetime import datetime
from App.controllers.BaseController import BaseController
from App.jobs import get_job_pool
from App.jobs.JobMultipleHandler import job_multiple_handler
from App.jobs.JobQueuePutter import JobSuccessQueuePutter, JobStartedQueuePutter, JobRunningQueuePutter, \
JobFailingQueuePutter
from App.jobs.JobSingleHandler import job_single_handler
from App.jobs.JobTypeEnums import JobType
from App.models.BaseTaskMapping import BaseTask
from App.settings import get_config
class JobProduct(object):
def __init__(self, job_type, job_name, queue, args, kwargs):
"""
初始化Job对象
:param job_type: Job的类别
:param job_name: Job的名称
:param queue: 当前Job要使用的消息队列对象
:param args: 当前Job要执行的任务需要传入的参数,需以元组的形式传入
"""
self.id = str(int(time.time() * 1000000))
self.start_time = None
self.job_type = job_type
self.name = job_name
self.queue = queue
self.args = args
self.kwargs = kwargs
self.source_range = {}
self.search_range = {}
def target(self, *args, **kwargs):
"""
需由子类重写,该Job待执行的任务
:param args: 当前Job要执行的任务需要传入的参数,需以元组展开的形式传入
:param kwargs: 当前Job要执行的任务需要传入的参数,需以键值对展开的形式传入
:return: 返回值由子类定义
"""
pass
def wrapped_target(self, *args, **kwargs):
"""
用于获取子进程中的异常的装饰方法,也是实际传入子进程的方法
:param args: 当前Job要执行的任务需要传入的参数,需以元组展开的形式传入
:param kwargs: 当前Job要执行的任务需要传入的参数,需以键值对展开的形式传入
:return:
"""
try:
self.target(*args, **kwargs)
except Exception as e:
enable_error_traceback = get_config().ENABLE_ERROR_TRACEBACK
error_msg = traceback.format_exc() if enable_error_traceback else str(e)
JobFailingQueuePutter(self.id, self.name, self.queue, self.start_time).put({
"progress": 0,
"source_range": self.source_range,
"search_range": self.search_range,
"job_type": self.job_type,
"config": self.kwargs,
"error_msg": error_msg
}, "Job Failed!")
print(error_msg)
def start(self):
"""
创建子进程,开始一项Job
:return:
"""
self.start_time = datetime.now()
JobStartedQueuePutter(self.id, self.name, self.queue, self.start_time).put({
"progress": 0,
"source_range": self.source_range,
"search_range": self.search_range,
"job_type": self.job_type,
"config": self.kwargs,
}, "Job Started!")
get_job_pool().apply_async(self.wrapped_target, self.args, self.kwargs)
class JobSingleProduct(JobProduct):
""" 单文档查重的JobProduct子类 """
def target(self, index_id, task_id, document_id, search_range, document, **kwargs):
"""
单独查重任务的执行函数
:param index_id: 被查重文档所属index
:param task_id: 被查重文档所属task
:param document_id: 被查重文档的id
:param search_range: 查重范围
:param document: 被查重文档的BaseDocumentMapping对象
:return:
"""
source_range = {
index_id: [task_id]
}
JobRunningQueuePutter(self.id, self.name, self.queue, self.start_time).put({
"progress": 0,
"document": document_id,
"source_range": source_range,
"search_range": search_range,
"job_type": JobType.SINGLE_CHECK_JOB,
"config": self.kwargs
}, "Single job is running!")
ret = job_single_handler(index_id, task_id, document_id, search_range, document.body, **kwargs)
repetitive, result = ret[0], ret[1]
JobSuccessQueuePutter(self.id, self.name, self.queue, self.start_time).put({
"progress": 1,
"document": document_id,
"source_range": source_range,
"search_range": search_range,
"job_type": JobType.SINGLE_CHECK_JOB,
"config": self.kwargs,
"repetitive_rate": repetitive,
"document_result": result
}, "Single job finished successfully!")
class JobMultipleProduct(JobProduct):
""" 联合文档查重的JobProduct子类 """
total_doc_count = 0
finished_count = 0
def target(self, source_range, search_range, **kwargs):
"""
联合文档查重的执行函数
:param source_range: 被查重文档的范围,精确到task
:param search_range: 查重范围
:return:
"""
for index_id, tasks in source_range.items():
for task_id in tasks:
task_instance: BaseTask = BaseController().get_task(index_id, task_id)
self.total_doc_count += len(task_instance.docs)
self.source_range = source_range
self.search_range = search_range
res = job_multiple_handler(self.progress_callback, source_range, search_range, **kwargs)
JobSuccessQueuePutter(self.id, self.name, self.queue, self.start_time).put({
"progress": 1,
"source_range": source_range,
"search_range": search_range,
"job_type": JobType.MULTIPLE_CHECK_JOB,
"config": self.kwargs,
"result_summary": res[0],
"cluster_list": res[1]
}, "Multiple job finished successfully!")
def progress_callback(self, res):
"""
作为联合查重每个线程的回调函数,负责将任务进度记录发送给消息队列
:param res: 单个任务线程的返回值
:return:
"""
self.finished_count += 1
progress = self.finished_count / self.total_doc_count
result = res.result()
repetitive_rate = "%.4f" % result[0]
document_detail = result[3]
index, task, document = document_detail["index"], document_detail["task"], document_detail["document"]
progress_str = "Progress: %.2f" % (progress * 100) + "%\t"
detail_str = f"rep_rate: {repetitive_rate}\tindex: {index}\ttask: {task}\tdocument: {document}"
JobRunn | ingQueuePutter(self.id, self.name, self.queue, self.start_time).put({
"progress": progress,
"source_range": self.source_range,
"search_range": self.search_range,
"job_type": JobType.MULTIPLE_CHECK_JOB,
"config": self.kwargs
}, progress_str + detail_str)
|
|
facemask.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 22 15:56:47 2020
@author: Saksham
"""
| from keras.models import Sequential,load_model
from keras.optimizers import adam
from keras.preprocessing import image
import cv2
import datetime
# UNCOMMENT THE FOLLOWING CODE TO TRAIN THE CNN FROM SCRATCH
# BUILDING MODEL TO CLASSIFY BETWEEN MASK AND NO MASK
model=Sequential()
model.add(Conv2D(32,(3,3),activation='relu',input_shape=(150,150,3)))
model.add(MaxPooling2D() )
model.add(Conv2D(32,(3,3),activation='relu'))
model.add(MaxPooling2D() )
model.add(Conv2D(32,(3,3),activation='relu'))
model.add(MaxPooling2D() )
model.add(Flatten())
model.add(Dense(100,activation='relu'))
model.add(Dense(1,activation='sigmoid'))
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
'train',
target_size=(150,150),
batch_size=16 ,
class_mode='binary')
test_set = test_datagen.flow_from_directory(
'test',
target_size=(150,150),
batch_size=16,
class_mode='binary')
model_saved=model.fit_generator(
training_set,
epochs=10,
validation_data=test_set,
)
model.save('mymodel.h5',model_saved)
#To test for individual images
mymodel=load_model('mymodel.h5')
#test_image=image.load_img('C:/Users/saksham/Desktop/ML Datasets/Face Mask Detection/Dataset/test/without_mask/30.jpg',target_size=(150,150,3))
test_image=image.load_img(r'C:/Users/saksham/Desktop/FaceMaskDetector/test/with_mask/1-with-mask.jpg',
target_size=(150,150,3))
test_image
test_image=image.img_to_array(test_image)
test_image=np.expand_dims(test_image,axis=0)
mymodel.predict(test_image)[0][0]
# IMPLEMENTING LIVE DETECTION OF FACE MASK
mymodel=load_model('mymodel.h5')
cap=cv2.VideoCapture(0)
face_cascade=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
while cap.isOpened():
_,img=cap.read()
face=face_cascade.detectMultiScale(img,scaleFactor=1.1,minNeighbors=4)
for(x,y,w,h) in face:
face_img = img[y:y+h, x:x+w]
cv2.imwrite('temp.jpg',face_img)
test_image=image.load_img('temp.jpg',target_size=(150,150,3))
test_image=image.img_to_array(test_image)
test_image=np.expand_dims(test_image,axis=0)
pred=mymodel.predict(test_image)[0][0]
if pred==1:
cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),3)
cv2.putText(img,'NO MASK',((x+w)//2,y+h+20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),3)
else:
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),3)
cv2.putText(img,'MASK',((x+w)//2,y+h+20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),3)
datet=str(datetime.datetime.now())
cv2.putText(img,datet,(400,450),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255),1)
cv2.imshow('img',img)
if cv2.waitKey(1)==ord('q'):
break
cap.release()
cv2.destroyAllWindows() | import numpy as np
import keras
import keras.backend as k
from keras.layers import Conv2D,MaxPooling2D,SpatialDropout2D,Flatten,Dropout,Dense |
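One note on reading the model's output: the detection loop above compares the sigmoid output to exactly 1. A slightly more explicit decision step is sketched below; the class-to-index mapping is an assumption (flow_from_directory assigns indices alphabetically, and training_set.class_indices can confirm it):

# Hedged sketch; relies on `mymodel` and `test_image` defined above and assumes
# the mapping {'with_mask': 0, 'without_mask': 1}.
prob = float(mymodel.predict(test_image)[0][0])
label = 'without_mask' if prob >= 0.5 else 'with_mask'
print("P(no mask) = %.3f -> %s" % (prob, label))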
getHTTPObject.js | function getHTTPObject() {
if (typeof XMLHttpRequest == "undefined") { // IE
XMLHttpRequest = function() {
try {
return new ActiveXObject("Msxml2.XMLHTTP.6.0");
} catch (e) {}
try {
return new ActiveXObject("Msxml2.XMLHTTP.3.0");
} catch (e) {}
try {
return new ActiveXObject("Msxml2.XMLHTTP");
} catch (e) {}
return false;
}
}
| return new XMLHttpRequest(); // other than IE
} |
|
crud.py | from . import models
import datetime
from discord import utils, TextChannel
def generate_id():
return utils.time_snowflake(datetime.datetime.now())
async def add_permanent_role(user_id: int, role_id: int):
await add_dbmember_if_not_exist(user_id)
if not await models.PermanentRole.query.where((models.PermanentRole.user_id == user_id) & (
models.PermanentRole.role_id == role_id)).gino.first():
return await models.PermanentRole.create(user_id=user_id, role_id=role_id)
async def remove_permanent_role(user_id: int, role_id: int):
permanent_role = await models.PermanentRole.query.where((models.PermanentRole.user_id == user_id) & (
models.PermanentRole.role_id == role_id)).gino.first()
if permanent_role:
await permanent_role.delete()
return permanent_role
async def get_permanent_roles(user_id: int):
db_member = await get_dbmember(user_id)
if db_member:
return await models.Role.query.where((models.Role.id == models.PermanentRole.role_id) & (models.PermanentRole.user_id == db_member.id)).gino.all()
async def add_staff(user_id: int, position: str):
await add_dbmember_if_not_exist(user_id)
staff = await get_staff(user_id) or await get_helper(user_id)
if staff:
await staff.update(position=position).apply()
else:
await models.Staff.create(id=user_id, position=position)
async def add_helper(user_id: int, position: str, console: str = None):
await add_dbmember_if_not_exist(user_id)
if staff := await get_staff(user_id):
await staff.update(console=console).apply()
else:
await models.Staff.create(id=user_id, position=position, console=console)
async def remove_staff(user_id: int):
staff = await get_staff(user_id)
if staff:
if staff.console:
await staff.update(position="Helper").apply()
else:
await staff.delete()
async def remove_helper(user_id: int):
helper = await get_helper(user_id)
if helper:
if helper.position != "Helper":
await helper.update(console=None).apply()
else:
await helper.delete()
async def get_staff_all():
return await models.Staff.query.where(models.Staff.position != 'Helper').gino.all()
async def get_staff(user_id: int):
return await models.Staff.query.where(
(models.Staff.position != 'Helper') & (models.Staff.id == user_id)).gino.first()
async def get_helpers():
return await models.Staff.query.where(models.Staff.console.isnot(None)).gino.all()
async def get_helper(user_id: int):
return await models.Staff.query.where(models.Staff.id == user_id).gino.first()
async def add_warn(user_id: int, issuer_id: int, reason: str):
await add_dbmember_if_not_exist(user_id)
await add_dbmember_if_not_exist(issuer_id)
await models.Warn.create(id=generate_id(), user=user_id, issuer=issuer_id, reason=reason)
async def copy_warn(user_id: int, warn: models.Warn): | warn.id = utils.time_snowflake(utils.snowflake_time(warn.id) + datetime.timedelta(milliseconds=1))
while await get_warn(warn.id):
warn.id = utils.time_snowflake(utils.snowflake_time(warn.id) + datetime.timedelta(milliseconds=1))
warn.user = user_id
await warn.create()
async def get_warn(warn_id: int):
return await models.Warn.get(warn_id)
async def get_warns(user_id: int):
return await models.Warn.query.where(models.Warn.user == user_id).gino.all()
async def remove_warn_id(user_id: int, index: int):
warn = await models.Warn.query.where(models.Warn.user == user_id).offset(index - 1).gino.first()
await warn.delete()
async def remove_warns(user_id: int):
n_warns = await (models.db.select([models.db.func.count()]).where(models.Warn.user == user_id).gino.scalar())
if n_warns:
await models.Warn.delete.where(models.Warn.user == user_id).gino.status()
return n_warns
async def add_timed_restriction(user_id: int, end_date: datetime.datetime, type: str):
await add_dbmember_if_not_exist(user_id)
await models.TimedRestriction.create(id=generate_id(), user=user_id, type=type,
end_date=end_date)
async def get_time_restrictions_by_user(user_id: int):
return await models.TimedRestriction.query.where(models.TimedRestriction.user == user_id).gino.all()
async def get_time_restrictions_by_user_type(user_id: int, type: str):
return await models.TimedRestriction.query.where((models.TimedRestriction.user == user_id) & (
models.TimedRestriction.type == type)).gino.first()
async def get_time_restrictions_by_type(type: str):
return await models.TimedRestriction.query.where(models.TimedRestriction.type == type).gino.all()
async def remove_timed_restriction(user_id: int, type: str):
time_restriction = await get_time_restrictions_by_user_type(user_id, type)
if time_restriction:
await time_restriction.delete()
async def set_time_restriction_alert(user_id: int, type: str):
time_restriction = await get_time_restrictions_by_user_type(user_id, type)
if time_restriction:
await time_restriction.update(alerted=True).apply()
async def add_timed_role(user_id: int, role_id: int, expiring_date: datetime.datetime):
await add_dbmember_if_not_exist(user_id)
entry = await get_time_role_by_user_type(user_id, role_id)
if not entry:
return await models.TimedRole.create(id=generate_id(), user_id=user_id, role_id=role_id, expiring_date=expiring_date)
await entry.update(expiring_date=expiring_date).apply()
return entry
async def remove_timed_role(user_id: int, role_id: int):
timed_role = await get_time_role_by_user_type(user_id, role_id)
if timed_role:
await timed_role.delete()
async def get_time_role_by_user_type(user_id: int, role_id: int):
return await models.TimedRole.query.where(
(models.TimedRole.user_id == user_id) & (models.TimedRole.role_id == role_id)).gino.first()
async def get_timed_roles():
return await models.TimedRole.query.gino.all()
async def add_flag(name: str):
await models.Flag.create(name=name)
async def get_flag(name: str):
if flag := await models.Flag.get(name):
return flag.value
return None
async def remove_flag(name: str):
    # Fetch the row object directly; get_flag only returns the stored value.
    flag = await models.Flag.get(name)
    if flag:
        await flag.delete()
async def set_flag(name: str, value: bool):
    # Fetch the row object directly; get_flag only returns the stored value.
    flag = await models.Flag.get(name)
    if flag:
        await flag.update(value=value).apply()
async def add_softban(user_id: int, issuer_id: int, reason: str):
await add_dbmember_if_not_exist(user_id)
await models.Softban.create(id=generate_id(), user=user_id, issuer=issuer_id, reason=reason)
async def remove_softban(user_id: int):
softban = await get_softban(user_id)
if softban:
await softban.delete()
async def add_dbmember(user_id: int):
return await models.Member.create(id=user_id)
async def add_dbmember_if_not_exist(user_id: int):
db_member = await get_dbmember(user_id)
if not db_member:
db_member = await add_dbmember(user_id)
return db_member
async def get_dbmember(user_id: int):
return await models.Member.get(user_id)
async def add_dbchannel(channel_id: int, name: str):
return await models.Channel.create(id=channel_id, name=name)
async def get_dbchannel(channel_id: int):
return await models.Channel.get(channel_id)
async def add_dbrole(role_id: int, name: str):
return await models.Role.create(id=role_id, name=name)
async def get_dbrole(role_id: int):
return await models.Role.get(role_id)
async def get_softban(user_id: int):
return await models.Softban.query.where(models.Softban.user == user_id).gino.first()
async def add_watch(user_id: int):
db_member = await add_dbmember_if_not_exist(user_id)
await db_member.update(watched=True).apply()
async def remove_watch(user_id: int):
db_member = await get_dbmember(user_id)
if db_member:
await db_member.update(watched=False).apply()
async def is_watched(user_id: int):
db_member = await get_dbmember(user_id)
return db_member.watched if db_member else False
async def add_nofilter(channel: TextChannel):
db_channel = await get_dbchannel(channel.id)
if not db_channel:
db_channel = await add_dbchannel(channel.id, channel.name)
await db_channel.update(nofilter=True).apply()
async def remove_nofilter(channel: TextChannel):
db_channel = await get_dbchannel(channel.id)
if db_channel:
        await db_channel.update(nofilter=False).apply()
async def check_nofilter(channel: TextChannel):
channel = await models.Channel.get(channel.id)
return channel.nofilter if channel else False
async def add_friendcode_3ds(user_id: int, fc: int):
await add_dbmember_if_not_exist(user_id)
if fcs := await get_friendcode(user_id):
await fcs.update(fc_3ds=fc).apply()
return
await models.FriendCode.create(id=user_id, fc_3ds=fc)
async def add_friendcode_switch(user_id: int, fc: int):
await add_dbmember_if_not_exist(user_id)
if fcs := await get_friendcode(user_id):
await fcs.update(fc_switch=fc).apply()
return
await models.FriendCode.create(id=user_id, fc_switch=fc)
async def get_friendcode(user_id: int):
return await models.FriendCode.get(user_id)
async def delete_friendcode_3ds(user_id: int):
friendcodes = await get_friendcode(user_id)
if friendcodes:
await friendcodes.update(fc_3ds=None).apply()
if friendcodes.fc_3ds is None and friendcodes.fc_switch is None:
await friendcodes.delete()
async def delete_friendcode_switch(user_id: int):
friendcodes = await get_friendcode(user_id)
if friendcodes:
await friendcodes.update(fc_switch=None).apply()
if friendcodes.fc_3ds is None and friendcodes.fc_switch is None:
await friendcodes.delete()
async def add_rule(number: int, description: str):
rule = await get_rule(number)
if not rule:
await models.Rule.create(id=number, description=description)
async def edit_rule(number: int, description: str):
rule = await get_rule(number)
if rule:
await rule.update(description=description).apply()
async def delete_rule(number: int):
rule = await get_rule(number)
if rule:
await rule.delete()
async def get_rules():
return await models.Rule.query.order_by(models.Rule.id).gino.all()
async def get_rule(number: int):
return await models.Rule.get(number)
async def add_reminder(date: datetime.datetime, author: int, reminder: str):
await add_dbmember_if_not_exist(author)
await models.RemindMeEntry.create(id=generate_id(), date=date, author=author, reminder=reminder)
async def get_reminders() -> list[models.RemindMeEntry]:
return await models.RemindMeEntry.query.order_by(models.RemindMeEntry.date).gino.all()
async def remove_reminder(reminder_id: int):
db_reminder = await models.RemindMeEntry.get(reminder_id)
await db_reminder.delete()
async def create_tag(title: str, content: str, author: int):
await add_dbmember_if_not_exist(author)
await models.Tag.create(id=generate_id(), title=title, content=content, author=author)
async def get_tag(title: str) -> models.Tag:
return await models.Tag.query.where(models.Tag.title == title).gino.first()
async def get_tags() -> list[models.Tag]:
return await models.Tag.query.order_by(models.Tag.id).gino.all()
async def search_tags(query: str) -> list[models.Tag]:
return await models.Tag.query.where(models.Tag.title.ilike(f"%{query}%")).limit(10).gino.all()
async def delete_tag(title: str):
db_tag = await get_tag(title)
await db_tag.delete() | await add_dbmember_if_not_exist(user_id) |
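A small usage sketch for the helpers above, as they might be called from an async command handler; the IDs and the reason string are placeholders, not values from the source:

async def warn_and_count(member_id: int, moderator_id: int) -> int:
    await add_warn(member_id, moderator_id, "spamming")  # records a new warn
    return len(await get_warns(member_id))               # total warns now on record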
memdb.go | package main
import (
"fmt"
"sync"
)
type inMemDb struct {
mu sync.RWMutex
// map a uid to their clips
data map[string][]Clip
}
func | () *inMemDb {
return &inMemDb{
data: make(map[string][]Clip),
}
}
func (m *inMemDb) GetUserClips(uid string) ([]Clip, error) {
m.mu.RLock()
clips, ok := m.data[uid]
m.mu.RUnlock()
if !ok {
return clips, fmt.Errorf("uid does not exist")
}
return clips, nil
}
func (m *inMemDb) GetUserClipsSince(userID string, timestamp int64) ([]Clip, error) {
m.mu.RLock()
defer m.mu.RUnlock()
clipsForUser, ok := m.data[userID]
if !ok {
return clipsForUser, fmt.Errorf("uid does not exist")
}
var clipsSinceTimestamp = make([]Clip, 0)
for _, clip := range clipsForUser {
if clip.Timestamp >= timestamp {
clipsSinceTimestamp = append(clipsSinceTimestamp, clip)
}
}
return clipsSinceTimestamp, nil
}
func (m *inMemDb) InsertUserClip(uid string, clip Clip) error {
m.mu.Lock()
m.data[uid] = append(m.data[uid], clip)
m.mu.Unlock()
return nil
}
| newInMemDB |
blocks_api.go | package server
import (
"bytes"
"context"
"github.com/golang/protobuf/ptypes/empty"
"github.com/golang/protobuf/ptypes/wrappers"
g "github.com/wavesplatform/gowaves/pkg/grpc/generated"
"github.com/wavesplatform/gowaves/pkg/proto"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func (s *Server) headerByHeight(height proto.Height) (*g.BlockWithHeight, error) {
header, err := s.state.HeaderByHeight(height)
if err != nil {
return nil, status.Errorf(codes.NotFound, err.Error())
}
res, err := header.HeaderToProtobufWithHeight(s.scheme, height)
if err != nil {
return nil, status.Errorf(codes.Internal, err.Error())
}
return res, nil
}
func (s *Server) blockByHeight(height proto.Height) (*g.BlockWithHeight, error) {
block, err := s.state.BlockByHeight(height)
if err != nil {
return nil, status.Errorf(codes.NotFound, err.Error())
}
res, err := block.ToProtobufWithHeight(s.scheme, height)
if err != nil {
return nil, status.Errorf(codes.Internal, err.Error())
}
return res, nil
}
func (s *Server) headerOrBlockByHeight(height proto.Height, includeTransactions bool) (*g.BlockWithHeight, error) {
if includeTransactions {
return s.blockByHeight(height)
}
return s.headerByHeight(height)
}
func (s *Server) GetBlock(ctx context.Context, req *g.BlockRequest) (*g.BlockWithHeight, error) {
switch r := req.Request.(type) {
case *g.BlockRequest_BlockId:
id, err := proto.NewBlockIDFromBytes(r.BlockId)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, err.Error())
}
blockHeight, err := s.state.BlockIDToHeight(id)
if err != nil {
return nil, status.Errorf(codes.NotFound, err.Error())
}
return s.headerOrBlockByHeight(blockHeight, req.IncludeTransactions)
case *g.BlockRequest_Height:
return s.headerOrBlockByHeight(proto.Height(r.Height), req.IncludeTransactions)
case *g.BlockRequest_Reference:
id, err := proto.NewBlockIDFromBytes(r.Reference)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, err.Error())
}
parentHeight, err := s.state.BlockIDToHeight(id)
if err != nil {
return nil, status.Errorf(codes.NotFound, err.Error())
}
blockHeight := parentHeight + 1
return s.headerOrBlockByHeight(blockHeight, req.IncludeTransactions)
default:
return nil, status.Errorf(codes.InvalidArgument, "Unknown argument type")
}
}
func (s *Server) GetBlockRange(req *g.BlockRangeRequest, srv g.BlocksApi_GetBlockRangeServer) error { | if err != nil {
return status.Errorf(codes.NotFound, err.Error())
}
if hasFilter && !bytes.Equal(block.Block.Header.Generator, generator) {
continue
}
if err := srv.Send(block); err != nil {
return status.Errorf(codes.Internal, err.Error())
}
}
return nil
}
func (s *Server) GetCurrentHeight(ctx context.Context, req *empty.Empty) (*wrappers.UInt32Value, error) {
height, err := s.state.Height()
if err != nil {
return nil, status.Errorf(codes.Internal, err.Error())
}
return &wrappers.UInt32Value{Value: uint32(height)}, nil
} | generator := req.GetGenerator()
hasFilter := generator != nil
for height := proto.Height(req.FromHeight); height <= proto.Height(req.ToHeight); height++ {
block, err := s.headerOrBlockByHeight(height, req.IncludeTransactions) |
credentials_test.go | // Copyright (c) 2018 Yandex LLC. All rights reserved.
// Author: Vladimir Skipor <[email protected]>
package ycsdk
import (
"context"
"crypto"
"crypto/rsa"
"fmt"
"io"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
jwt "github.com/golang-jwt/jwt/v4"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/yandex-cloud/go-sdk/iamkey"
)
func TestOAuthToken(t *testing.T) {
const token = "AAAA00000000000000000000000000000000000"
creds := OAuthToken(token)
iamTokenReq, err := creds.(ExchangeableCredentials).IAMTokenRequest()
require.NoError(t, err)
assert.Equal(t, token, iamTokenReq.GetYandexPassportOauthToken())
}
func TestIAMToken(t *testing.T) {
const iamToken = "this-is-iam-token"
creds := NewIAMTokenCredentials(iamToken)
iamTokenResp, err := creds.IAMToken(context.Background())
require.NoError(t, err)
assert.Equal(t, iamToken, iamTokenResp.GetIamToken())
}
func TestServiceAccountKey(t *testing.T) {
key := testKey(t)
creds, err := ServiceAccountKey(key)
require.NoError(t, err)
iamTokenReq, err := creds.(ExchangeableCredentials).IAMTokenRequest()
require.NoError(t, err)
require.NotEmpty(t, iamTokenReq.GetJwt())
parser := jwt.Parser{}
jot, parts, err := parser.ParseUnverified(iamTokenReq.GetJwt(), &jwt.RegisteredClaims{})
require.NoError(t, err)
publicKey, err := jwt.ParseRSAPublicKeyFromPEM([]byte(key.PublicKey))
require.NoError(t, err)
// Force salt length: https://github.com/dgrijalva/jwt-go/issues/285
method := &jwt.SigningMethodRSAPSS{
SigningMethodRSA: jwt.SigningMethodPS256.SigningMethodRSA,
Options: &rsa.PSSOptions{
Hash: crypto.SHA256,
SaltLength: rsa.PSSSaltLengthEqualsHash,
},
}
err = method.Verify(strings.Join(parts[:2], "."), parts[2], publicKey)
require.NoError(t, err, "token verification failed")
claims := jot.Claims.(*jwt.RegisteredClaims)
assert.Equal(t, key.Id, jot.Header["kid"])
assert.Equal(t, key.GetServiceAccountId(), claims.Issuer)
assert.Contains(t, claims.Audience, "https://iam.api.cloud.yandex.net/iam/v1/tokens")
issuedAt := claims.IssuedAt.Time
sinceIssued := time.Since(issuedAt)
assert.True(t, sinceIssued > 0)
assert.True(t, sinceIssued < time.Minute)
assert.Equal(t, time.Hour, claims.ExpiresAt.Sub(issuedAt))
}
func TestInstanceServiceAccount(t *testing.T) {
t.Run("success", func(t *testing.T) {
const token = "AAAAAAAAAAAAAAAAAAAAAAAA"
const expiresIn = 43167
server := httptest.NewServer(http.HandlerFunc(
func(rw http.ResponseWriter, req *http.Request) {
_, err := io.WriteString(rw, fmt.Sprintf(`{
"access_token": %q,
"expires_in": %v,
"token_type":"Bearer"
}`, token, expiresIn))
assert.NoError(t, err)
}))
defer server.Close()
creds := newInstanceServiceAccountCredentials(server.Listener.Addr().String())
iamToken, err := creds.IAMToken(context.Background())
require.NoError(t, err)
assert.Equal(t, token, iamToken.IamToken)
expectedExpiresAt := time.Now().Add(expiresIn * time.Second)
actualExpiresAt, err := iamToken.ExpiresAt.AsTime(), iamToken.ExpiresAt.CheckValid()
require.NoError(t, err)
assert.True(t, expectedExpiresAt.After(actualExpiresAt))
assert.True(t, expectedExpiresAt.Add(-10*time.Second).Before(actualExpiresAt))
})
t.Run("internal error", func(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(
func(rw http.ResponseWriter, req *http.Request) {
rw.WriteHeader(http.StatusInternalServerError)
_, err := io.WriteString(rw, "ERRRORRRRR")
assert.NoError(t, err)
}))
defer server.Close()
creds := newInstanceServiceAccountCredentials(server.Listener.Addr().String())
_, err := creds.IAMToken(context.Background())
require.Error(t, err)
t.Log(err)
})
}
func testKey(t *testing.T) *iamkey.Key | {
key, err := iamkey.ReadFromJSONFile("test_data/service_account_key.json")
require.NoError(t, err)
return key
} |
|
daemonset.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
v1beta2 "k8s.io/api/apps/v1beta2"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "github.com/hyperhq/client-go/kubernetes/scheme"
rest "github.com/hyperhq/client-go/rest"
)
// DaemonSetsGetter has a method to return a DaemonSetInterface.
// A group's client should implement this interface.
type DaemonSetsGetter interface {
DaemonSets(namespace string) DaemonSetInterface
}
// DaemonSetInterface has methods to work with DaemonSet resources.
type DaemonSetInterface interface {
Create(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error)
Update(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error)
UpdateStatus(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
Get(name string, options v1.GetOptions) (*v1beta2.DaemonSet, error)
List(opts v1.ListOptions) (*v1beta2.DaemonSetList, error)
Watch(opts v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error)
DaemonSetExpansion
}
// daemonSets implements DaemonSetInterface
type daemonSets struct {
client rest.Interface
ns string
}
// newDaemonSets returns a DaemonSets
func | (c *AppsV1beta2Client, namespace string) *daemonSets {
return &daemonSets{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) {
result = &v1beta2.DaemonSet{}
err = c.client.Get().
Namespace(c.ns).
Resource("daemonsets").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) {
result = &v1beta2.DaemonSetList{}
err = c.client.Get().
Namespace(c.ns).
Resource("daemonsets").
VersionedParams(&opts, scheme.ParameterCodec).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested daemonSets.
func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("daemonsets").
VersionedParams(&opts, scheme.ParameterCodec).
Watch()
}
// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any.
func (c *daemonSets) Create(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) {
result = &v1beta2.DaemonSet{}
err = c.client.Post().
Namespace(c.ns).
Resource("daemonsets").
Body(daemonSet).
Do().
Into(result)
return
}
// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
func (c *daemonSets) Update(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) {
result = &v1beta2.DaemonSet{}
err = c.client.Put().
Namespace(c.ns).
Resource("daemonsets").
Name(daemonSet.Name).
Body(daemonSet).
Do().
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *daemonSets) UpdateStatus(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) {
result = &v1beta2.DaemonSet{}
err = c.client.Put().
Namespace(c.ns).
Resource("daemonsets").
Name(daemonSet.Name).
SubResource("status").
Body(daemonSet).
Do().
Into(result)
return
}
// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs.
func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("daemonsets").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("daemonsets").
VersionedParams(&listOptions, scheme.ParameterCodec).
Body(options).
Do().
Error()
}
// Patch applies the patch and returns the patched daemonSet.
func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) {
result = &v1beta2.DaemonSet{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("daemonsets").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}
| newDaemonSets |
api.go | package api
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"runtime"
"sort"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/docker/swarm/cluster"
"github.com/docker/swarm/scheduler"
"github.com/gorilla/mux"
"github.com/samalba/dockerclient"
)
type context struct {
cluster *cluster.Cluster
scheduler *scheduler.Scheduler
eventsHandler *eventsHandler
debug bool
version string
}
type handler func(c *context, w http.ResponseWriter, r *http.Request)
// GET /info
func getInfo(c *context, w http.ResponseWriter, r *http.Request) {
nodes := c.cluster.Nodes()
driverStatus := [][2]string{{"\bNodes", fmt.Sprintf("%d", len(nodes))}}
for _, node := range nodes {
driverStatus = append(driverStatus, [2]string{node.Name, node.Addr})
}
info := struct {
Containers int
DriverStatus [][2]string
NEventsListener int
Debug bool
}{
len(c.cluster.Containers()),
driverStatus,
c.eventsHandler.Size(),
c.debug,
}
json.NewEncoder(w).Encode(info)
}
// GET /version
func getVersion(c *context, w http.ResponseWriter, r *http.Request) {
version := struct {
Version string
GoVersion string
GitCommit string
}{
Version: "swarm/" + c.version,
GoVersion: runtime.Version(),
GitCommit: "swarm",
}
json.NewEncoder(w).Encode(version)
}
// GET /containers/ps
// GET /containers/json
func getContainersJSON(c *context, w http.ResponseWriter, r *http.Request) {
if err := r.ParseForm(); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
all := r.Form.Get("all") == "1"
out := []*dockerclient.Container{}
for _, container := range c.cluster.Containers() {
tmp := (*container).Container
// Skip stopped containers unless -a was specified.
if !strings.Contains(tmp.Status, "Up") && !all {
continue
}
if !container.Node().IsHealthy() {
tmp.Status = "Pending"
}
// TODO remove the Node ID in the name when we have a good solution
tmp.Names = make([]string, len(container.Names))
for i, name := range container.Names {
tmp.Names[i] = "/" + container.Node().Name + name
}
tmp.Ports = make([]dockerclient.Port, len(container.Ports))
for i, port := range container.Ports {
tmp.Ports[i] = port
if port.IP == "0.0.0.0" {
tmp.Ports[i].IP = container.Node().IP
}
}
out = append(out, &tmp)
}
sort.Sort(sort.Reverse(ContainerSorter(out)))
json.NewEncoder(w).Encode(out)
}
// GET /containers/{name:.*}/json
func getContainerJSON(c *context, w http.ResponseWriter, r *http.Request) {
container := c.cluster.Container(mux.Vars(r)["name"])
if container != nil {
resp, err := http.Get(container.Node().Addr + "/containers/" + container.Id + "/json")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Write(bytes.Replace(data, []byte("\"HostIp\":\"0.0.0.0\""), []byte(fmt.Sprintf("\"HostIp\":%q", container.Node().IP)), -1))
}
}
// POST /containers/create
func postContainersCreate(c *context, w http.ResponseWriter, r *http.Request) {
r.ParseForm()
var (
config dockerclient.ContainerConfig
name = r.Form.Get("name")
)
if err := json.NewDecoder(r.Body).Decode(&config); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if config.AttachStdout || config.AttachStdin || config.AttachStderr {
http.Error(w, "Attach is not supported in clustering mode, use -d.", http.StatusInternalServerError)
return
}
if container := c.cluster.Container(name); container != nil {
http.Error(w, fmt.Sprintf("Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", name, container.Id, name), http.StatusConflict)
return
}
container, err := c.scheduler.CreateContainer(&config, name)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError) | }
// DELETE /containers/{name:.*}
func deleteContainer(c *context, w http.ResponseWriter, r *http.Request) {
if err := r.ParseForm(); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
name := mux.Vars(r)["name"]
force := r.Form.Get("force") == "1"
container := c.cluster.Container(name)
if container == nil {
http.Error(w, fmt.Sprintf("Container %s not found", name), http.StatusNotFound)
return
}
if err := c.scheduler.RemoveContainer(container, force); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
// GET /events
func getEvents(c *context, w http.ResponseWriter, r *http.Request) {
c.eventsHandler.Add(r.RemoteAddr, w)
w.Header().Set("Content-Type", "application/json")
if f, ok := w.(http.Flusher); ok {
f.Flush()
}
c.eventsHandler.Wait(r.RemoteAddr)
}
// GET /_ping
func ping(c *context, w http.ResponseWriter, r *http.Request) {
w.Write([]byte{'O', 'K'})
}
// Proxy a request to the right node
func proxyContainer(c *context, w http.ResponseWriter, r *http.Request) {
container := c.cluster.Container(mux.Vars(r)["name"])
if container != nil {
// Use a new client for each request
client := &http.Client{}
// RequestURI may not be sent to client
r.RequestURI = ""
parts := strings.SplitN(container.Node().Addr, "://", 2)
if len(parts) == 2 {
r.URL.Scheme = parts[0]
r.URL.Host = parts[1]
} else {
r.URL.Scheme = "http"
r.URL.Host = parts[0]
}
log.Debugf("[PROXY] --> %s %s", r.Method, r.URL)
resp, err := client.Do(r)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
io.Copy(w, resp.Body)
}
}
// Default handler for methods not supported by clustering.
func notImplementedHandler(c *context, w http.ResponseWriter, r *http.Request) {
http.Error(w, "Not supported in clustering mode.", http.StatusNotImplemented)
}
func optionsHandler(c *context, w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
func writeCorsHeaders(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Access-Control-Allow-Origin", "*")
w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS")
}
func createRouter(c *context, enableCors bool) (*mux.Router, error) {
r := mux.NewRouter()
m := map[string]map[string]handler{
"GET": {
"/_ping": ping,
"/events": getEvents,
"/info": getInfo,
"/version": getVersion,
"/images/json": notImplementedHandler,
"/images/viz": notImplementedHandler,
"/images/search": notImplementedHandler,
"/images/get": notImplementedHandler,
"/images/{name:.*}/get": notImplementedHandler,
"/images/{name:.*}/history": notImplementedHandler,
"/images/{name:.*}/json": notImplementedHandler,
"/containers/ps": getContainersJSON,
"/containers/json": getContainersJSON,
"/containers/{name:.*}/export": proxyContainer,
"/containers/{name:.*}/changes": proxyContainer,
"/containers/{name:.*}/json": getContainerJSON,
"/containers/{name:.*}/top": proxyContainer,
"/containers/{name:.*}/logs": proxyContainer,
"/containers/{name:.*}/attach/ws": notImplementedHandler,
"/exec/{id:.*}/json": proxyContainer,
},
"POST": {
"/auth": notImplementedHandler,
"/commit": notImplementedHandler,
"/build": notImplementedHandler,
"/images/create": notImplementedHandler,
"/images/load": notImplementedHandler,
"/images/{name:.*}/push": notImplementedHandler,
"/images/{name:.*}/tag": notImplementedHandler,
"/containers/create": postContainersCreate,
"/containers/{name:.*}/kill": proxyContainer,
"/containers/{name:.*}/pause": proxyContainer,
"/containers/{name:.*}/unpause": proxyContainer,
"/containers/{name:.*}/restart": proxyContainer,
"/containers/{name:.*}/start": proxyContainer,
"/containers/{name:.*}/stop": proxyContainer,
"/containers/{name:.*}/wait": proxyContainer,
"/containers/{name:.*}/resize": proxyContainer,
"/containers/{name:.*}/attach": notImplementedHandler,
"/containers/{name:.*}/copy": notImplementedHandler,
"/containers/{name:.*}/exec": notImplementedHandler,
"/exec/{name:.*}/start": notImplementedHandler,
"/exec/{name:.*}/resize": proxyContainer,
},
"DELETE": {
"/containers/{name:.*}": deleteContainer,
"/images/{name:.*}": notImplementedHandler,
},
"OPTIONS": {
"": optionsHandler,
},
}
for method, routes := range m {
for route, fct := range routes {
log.Debugf("Registering %s, %s", method, route)
// NOTE: scope issue, make sure the variables are local and won't be changed
localRoute := route
localFct := fct
wrap := func(w http.ResponseWriter, r *http.Request) {
log.Infof("%s %s", r.Method, r.RequestURI)
if enableCors {
writeCorsHeaders(w, r)
}
localFct(c, w, r)
}
localMethod := method
// add the new route
r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(wrap)
r.Path(localRoute).Methods(localMethod).HandlerFunc(wrap)
}
}
return r, nil
} | return
}
fmt.Fprintf(w, "{%q:%q}", "Id", container.Id)
return |
portal.commands.ts | /*
* Copyright (C) 2015 The Gravitee team (http://gravitee.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { RequestInfo, RequestInfoHolder } from '../model/technical';
import { ApiPortalCommands } from './portal/apis.portal.commands';
export class | extends RequestInfoHolder {
constructor(requestInfo: RequestInfo) {
super(requestInfo);
}
apis(): ApiPortalCommands {
return new ApiPortalCommands(this.requestInfo);
}
}
| PortalCommands |
mod.ts | export function partTwo(numbers: number[]) {
const rightNumbers = numbers.map((number) =>
numbers.map((other) =>
numbers.filter((another) => number + other + another === 2020)
)
);
const realRightNumbers = new Set(
rightNumbers.flatMap((numbers) => numbers.flatMap((nested) => nested)), | );
return [...realRightNumbers].reduce((acc, number) => acc * number, 1);
} |
|
keys_proto.rs | // This file is generated by rust-protobuf 2.3.0. Do not edit
// @generated
// https://github.com/Manishearth/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unsafe_code)]
#![allow(unused_imports)]
#![allow(unused_results)]
use protobuf::Message as Message_imported_for_functions;
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;
#[derive(PartialEq,Clone,Default)]
pub struct PublicKey {
// message fields
Type: ::std::option::Option<KeyType>,
Data: ::protobuf::SingularField<::std::vec::Vec<u8>>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl PublicKey {
pub fn new() -> PublicKey {
::std::default::Default::default()
}
// required .KeyType Type = 1;
pub fn clear_Type(&mut self) {
self.Type = ::std::option::Option::None;
}
pub fn has_Type(&self) -> bool {
self.Type.is_some()
}
// Param is passed by value, moved
pub fn set_Type(&mut self, v: KeyType) {
self.Type = ::std::option::Option::Some(v);
}
pub fn get_Type(&self) -> KeyType {
self.Type.unwrap_or(KeyType::RSA)
}
// required bytes Data = 2;
pub fn clear_Data(&mut self) {
self.Data.clear();
}
pub fn has_Data(&self) -> bool {
self.Data.is_some()
}
// Param is passed by value, moved
pub fn set_Data(&mut self, v: ::std::vec::Vec<u8>) {
self.Data = ::protobuf::SingularField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_Data(&mut self) -> &mut ::std::vec::Vec<u8> {
if self.Data.is_none() {
self.Data.set_default();
}
self.Data.as_mut().unwrap()
}
// Take field
pub fn take_Data(&mut self) -> ::std::vec::Vec<u8> {
self.Data.take().unwrap_or_else(|| ::std::vec::Vec::new())
}
pub fn | (&self) -> &[u8] {
match self.Data.as_ref() {
Some(v) => &v,
None => &[],
}
}
}
impl ::protobuf::Message for PublicKey {
fn is_initialized(&self) -> bool {
if self.Type.is_none() {
return false;
}
if self.Data.is_none() {
return false;
}
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_proto2_enum_with_unknown_fields_into(wire_type, is, &mut self.Type, 1, &mut self.unknown_fields)?
},
2 => {
::protobuf::rt::read_singular_bytes_into(wire_type, is, &mut self.Data)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if let Some(v) = self.Type {
my_size += ::protobuf::rt::enum_size(1, v);
}
if let Some(ref v) = self.Data.as_ref() {
my_size += ::protobuf::rt::bytes_size(2, &v);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if let Some(v) = self.Type {
os.write_enum(1, v.value())?;
}
if let Some(ref v) = self.Data.as_ref() {
os.write_bytes(2, &v)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> PublicKey {
PublicKey::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeEnum<KeyType>>(
"Type",
|m: &PublicKey| { &m.Type },
|m: &mut PublicKey| { &mut m.Type },
));
fields.push(::protobuf::reflect::accessor::make_singular_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"Data",
|m: &PublicKey| { &m.Data },
|m: &mut PublicKey| { &mut m.Data },
));
::protobuf::reflect::MessageDescriptor::new::<PublicKey>(
"PublicKey",
fields,
file_descriptor_proto()
)
})
}
}
fn default_instance() -> &'static PublicKey {
static mut instance: ::protobuf::lazy::Lazy<PublicKey> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const PublicKey,
};
unsafe {
instance.get(PublicKey::new)
}
}
}
impl ::protobuf::Clear for PublicKey {
fn clear(&mut self) {
self.clear_Type();
self.clear_Data();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for PublicKey {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for PublicKey {
fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef<'_> {
::protobuf::reflect::ProtobufValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct PrivateKey {
// message fields
Type: ::std::option::Option<KeyType>,
Data: ::protobuf::SingularField<::std::vec::Vec<u8>>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl PrivateKey {
pub fn new() -> PrivateKey {
::std::default::Default::default()
}
// required .KeyType Type = 1;
pub fn clear_Type(&mut self) {
self.Type = ::std::option::Option::None;
}
pub fn has_Type(&self) -> bool {
self.Type.is_some()
}
// Param is passed by value, moved
pub fn set_Type(&mut self, v: KeyType) {
self.Type = ::std::option::Option::Some(v);
}
pub fn get_Type(&self) -> KeyType {
self.Type.unwrap_or(KeyType::RSA)
}
// required bytes Data = 2;
pub fn clear_Data(&mut self) {
self.Data.clear();
}
pub fn has_Data(&self) -> bool {
self.Data.is_some()
}
// Param is passed by value, moved
pub fn set_Data(&mut self, v: ::std::vec::Vec<u8>) {
self.Data = ::protobuf::SingularField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_Data(&mut self) -> &mut ::std::vec::Vec<u8> {
if self.Data.is_none() {
self.Data.set_default();
}
self.Data.as_mut().unwrap()
}
// Take field
pub fn take_Data(&mut self) -> ::std::vec::Vec<u8> {
self.Data.take().unwrap_or_else(|| ::std::vec::Vec::new())
}
pub fn get_Data(&self) -> &[u8] {
match self.Data.as_ref() {
Some(v) => &v,
None => &[],
}
}
}
impl ::protobuf::Message for PrivateKey {
fn is_initialized(&self) -> bool {
if self.Type.is_none() {
return false;
}
if self.Data.is_none() {
return false;
}
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_proto2_enum_with_unknown_fields_into(wire_type, is, &mut self.Type, 1, &mut self.unknown_fields)?
},
2 => {
::protobuf::rt::read_singular_bytes_into(wire_type, is, &mut self.Data)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if let Some(v) = self.Type {
my_size += ::protobuf::rt::enum_size(1, v);
}
if let Some(ref v) = self.Data.as_ref() {
my_size += ::protobuf::rt::bytes_size(2, &v);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if let Some(v) = self.Type {
os.write_enum(1, v.value())?;
}
if let Some(ref v) = self.Data.as_ref() {
os.write_bytes(2, &v)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> PrivateKey {
PrivateKey::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_option_accessor::<_, ::protobuf::types::ProtobufTypeEnum<KeyType>>(
"Type",
|m: &PrivateKey| { &m.Type },
|m: &mut PrivateKey| { &mut m.Type },
));
fields.push(::protobuf::reflect::accessor::make_singular_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"Data",
|m: &PrivateKey| { &m.Data },
|m: &mut PrivateKey| { &mut m.Data },
));
::protobuf::reflect::MessageDescriptor::new::<PrivateKey>(
"PrivateKey",
fields,
file_descriptor_proto()
)
})
}
}
fn default_instance() -> &'static PrivateKey {
static mut instance: ::protobuf::lazy::Lazy<PrivateKey> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const PrivateKey,
};
unsafe {
instance.get(PrivateKey::new)
}
}
}
impl ::protobuf::Clear for PrivateKey {
fn clear(&mut self) {
self.clear_Type();
self.clear_Data();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for PrivateKey {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for PrivateKey {
fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef<'_> {
::protobuf::reflect::ProtobufValueRef::Message(self)
}
}
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum KeyType {
RSA = 0,
Ed25519 = 1,
Secp256k1 = 2,
}
impl ::protobuf::ProtobufEnum for KeyType {
fn value(&self) -> i32 {
*self as i32
}
fn from_i32(value: i32) -> ::std::option::Option<KeyType> {
match value {
0 => ::std::option::Option::Some(KeyType::RSA),
1 => ::std::option::Option::Some(KeyType::Ed25519),
2 => ::std::option::Option::Some(KeyType::Secp256k1),
_ => ::std::option::Option::None
}
}
fn values() -> &'static [Self] {
static values: &'static [KeyType] = &[
KeyType::RSA,
KeyType::Ed25519,
KeyType::Secp256k1,
];
values
}
fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::EnumDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::EnumDescriptor,
};
unsafe {
descriptor.get(|| {
::protobuf::reflect::EnumDescriptor::new("KeyType", file_descriptor_proto())
})
}
}
}
impl ::std::marker::Copy for KeyType {
}
impl ::std::default::Default for KeyType {
fn default() -> Self {
KeyType::RSA
}
}
impl ::protobuf::reflect::ProtobufValue for KeyType {
fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef<'_> {
::protobuf::reflect::ProtobufValueRef::Enum(self.descriptor())
}
}
static file_descriptor_proto_data: &'static [u8] = b"\
\n\nkeys.proto\"=\n\tPublicKey\x12\x1c\n\x04Type\x18\x01\x20\x02(\x0e2\
\x08.KeyTypeR\x04type\x12\x12\n\x04Data\x18\x02\x20\x02(\x0cR\x04data\">\
\n\nPrivateKey\x12\x1c\n\x04Type\x18\x01\x20\x02(\x0e2\x08.KeyTypeR\x04t\
ype\x12\x12\n\x04Data\x18\x02\x20\x02(\x0cR\x04data*.\n\x07KeyType\x12\
\x07\n\x03RSA\x10\0\x12\x0b\n\x07Ed25519\x10\x01\x12\r\n\tSecp256k1\x10\
\x02J\xdf\x03\n\x06\x12\x04\0\0\x0e\x01\n\n\n\x02\x05\0\x12\x04\0\0\x04\
\x01\n\n\n\x03\x05\0\x01\x12\x03\0\x05\x0c\n\x0b\n\x04\x05\0\x02\0\x12\
\x03\x01\x02\n\n\x0c\n\x05\x05\0\x02\0\x01\x12\x03\x01\x02\x05\n\x0c\n\
\x05\x05\0\x02\0\x02\x12\x03\x01\x08\t\n\x0b\n\x04\x05\0\x02\x01\x12\x03\
\x02\x02\x0e\n\x0c\n\x05\x05\0\x02\x01\x01\x12\x03\x02\x02\t\n\x0c\n\x05\
\x05\0\x02\x01\x02\x12\x03\x02\x0c\r\n\x0b\n\x04\x05\0\x02\x02\x12\x03\
\x03\x02\x10\n\x0c\n\x05\x05\0\x02\x02\x01\x12\x03\x03\x02\x0b\n\x0c\n\
\x05\x05\0\x02\x02\x02\x12\x03\x03\x0e\x0f\n\n\n\x02\x04\0\x12\x04\x06\0\
\t\x01\n\n\n\x03\x04\0\x01\x12\x03\x06\x08\x11\n\x0b\n\x04\x04\0\x02\0\
\x12\x03\x07\x02\x1c\n\x0c\n\x05\x04\0\x02\0\x04\x12\x03\x07\x02\n\n\x0c\
\n\x05\x04\0\x02\0\x06\x12\x03\x07\x0b\x12\n\x0c\n\x05\x04\0\x02\0\x01\
\x12\x03\x07\x13\x17\n\x0c\n\x05\x04\0\x02\0\x03\x12\x03\x07\x1a\x1b\n\
\x0b\n\x04\x04\0\x02\x01\x12\x03\x08\x02\x1a\n\x0c\n\x05\x04\0\x02\x01\
\x04\x12\x03\x08\x02\n\n\x0c\n\x05\x04\0\x02\x01\x05\x12\x03\x08\x0b\x10\
\n\x0c\n\x05\x04\0\x02\x01\x01\x12\x03\x08\x11\x15\n\x0c\n\x05\x04\0\x02\
\x01\x03\x12\x03\x08\x18\x19\n\n\n\x02\x04\x01\x12\x04\x0b\0\x0e\x01\n\n\
\n\x03\x04\x01\x01\x12\x03\x0b\x08\x12\n\x0b\n\x04\x04\x01\x02\0\x12\x03\
\x0c\x02\x1c\n\x0c\n\x05\x04\x01\x02\0\x04\x12\x03\x0c\x02\n\n\x0c\n\x05\
\x04\x01\x02\0\x06\x12\x03\x0c\x0b\x12\n\x0c\n\x05\x04\x01\x02\0\x01\x12\
\x03\x0c\x13\x17\n\x0c\n\x05\x04\x01\x02\0\x03\x12\x03\x0c\x1a\x1b\n\x0b\
\n\x04\x04\x01\x02\x01\x12\x03\r\x02\x1a\n\x0c\n\x05\x04\x01\x02\x01\x04\
\x12\x03\r\x02\n\n\x0c\n\x05\x04\x01\x02\x01\x05\x12\x03\r\x0b\x10\n\x0c\
\n\x05\x04\x01\x02\x01\x01\x12\x03\r\x11\x15\n\x0c\n\x05\x04\x01\x02\x01\
\x03\x12\x03\r\x18\x19\
";
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::descriptor::FileDescriptorProto,
};
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
unsafe {
file_descriptor_proto_lazy.get(|| {
parse_descriptor_proto()
})
}
}
| get_Data |
lambda_function.py | import uuid
from os import getenv
from boto3utils import s3
from cirruslib import Catalog, get_task_logger
# envvars
CATALOG_BUCKET = getenv('CIRRUS_CATALOG_BUCKET')
def | (payload, context):
catalog = Catalog.from_payload(payload)
logger = get_task_logger("task.pre-batch", catalog=catalog)
url = f"s3://{CATALOG_BUCKET}/batch/{catalog['id']}/{uuid.uuid1()}.json"
try:
# copy payload to s3
s3().upload_json(catalog, url)
logger.debug(f"Uploaded catalog to {url}")
return {
'url': url
}
except Exception as err:
        msg = f"pre-batch: failed pre-processing batch job ({err})"
logger.error(msg, exc_info=True)
raise Exception(msg) from err
| lambda_handler |
multicast.py | """Multicast transport for stomp.py.
Obviously not a typical message broker, but convenient if you don't have a broker yet still want to use stomp.py
methods.
"""
import struct
from stomp.connect import BaseConnection
from stomp.protocol import *
from stomp.transport import *
from stomp.utils import *
MCAST_GRP = '224.1.1.1'
MCAST_PORT = 5000
class MulticastTransport(Transport):
"""
Transport over multicast connections rather than using a broker.
"""
def __init__(self, encoding):
Transport.__init__(self, [], False, False, 0.0, 0.0, 0.0, 0.0, 0, False, None, None, None, None, False,
DEFAULT_SSL_VERSION, None, None, None, encoding)
self.subscriptions = {}
self.current_host_and_port = (MCAST_GRP, MCAST_PORT)
def attempt_connection(self):
"""
Establish a multicast connection - uses 2 sockets (one for sending, the other for receiving)
"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
self.receiver_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.receiver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.receiver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
self.receiver_socket.bind(('', MCAST_PORT))
mreq = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
self.receiver_socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
if not self.socket or not self.receiver_socket:
raise exception.ConnectFailedException()
def send(self, encoded_frame):
"""
Send an encoded frame through the mcast socket.
:param bytes encoded_frame:
"""
self.socket.sendto(encoded_frame, (MCAST_GRP, MCAST_PORT))
def receive(self):
"""
Receive 1024 bytes from the multicast receiver socket.
:rtype: bytes
"""
return self.receiver_socket.recv(1024)
def process_frame(self, f, frame_str):
"""
:param Frame f: Frame object
:param bytes frame_str: Raw frame content
"""
frame_type = f.cmd.lower()
if frame_type in ['disconnect']:
return
if frame_type == 'send':
frame_type = 'message'
f.cmd = 'MESSAGE'
if frame_type in ['connected', 'message', 'receipt', 'error', 'heartbeat']:
if frame_type == 'message':
if f.headers['destination'] not in self.subscriptions.values():
return
(f.headers, f.body) = self.notify('before_message', f.headers, f.body)
self.notify(frame_type, f.headers, f.body)
if 'receipt' in f.headers:
receipt_frame = Frame('RECEIPT', {'receipt-id': f.headers['receipt']})
lines = convert_frame(receipt_frame)
self.send(encode(pack(lines)))
logging.debug("Received frame: %r, headers=%r, body=%r", f.cmd, f.headers, f.body)
def stop(self):
self.running = False
if hasattr(self.receiver_socket, 'SHUT_RDWR'):
self.receiver_socket.shutdown(socket.SHUT_RDWR)
self.receiver_socket.close()
self.disconnect_socket()
Transport.stop(self)
class MulticastConnection(BaseConnection, Protocol12):
def __init__(self, wait_on_receipt=False, encoding='utf-8'):
"""
:param bool wait_on_receipt: deprecated, ignored
"""
self.transport = MulticastTransport(encoding)
self.transport.set_listener('mcast-listener', self)
self.transactions = {}
Protocol12.__init__(self, self.transport, (0, 0))
def connect(self, username=None, passcode=None, wait=False, headers=None, **keyword_headers):
"""
:param str username:
:param str passcode:
:param bool wait:
:param dict headers:
:param keyword_headers:
"""
self.transport.start()
def subscribe(self, destination, id, ack='auto', headers=None, **keyword_headers):
"""
:param str destination:
:param str id:
:param str ack:
:param dict headers:
:param keyword_headers:
"""
self.transport.subscriptions[id] = destination
def unsubscribe(self, id, headers=None, **keyword_headers):
"""
:param str id:
:param dict headers:
:param keyword_headers:
"""
del self.transport.subscriptions[id]
def disconnect(self, receipt=None, headers=None, **keyword_headers):
"""
:param str receipt:
:param dict headers:
:param keyword_headers:
"""
Protocol12.disconnect(self, receipt, headers, **keyword_headers)
self.transport.stop()
def send_frame(self, cmd, headers=None, body=''):
"""
:param str cmd:
:param dict headers:
:param body:
"""
if headers is None: | headers = {}
frame = utils.Frame(cmd, headers, body)
if cmd == CMD_BEGIN:
trans = headers[HDR_TRANSACTION]
if trans in self.transactions:
self.notify('error', {}, 'Transaction %s already started' % trans)
else:
self.transactions[trans] = []
elif cmd == CMD_COMMIT:
trans = headers[HDR_TRANSACTION]
if trans not in self.transactions:
self.notify('error', {}, 'Transaction %s not started' % trans)
else:
for f in self.transactions[trans]:
self.transport.transmit(f)
del self.transactions[trans]
elif cmd == CMD_ABORT:
trans = headers['transaction']
del self.transactions[trans]
else:
if 'transaction' in headers:
trans = headers['transaction']
if trans not in self.transactions:
self.transport.notify('error', {}, 'Transaction %s not started' % trans)
return
else:
self.transactions[trans].append(frame)
else:
self.transport.transmit(frame) | |
expansion.component.ts | import { Component } from '@angular/core';
@Component({
selector: 'app-expansion',
templateUrl: './expansion.component.html',
styleUrls: ['./expansion.component.scss']
})
export class | {
panelOpenState = false;
step = 0;
setStep(index: number) {
this.step = index;
}
nextStep() {
this.step++;
}
prevStep() {
this.step--;
}
}
| ExpansionComponent |
ampd.py | import numpy as np
from scipy.ndimage import uniform_filter1d
from scipy.signal import detrend
def find_peaks_original(x, scale=None, debug=False):
|
def find_peaks(x, scale=None, debug=False):
"""Find peaks in quasi-periodic noisy signals using AMPD algorithm.
Extended implementation handles peaks near start/end of the signal.
Optimized implementation by Igor Gotlibovych, 2018
Parameters
----------
x : ndarray
1-D array on which to find peaks
scale : int, optional
specify maximum scale window size of (2 * scale + 1)
debug : bool, optional
if set to True, return the Local Scalogram Matrix, `LSM`,
        weighted number of maxima, 'G',
and scale at which G is maximized, `l`,
together with peak locations
Returns
-------
pks: ndarray
The ordered array of peak indices found in `x`
"""
x = detrend(x)
N = len(x)
L = N // 2
if scale:
L = min(scale, L)
    # create LSM matrix
LSM = np.ones((L, N), dtype=bool)
for k in np.arange(1, L + 1):
LSM[k - 1, 0:N - k] &= (x[0:N - k] > x[k:N]
) # compare to right neighbours
LSM[k - 1, k:N] &= (x[k:N] > x[0:N - k]) # compare to left neighbours
# Find scale with most maxima
G = LSM.sum(axis=1)
G = G * np.arange(
N // 2, N // 2 - L, -1
) # normalize to adjust for new edge regions
l_scale = np.argmax(G)
# find peaks that persist on all scales up to l
pks_logical = np.min(LSM[0:l_scale, :], axis=0)
pks = np.flatnonzero(pks_logical)
if debug:
return pks, LSM, G, l_scale
return pks
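# Usage sketch (illustrative only; the test signal below is an assumption, not
# part of the original module):
#   t = np.linspace(0, 10, 1000)
#   x = np.sin(2 * np.pi * t) + 0.3 * np.random.randn(t.size)
#   pks = find_peaks(x)             # indices of detected peaks
#   pks = find_peaks(x, scale=100)  # cap the scale window at 2 * 100 + 1 samples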
def find_peaks_adaptive(x, window=None, debug=False):
"""Find peaks in quasi-periodic noisy signals using ASS-AMPD algorithm.
Adaptive Scale Selection Automatic Multi-Scale Peak Detection,
an extension of AMPD -
"An Efficient Algorithm for Automatic Peak Detection in
Noisy Periodic and Quasi-Periodic Signals", Algorithms 2012, 5, 588-603
https://doi.org/10.1109/ICRERA.2016.7884365
Optimized implementation by Igor Gotlibovych, 2018
Parameters
----------
x : ndarray
1-D array on which to find peaks
window : int, optional
sliding window size for adaptive scale selection
debug : bool, optional
if set to True, return the Local Scalogram Matrix, `LSM`,
and `adaptive_scale`,
together with peak locations
Returns
-------
pks: ndarray
The ordered array of peak indices found in `x`
"""
x = detrend(x)
N = len(x)
if not window:
window = N
if window > N:
window = N
L = window // 2
    # create LSM matrix
LSM = np.ones((L, N), dtype=bool)
for k in np.arange(1, L + 1):
LSM[k - 1, 0:N - k] &= (x[0:N - k] > x[k:N]
) # compare to right neighbours
LSM[k - 1, k:N] &= (x[k:N] > x[0:N - k]) # compare to left neighbours
    # Create continuous adaptive LSM
ass_LSM = uniform_filter1d(LSM * window, window, axis=1, mode='nearest')
normalization = np.arange(L, 0, -1) # scale normalization weight
ass_LSM = ass_LSM * normalization.reshape(-1, 1)
# Find adaptive scale at each point
adaptive_scale = ass_LSM.argmax(axis=0)
# construct reduced LSM
LSM_reduced = LSM[:adaptive_scale.max(), :]
mask = (np.indices(LSM_reduced.shape)[0] > adaptive_scale
) # these elements are outside scale of interest
LSM_reduced[mask] = 1
# find peaks that persist on all scales up to l
pks_logical = np.min(LSM_reduced, axis=0)
pks = np.flatnonzero(pks_logical)
if debug:
return pks, ass_LSM, adaptive_scale
return pks
| """Find peaks in quasi-periodic noisy signals using AMPD algorithm.
Automatic Multi-Scale Peak Detection originally proposed in
"An Efficient Algorithm for Automatic Peak Detection in
Noisy Periodic and Quasi-Periodic Signals", Algorithms 2012, 5, 588-603
https://doi.org/10.1109/ICRERA.2016.7884365
Optimized implementation by Igor Gotlibovych, 2018
Parameters
----------
x : ndarray
1-D array on which to find peaks
scale : int, optional
specify maximum scale window size of (2 * scale + 1)
debug : bool, optional
if set to True, return the Local Scalogram Matrix, `LSM`,
and scale with most local maxima, `l`,
together with peak locations
Returns
-------
pks: ndarray
The ordered array of peak indices found in `x`
"""
x = detrend(x)
N = len(x)
L = N // 2
if scale:
L = min(scale, L)
    # create LSM matrix
LSM = np.zeros((L, N), dtype=bool)
for k in np.arange(1, L):
LSM[k - 1, k:N - k] = (
(x[0:N - 2 * k] < x[k:N - k]) & (x[k:N - k] > x[2 * k:N])
)
# Find scale with most maxima
G = LSM.sum(axis=1)
l_scale = np.argmax(G)
# find peaks that persist on all scales up to l
pks_logical = np.min(LSM[0:l_scale, :], axis=0)
pks = np.flatnonzero(pks_logical)
if debug:
return pks, LSM, l_scale
return pks |
els_data.py | # Cassini CAPS ELS data reader
# Modeled after Gary's MDIS reader
# Kiri Wagstaff, 11/28/18
import os
from datetime import datetime
from collections import defaultdict
import numpy as np
from pds.core.parser import Parser
from scipy.interpolate import interp1d
GEOMFILE = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'ref',
'geometricfactor.npz'
)
_EARRAY = None
_GEOM = None
E_CHARGE_COULOMBS = 1.602176487e-19
E_MASS_KG = 9.10938188e-31
def _load_gfactors():
"""
Using global variables here because we only want to read these values from
file once, then cache them at the module level
"""
global _EARRAY
global _GEOM
if _EARRAY is None:
sav = np.load(GEOMFILE)
_EARRAY = sav['earray']
_GEOM = sav['geom']
def needs_gfactors(f):
"""
Decorator for any function that needs to have the geometric factors loaded
first (calls `_load_gfactors` prior to calling the function).
"""
def fprime(*args, **kwargs):
_load_gfactors()
return f(*args, **kwargs)
return fprime
@needs_gfactors
def compute_def(e, counts):
"""
Computes the Differential Energy Flux (DEF)
Units: m^-2 sr^-1 s^-1
According to Abi's script and the CAPS User Guide, this is done by dividing
the counts by the anode- and energy-specific geometric factors.
"""
# According to section 9.2 of the CAPS PDS User Guide, the proper thing to
# do is interpolate the geometric factors: "If the ELS data record you are
# working with has energy summing ... then you can use the above table to
# interpolate the value you need for G."
geom_interp = interp1d(
_EARRAY, _GEOM, axis=0,
fill_value='extrapolate',
bounds_error=False,
assume_sorted=True,
)
G = geom_interp(e)
# newaxis is for the "phi" dimension of the data
return counts / G[..., np.newaxis]
def compute_dnf(e, def_data):
"""
Computes the Differential Number Flux (DNF)
Units: m^-2 sr^-1 s^-1 J^-1
Following Abi's script and the CAPS User Guide, this is the DEF divided by
the product of the energy and the charge of the particle (electron).
"""
# Add the new axes to broadcast across the theta/phi dimensions
return def_data / (E_CHARGE_COULOMBS*e[..., np.newaxis, np.newaxis])
def compute_psd(e, def_data):
"""
Computes the Phase Space Density (PSD)
Units: m^-6 s^-3
Following Abi's script and the CAPS User Guide, this is the DEF times a
factor of (mass^2 / (2 q^2 E^2)).
"""
qE_squared = (E_CHARGE_COULOMBS*e)**2
# Add the new axes to broadcast across the theta/phi dimensions
return (
def_data * (E_MASS_KG**2) /
(2 * qE_squared[..., np.newaxis, np.newaxis])
)
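# Illustrative sketch of how the derived products chain together (assumes an
# `ELS` object constructed as in the class below; the file name is hypothetical):
#   els = ELS('ELS_200501018_V01.DAT')
#   def_data = compute_def(els.dim1_e, els.data)   # counts -> DEF
#   dnf_data = compute_dnf(els.dim1_e, def_data)   # DEF / (q * E)
#   psd_data = compute_psd(els.dim1_e, def_data)   # DEF * m^2 / (2 q^2 E^2)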
def parse_dates(datearray):
return np.array([
datetime.strptime(row.tostring(), '%Y-%jT%H:%M:%S.%f')
for row in datearray
])
def reshape_data(data):
# Dimensions taken from ELS_V01.FMT
# (records, energy, theta, phi)
return data.reshape((-1, 63, 8, 1))
class ELS(object):
COLUMNS = (
# Values obtained from ELS_V01.FMT
# Name, start byte, dtype, items, missing constant
('start_date', 1, np.uint8, 21, None),
('dead_time_method', 22, np.uint8, 1, None),
('record_dur', 25, np.float32, 1, 65535.0),
('acc_time', 29, np.float32, 63, 65535.0),
('data', 281, np.float32, 504, 65535.0),
('dim1_e', 2297, np.float32, 63, 65535.0),
('dim1_e_upper', 2549, np.float32, 63, 65535.0),
('dim1_e_lower', 2801, np.float32, 63, 65535.0),
('dim2_theta', 3053, np.float32, 8, 65535.0),
('dim2_theta_upper', 3085, np.float32, 8, 65535.0),
('dim2_theta_lower', 3117, np.float32, 8, 65535.0),
('dim3_phi', 3149, np.float32, 1, 65535.0),
('dim3_phi_upper', 3153, np.float32, 1, 65535.0),
('dim3_phi_lower', 3157, np.float32, 1, 65535.0),
)
POSTPROCESS = {
'start_date': parse_dates,
'data': reshape_data,
}
def __init__(self, data_path, lbl_path=None, verbose=False):
"""
If the LBL file path is not specified, we'll assume that it is
sitting right next to the DAT file (and raise an Error if not).
"""
self.data_path = data_path
if lbl_path is None:
# Infer the LBL path if not supplied
data_base, data_ext = os.path.splitext(data_path)
if data_ext.lower() == data_ext:
lbl_path = data_base + '.lbl'
else:
lbl_path = data_base + '.LBL'
if not os.path.exists(lbl_path):
raise ValueError('Expected LBL file "%s" does not exist' % lbl_path)
self.lbl_path = lbl_path
self.verbose = verbose
self._load()
def _log(self, msg):
if self.verbose:
print(msg)
def _load(self):
with open(self.lbl_path, 'r') as f:
parser = Parser()
labels = parser.parse(f)
record_bytes = int(labels['RECORD_BYTES'])
nrecords = int(labels['FILE_RECORDS'])
columns = defaultdict(list)
with open(self.data_path, 'rb') as f:
for i in range(nrecords):
for cname, cstart, ctype, citems, _ in ELS.COLUMNS:
# Subtract 1 because they are indexed from 1 in the .FMT
f.seek(i*record_bytes + cstart - 1)
columns[cname].append(f.read(np.dtype(ctype).itemsize*citems))
for cname, _, ctype, citems, missing in ELS.COLUMNS: | # Replace missing value with NaN
if missing is not None:
col[col == missing] = np.nan
# Apply post-processing steps to appropriate columns
if cname in ELS.POSTPROCESS:
col = ELS.POSTPROCESS[cname](col)
# Store column as object attribute
setattr(self, cname, col)
# Add iso_data by summing across theta/phi
self.iso_data = np.sum(self.data, axis=(-2, -1))
# Compute DEF, DNF, and PSD
self.def_data = compute_def(self.dim1_e, self.data)
self.dnf_data = compute_dnf(self.dim1_e, self.def_data)
self.psd_data = compute_psd(self.dim1_e, self.def_data) | cstr = ''.join(columns[cname])
col = np.fromstring(cstr, dtype=ctype, count=nrecords*citems)
col = np.squeeze(col.reshape((nrecords, citems)))
|
utils.py | from __future__ import unicode_literals
import datetime
import decimal
from collections import defaultdict
from django.contrib.auth import get_permission_codename
from django.core.exceptions import FieldDoesNotExist
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.forms.forms import pretty_name
from django.utils import formats, six, timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ungettext
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
field_name = lookup_path.split('__', 1)[0]
field = opts.get_field(field_name)
if hasattr(field, 'get_path_info') and any(path.m2m for path in field.get_path_info()):
return True
return False
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and the string literals 'false' and '0'
if key.endswith('__isnull'):
if value.lower() in ('', 'false', '0'):
value = False
else:
value = True
return value
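# For example (illustrative):
#   prepare_lookup_value('id__in', '1,2,3')        -> ['1', '2', '3']
#   prepare_lookup_value('pub_date__isnull', '0')  -> False
#   prepare_lookup_value('pub_date__isnull', '1')  -> True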
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' and similarly problematic characters.
Similar to urllib.quote, except that the quoting is slightly different so
that it doesn't get automatically unquoted by the Web browser.
"""
if not isinstance(s, six.string_types):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"[]<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
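# For example (illustrative): quote('a/b_c') returns 'a_2Fb_5Fc', and
# unquote('a_2Fb_5Fc') restores 'a/b_c'.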
def flatten(fields):
"""Returns a list which is a single level of flattening of the
original list."""
flat = []
for field in fields:
if isinstance(field, (list, tuple)):
flat.extend(field)
else:
flat.append(field)
return flat
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
field_names.extend(
flatten(opts['fields'])
)
return field_names
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
if has_admin:
try:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.model_name),
None, (quote(obj._get_pk_val()),))
except NoReverseMatch:
# Change url doesn't exist -- don't display link to edit
return no_edit_link
p = '%s.%s' % (opts.app_label,
get_permission_codename('delete', opts))
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{}: <a href="{}">{}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return no_edit_link
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, collector.model_count, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
self.model_count = defaultdict(int)
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source=None, source_attr=None, **kwargs):
for obj in objs:
if source_attr and not source_attr.endswith('+'):
related_name = source_attr % {
'class': source._meta.model_name,
'app_label': source._meta.app_label,
}
self.add_edge(getattr(obj, related_name), obj)
else:
self.add_edge(None, obj)
self.model_count[obj._meta.verbose_name_plural] += 1
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
def can_fast_delete(self, *args, **kwargs):
"""
We always want to load the objects into memory so that we can display
them to the user in confirm page.
"""
return False
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_text(opts.verbose_name),
'verbose_name_plural': force_text(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = _get_non_gfk_field(opts, name)
except FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and
hasattr(model_admin, name) and
not name == '__str__' and
not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def _get_non_gfk_field(opts, name):
"""
For historical reasons, the admin app relies on GenericForeignKeys as being
"not found" by get_field(). This could likely be cleaned up.
"""
field = opts.get_field(name)
if field.is_relation and field.one_to_many and not field.related_model:
raise FieldDoesNotExist()
return field
def label_for_field(name, model, model_admin=None, return_attr=False):
"""
Returns a sensible label for a field name. The name can be a callable,
property (but not created with @property decorator) or the name of an
object's attribute, as well as a genuine fields. If return_attr is
True, the resolved attribute (which could be a callable) is also returned.
This will be None if (and only if) the name refers to a field.
"""
attr = None
try:
field = _get_non_gfk_field(model._meta, name)
try:
label = field.verbose_name
except AttributeError:
# field is likely a ForeignObjectRel
label = field.opts.verbose_name
except FieldDoesNotExist:
if name == "__unicode__":
label = force_text(model._meta.verbose_name)
attr = six.text_type
elif name == "__str__":
label = force_str(model._meta.verbose_name)
attr = bytes
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif (isinstance(attr, property) and
hasattr(attr, "fget") and
hasattr(attr.fget, "short_description")):
label = attr.fget.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
def help_text_for_field(name, model):
help_text = ""
try:
field = _get_non_gfk_field(model._meta, name)
except FieldDoesNotExist:
pass
else:
if hasattr(field, 'help_text'):
help_text = field.help_text
return smart_text(help_text)
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, models.FloatField):
return formats.number_format(value)
elif isinstance(field, models.FileField):
return mark_safe('<a href="%s">%s</a>' % (
conditional_escape(value.url),
conditional_escape(value),
))
else:
return smart_text(value)
def display_for_value(value, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if boolean:
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
return formats.number_format(value)
else:
return smart_text(value)
class NotRelationField(Exception):
pass
def | (field):
if hasattr(field, 'get_path_info'):
return field.get_path_info()[-1].to_opts.model
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field = parent._meta.get_field(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces) - 1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
# Field should point to another model
if field.is_relation and not (field.auto_created and not field.concrete):
related_name = field.related_query_name()
parent = field.rel.to
else:
related_name = field.field.name
parent = field.related_model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
""" Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
"""
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field(piece))
return fields
def remove_trailing_data_field(fields):
""" Discard trailing non-relation field if extant. """
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
def get_limit_choices_to_from_path(model, path):
""" Return Q object for limiting choices if applicable.
If final model in path is linked via a ForeignKey or ManyToManyField which
has a ``limit_choices_to`` attribute, return it as a Q object.
"""
fields = get_fields_from_path(model, path)
fields = remove_trailing_data_field(fields)
get_limit_choices_to = (
fields and hasattr(fields[-1], 'rel') and
getattr(fields[-1].rel, 'get_limit_choices_to', None))
if not get_limit_choices_to:
return models.Q() # empty Q
limit_choices_to = get_limit_choices_to()
if isinstance(limit_choices_to, models.Q):
return limit_choices_to # already a Q
else:
return models.Q(**limit_choices_to) # convert dict to Q
| get_model_from_relation |
server.go | package main
import (
"io"
"net"
"strings"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
"github.com/suzuki-shunsuke/example/golang/grpc/client-streaming/hello"
)
type ( | func (s *server) SayHello(stream hello.Hello_SayHelloServer) error {
names := []string{}
for {
req, err := stream.Recv()
if err == io.EOF {
return stream.SendAndClose(&hello.HelloReply{
Message: strings.Join(names, ", "),
})
}
if err != nil {
return err
}
names = append(names, req.GetName())
}
}
func main() {
lis, _ := net.Listen("tcp", ":5000")
s := grpc.NewServer()
srv := server{}
hello.RegisterHelloServer(s, &srv)
reflection.Register(s)
s.Serve(lis)
} | server struct{}
)
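// Minimal client-side sketch (assumed names from the standard protoc-gen-go
// output for this service; illustrative, not part of the original file):
//   conn, _ := grpc.Dial("localhost:5000", grpc.WithInsecure())
//   client := hello.NewHelloClient(conn)
//   stream, _ := client.SayHello(context.Background())
//   _ = stream.Send(&hello.HelloRequest{Name: "alice"})
//   _ = stream.Send(&hello.HelloRequest{Name: "bob"})
//   reply, _ := stream.CloseAndRecv() // reply.Message == "alice, bob"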
|
tutorial4.py | import glob
import os
import sys
import random
import time
import numpy as np
import cv2
import math
from collections import deque
import tensorflow as tf
from keras.applications.xception import Xception
from keras.layers import Dense, GlobalAveragePooling2D
from keras.optimizers import Adam
from keras.models import Model
'''
Search for the egg file used by the Carla package
'''
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
SHOW_PREVIEW = False
IM_WIDTH = 640
IM_HEIGHT = 480
SECONDS_PER_EPISODE = 10
REPLAY_MEMORY_SIZE = 5_000
MIN_REPLAY_MEMORY_SIZE = 1_000
MINIBATCH_SIZE = 16
PREDICTION_BATCH_SIZE = 1
TRAINING_BATCH_SIZE = MINIBATCH_SIZE // 4
UPDATE_TARGET_EVERY = 5
MODEL_NAME = "Xception"
MEMORY_FRACTION = 0.8
MIN_REWARD = -200
EPISODES = 100
DISCOUNT = 0.99
epsilon = 1
EPSILON_DECAY = 0.95
MIN_EPSILON = 0.001
AGGREGATE_STATS_EVERY = 10
'''
Set up the environment class
'''
class CarEnv:
    SHOW_CAM = SHOW_PREVIEW # whether to show the camera preview
STEER_AMT = 1.0
im_width = IM_WIDTH
im_height = IM_HEIGHT
front_camera = None
actor_list = []
    collision_hist = [] # list of collision events
def __init__(self):
self.client = carla.Client("localhost", 2000)
self.client.set_timeout(2.0)
        # If the client is running, we can retrieve the world.
self.world = self.client.get_world()
        # The world holds the blueprint list we can use when adding new actors to the simulation.
self.blueprint_library = self.world.get_blueprint_library()
        # Pick the vehicle model
self.model_3 = self.blueprint_library.filter("model3")[0]
def reset(self):
self.collision_hist = []
self.actor_list = []
        # Spawn the vehicle at a random location and add it to the actor list
self.transform = | m.choice(self.world.get_map().get_spawn_points())
self.vehicle = self.world.spawn_actor(self.model_3, self.transform)
self.actor_list.append(self.vehicle)
        # Get the blueprint of the RGB camera sensor
self.rgb_cam = self.blueprint_library.find('sensor.camera.rgb')
        # Set the size of the images received from the RGB camera sensor
self.rgb_cam.set_attribute("image_size_x", f"{self.im_width}")
self.rgb_cam.set_attribute("image_size_y", f"{self.im_height}")
self.rgb_cam.set_attribute("fov", f"110")
        # Adjust the sensor position
transform = carla.Transform(carla.Location(x=2.5, z=0.7))
        # Spawn the sensor and add it to the actor list.
self.sensor = self.world.spawn_actor(self.rgb_cam, transform, attach_to=self.vehicle)
self.actor_list.append(self.sensor)
        # Use a lambda to handle the data received from the sensor
self.sensor.listen(lambda data: self.process_img(data))
self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0))
'''
        When the vehicle is spawned, hitting the ground registers a collision,
        and the sensors may take some time to initialize and return values.
        Therefore wait for about 4 seconds.
'''
time.sleep(4)
        # Get the blueprint of the collision sensor
colsensor = self.blueprint_library.find("sensor.other.collision")
        # Spawn the sensor and add it to the actor list
self.colsensor = self.world.spawn_actor(colsensor, transform, attach_to=self.vehicle)
self.actor_list.append(self.colsensor)
        # Use a lambda to handle the data received from the sensor
self.colsensor.listen(lambda event: self.collision_data(event))
while self.front_camera is None:
time.sleep(0.01)
'''
        Record the actual start time of the episode,
        make sure neither brake nor throttle is applied,
        then return the first observation.
'''
self.episode_start = time.time()
self.vehicle.apply_control(carla.VehicleControl(throttle=0.0, brake=0.0))
return self.front_camera
    # Handle collision data
def collision_data(self, event):
self.collision_hist.append(event)
    # Handle image data
def process_img(self, image):
i = np.array(image.raw_data)
#print(i.shape)
i2 = i.reshape((self.im_height, self.im_width, 4))
i3 = i2[:, :, :3]
if self.SHOW_CAM:
cv2.imshow("", i3)
cv2.waitKey(1)
self.front_camera = i3
    # Manage action, reward, done, any_extra_info
def step(self, action):
if action == 0:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=-1*self.STEER_AMT))
elif action == 1:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer= 0))
elif action == 2:
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=1*self.STEER_AMT))
v = self.vehicle.get_velocity()
kmh = int(3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2))
if len(self.collision_hist) != 0:
done = True
reward = -200
elif kmh < 50:
done = False
reward = -1
else:
done = False
reward = 1
if self.episode_start + SECONDS_PER_EPISODE < time.time():
done = True
return self.front_camera, reward, done, None
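# Illustrative rollout sketch (assumed usage, not part of the original tutorial):
#   env = CarEnv()
#   state = env.reset()
#   done = False
#   while not done:
#       state, reward, done, _ = env.step(random.randrange(3))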
# Reinforcement learning
class DQNAgent:
def __init__(self):
self.model = self.create_model()
self.target_model = self.create_model()
self.target_model.set_weights(self.model.get_weights())
self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
self.tensorboard = ModifiedTensorBoard(log_dir=f"logs/{MODEL_NAME}-{int(time.time())}")
self.target_update_counter = 0
self.graph = tf.get_default_graph()
self.terminate = False
self.last_logged_episode = 0
self.training_initialized = False
    # Build the model
def create_model(self):
base_model = Xception(weights=None, include_top=False, input_shape=(IM_HEIGHT, IM_WIDTH,3))
x = base_model.output
x = GlobalAveragePooling2D()(x)
predictions = Dense(3, activation="linear")(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=["accuracy"])
return model
def update_replay_memory(self, transition):
# transition = (current_state, action, reward, new_state, done)
self.replay_memory.append(transition)
def train(self):
if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
return
minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
current_states = np.array([transition[0] for transition in minibatch])/255
with self.graph.as_default():
current_qs_list = self.model.predict(current_states, PREDICTION_BATCH_SIZE)
new_current_states = np.array([transition[3] for transition in minibatch])/255
with self.graph.as_default():
future_qs_list = self.target_model.predict(new_current_states, PREDICTION_BATCH_SIZE)
# x = input / y = output
X = []
y = []
for index, (current_state, action, reward, new_state, done) in enumerate(minibatch):
if not done:
max_future_q = np.max(future_qs_list[index])
new_q = reward + DISCOUNT * max_future_q
else:
new_q = reward
current_qs = current_qs_list[index]
current_qs[action] = new_q
X.append(current_state)
y.append(current_qs)
'''
        Log per episode rather than per step;
        write to TensorBoard only when log_this_step is True.
'''
log_this_step = False
if self.tensorboard.step > self.last_logged_episode:
log_this_step = True
self.last_log_episode = self.tensorboard.step
with self.graph.as_default():
self.model.fit(np.array(X)/255, np.array(y), batch_size=TRAINING_BATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if log_this_step else None)
if log_this_step:
self.target_update_counter += 1
        # Check whether the target_model should be updated
if self.target_update_counter > UPDATE_TARGET_EVERY:
self.target_model.set_weights(self.model.get_weights())
self.target_update_counter = 0
def get_qs(self, state):
        return self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0]
    # Run training
def train_in_loop(self):
X = np.random.uniform(size=(1, IM_HEIGHT, IM_WIDTH, 3)).astype(np.float32)
y = np.random.uniform(size=(1, 3)).astype(np.float32)
with self.graph.as_default():
self.model.fit(X,y, verbose=False, batch_size=1)
self.training_initialized = True
while True:
if self.terminate:
return
self.train()
time.sleep(0.01) | rando |
main.rs | use chrono::Utc;
use htmlescape::encode_minimal;
use scraper::{ElementRef, Html, Selector};
use std::env;
use std::fs::File;
use std::io::Write;
#[derive(Debug)]
struct Notice {
index: u32,
title: String,
author: String,
category: String,
link: String,
expired_at: String,
}
fn fetch_html(base_url: &str, offset: u8) -> String {
let url = format!(
"{}?mode=list&board_no=304&pager.offset={}",
base_url, offset
);
let res = reqwest::blocking::Client::builder()
.danger_accept_invalid_certs(true)
.build()
.unwrap()
.get(&url)
.header("User-Agent", "Mozilla/5.0")
.send()
.unwrap();
assert!(res.status().is_success());
res.text().unwrap()
}
fn parse_text(row: &ElementRef, selector: &Selector) -> String {
row.select(&selector)
.flat_map(|datum| datum.text().collect::<Vec<_>>())
.map(|datum| datum.trim().replace("\n", "").replace("\t", ""))
.filter(|datum| !datum.is_empty())
.collect::<Vec<_>>()
.first()
.unwrap_or(&String::from(""))
.clone()
}
fn parse_attr(row: &ElementRef, selector: &Selector) -> String {
row.select(&selector)
.flat_map(|datum| datum.value().attr("href"))
.collect::<Vec<_>>()
.first()
.unwrap_or(&"")
.to_string()
}
fn parse_html(html: &str, base_url: &str) -> Vec<Notice> {
let fragment = Html::parse_document(html);
let row_selector = Selector::parse("table.list_table > tbody > tr").unwrap();
fragment
.select(&row_selector)
.map(|row| -> Notice {
let index_selector = Selector::parse("td:nth-child(1)").unwrap();
let category_selector = Selector::parse("td:nth-child(5)").unwrap();
let title_selector = Selector::parse("td:nth-child(2)").unwrap();
let link_selector = Selector::parse("td:nth-child(2) > a").unwrap();
let author_selector = Selector::parse("td:nth-child(7)").unwrap();
let expired_at_selector = Selector::parse("td:nth-child(6)").unwrap();
Notice {
index: parse_text(&row, &index_selector).parse::<u32>().unwrap(),
category: encode_minimal(&parse_text(&row, &category_selector)),
title: encode_minimal(&parse_text(&row, &title_selector)),
author: encode_minimal(&parse_text(&row, &author_selector)),
link: encode_minimal(&format!("{}{}", base_url, parse_attr(&row, &link_selector))),
expired_at: encode_minimal(&parse_text(&row, &expired_at_selector)),
}
})
.collect::<Vec<_>>()
}
fn compose_xml(notices: &[Notice]) -> String {
let header = format!(
"<rss version=\"2.0\">\n \
<channel>\n \
<title>Ajou University Department of Digital Media Notices</title>\n \
<link>https://media.ajou.ac.kr/media/board/board01.jsp</link>\n \
<description>Recently published notices</description>\n \
<language>ko-kr</language>\n \
<lastBuildDate>{}</lastBuildDate>",
Utc::now().to_rfc2822()
);
let footer = "</channel>\n \
</rss>";
let items = notices
.iter()
.map(|notice| -> String {
let description = format!(
"[{}] - {} (~{})",
notice.category, notice.author, notice.expired_at
);
format!(
"<item>\n \
<title>{}</title>\n \
<link>{}</link>\n \
<description>{}</description>\n \
</item>",
notice.title, notice.link, description
)
})
.collect::<Vec<String>>()
.join("\n");
format!("{}\n{}\n{}", header, items, footer)
}
fn compose_md(notices: &[Notice]) -> String {
let header = "# 미디어학과 최근 공지사항";
let items = notices
.iter()
.map(|notice| -> String {
let description = format!(
"[{}] - {} (~{})",
notice.category, notice.author, notice.expired_at
);
format!(
r"* **[{}]({})**\n {}",
notice.title, notice.link, description
)
})
.collect::<Vec<String>>()
.join(r"\n\n");
format!(r"{}\n\n{}", header, items)
}
fn compose_commit_message(notices: &[Notice], last_index: u32) -> String {
let header = format!("dist: {}", last_index);
let items = notices
.iter()
.map(|notice| format!("* {}", notice.title))
.collect::<Vec<String>>()
.join("\n");
format!("{}\n\n{}", header, items)
}
fn write_last_index(last_index: u32) {
let current_exe = env::current_exe().unwrap();
let current_dir = current_exe.parent().unwrap();
let path = format!("{}/last_index", current_dir.display());
let mut file = File::create(&path).unwrap();
file.write_all(last_index.to_string().as_bytes()).unwrap();
}
fn main() {
const BASE_URL: &str = "https://media.ajou.ac.kr/media/board/board01.jsp";
const OFFSET: u8 = 0;
let args = env::args().collect::<Vec<String>>();
let last_index = args[1].parse::<u32>().unwrap();
let mode = args[2]
.parse::<String>()
.unwrap_or_else(|_| "xml".to_string());
let html = fetch_html(BASE_URL, OFFSET);
let notices = parse_html(&html, BASE_URL);
let latest_index = notices.first().unwrap().index;
if last_index != latest_index {
match mode.as_str() {
"xml" => println!("{}", compose_xml(¬ices)),
"md" => println!("{}", compose_md(¬ices)),
"cm" => println!("{}", compose_commit_message(¬ices, last_index)),
_ => eprintln!("unknown mode '{}'", mode),
}
write_last_index(latest_index);
} else {
eprintln!("n | ew notices not found")
}
}
|
|
struct_number_groups.go | package cloudcallcenter
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
// | //See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// NumberGroups is a nested struct in cloudcallcenter response
type NumberGroups struct {
NumberGroupDomain []NumberGroupDomain `json:"NumberGroupDomain" xml:"NumberGroupDomain"`
} | //Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
lib.rs | // Copyright 2021 Gregory Oakes
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#![allow(dead_code)]
use proc_macro::TokenStream;
use quote::{format_ident, quote};
use syn::parse_macro_input;
mod structurally_asserted;
use structurally_asserted::{StructurallyAsserted, StructurallyAssertedAttrs};
#[proc_macro_attribute]
pub fn test_structure(attrs: TokenStream, tokens: TokenStream) -> TokenStream {
let mut item = parse_macro_input!(tokens as StructurallyAsserted);
let attrs = parse_macro_input!(attrs as StructurallyAssertedAttrs);
let size_assertion = attrs.into_assertions(&item.ident);
let loc_assertions = item.drain_loc_assertions();
let func_name = format_ident!("structure_{}", item.ident);
let output = quote! {
#item
#[cfg(test)]
#[allow(non_snake_case)]
#[test]
fn #func_name() {
#loc_assertions
#size_assertion
}
};
output.into()
}
mod endianness;
use endianness::{Endianness, EndiannessAttrs};
#[proc_macro_attribute]
pub fn endianness(attrs: TokenStream, tokens: TokenStream) -> TokenStream {
let mut item = parse_macro_input!(tokens as Endianness);
let attrs = parse_macro_input!(attrs as EndiannessAttrs);
let fns = item.drain_field_endianness(attrs.0);
let ident = &item.ident;
let output = quote! {
#item
impl #ident {
#fns | }
};
output.into()
} |
|
serialize.rs | //! Contains impls of `ZcashSerialize`, `ZcashDeserialize` for all of the
//! transaction types, so that all of the serialization logic is in one place.
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::{
convert::TryInto,
io::{self, Read},
sync::Arc,
};
use crate::notes;
use crate::proofs::ZkSnarkProof;
use crate::serialization::{
ReadZcashExt, SerializationError, WriteZcashExt, ZcashDeserialize, ZcashSerialize,
};
use crate::types::Script;
use super::*;
const OVERWINTER_VERSION_GROUP_ID: u32 = 0x03C4_8270;
const SAPLING_VERSION_GROUP_ID: u32 = 0x892F_2085;
/// The coinbase data for a genesis block.
///
/// Zcash uses the same coinbase data for the Mainnet, Testnet, and Regtest
/// genesis blocks.
const GENESIS_COINBASE_DATA: [u8; 77] = [
4, 255, 255, 7, 31, 1, 4, 69, 90, 99, 97, 115, 104, 48, 98, 57, 99, 52, 101, 101, 102, 56, 98,
55, 99, 99, 52, 49, 55, 101, 101, 53, 48, 48, 49, 101, 51, 53, 48, 48, 57, 56, 52, 98, 54, 102,
101, 97, 51, 53, 54, 56, 51, 97, 55, 99, 97, 99, 49, 52, 49, 97, 48, 52, 51, 99, 52, 50, 48,
54, 52, 56, 51, 53, 100, 51, 52,
];
impl ZcashSerialize for OutPoint {
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
writer.write_all(&self.hash.0[..])?;
writer.write_u32::<LittleEndian>(self.index)?;
Ok(())
}
}
impl ZcashDeserialize for OutPoint {
fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
Ok(OutPoint {
hash: TransactionHash(reader.read_32_bytes()?),
index: reader.read_u32::<LittleEndian>()?,
})
}
}
// Coinbase inputs include block heights (BIP34). These are not encoded
// directly, but as a Bitcoin script that pushes the block height to the stack
// when executed. The script data is otherwise unused. Because we want to
// *parse* transactions into an internal representation where illegal states are
// unrepresentable, we need just enough parsing of Bitcoin scripts to parse the
// coinbase height and split off the rest of the (inert) coinbase data.
fn parse_coinbase_height(
mut data: Vec<u8>,
) -> Result<(BlockHeight, CoinbaseData), SerializationError> {
match (data.get(0), data.len()) {
// Blocks 1 through 16 inclusive encode block height with OP_N opcodes.
(Some(op_n @ 0x51..=0x60), len) if len >= 1 => Ok((
BlockHeight((op_n - 0x50) as u32),
CoinbaseData(data.split_off(1)),
)),
// Blocks 17 through 256 exclusive encode block height with the `0x01` opcode.
(Some(0x01), len) if len >= 2 => {
Ok((BlockHeight(data[1] as u32), CoinbaseData(data.split_off(2))))
}
// Blocks 256 through 65536 exclusive encode block height with the `0x02` opcode.
(Some(0x02), len) if len >= 3 => Ok((
BlockHeight(data[1] as u32 + ((data[2] as u32) << 8)),
CoinbaseData(data.split_off(3)),
)),
// Blocks 65536 through 2**24 exclusive encode block height with the `0x03` opcode.
(Some(0x03), len) if len >= 4 => Ok((
BlockHeight(data[1] as u32 + ((data[2] as u32) << 8) + ((data[3] as u32) << 16)),
CoinbaseData(data.split_off(4)),
)),
        // Due to an oversight, the genesis block does not encode the block height; special-case it.
// The first five bytes are [4, 255, 255, 7, 31], the little-endian encoding of
// 520_617_983. This is lucky because it means we can special-case the genesis block
// while remaining below the maximum `BlockHeight` of 500_000_000 forced by `LockTime`.
// While it's unlikely this code will ever process a block height that high, this means
// we don't need to maintain a cascade of different invariants for allowable `BlockHeight`s.
(Some(0x04), _) if data[..] == GENESIS_COINBASE_DATA[..] => {
Ok((BlockHeight(0), CoinbaseData(data)))
}
// As noted above, this is included for completeness.
(Some(0x04), len) if len >= 5 => {
let h = data[1] as u32
+ ((data[2] as u32) << 8)
+ ((data[3] as u32) << 16)
+ ((data[4] as u32) << 24);
if h <= BlockHeight::MAX.0 {
Ok((BlockHeight(h), CoinbaseData(data.split_off(5))))
} else {
Err(SerializationError::Parse("Invalid block height"))
}
}
_ => Err(SerializationError::Parse(
"Could not parse BIP34 height in coinbase data",
)),
}
}
fn coinbase_height_len(height: BlockHeight) -> usize {
// We can't write this as a match statement on stable until exclusive range
// guards are stabilized.
if let 0 = height.0 {
0
} else if let _h @ 1..=16 = height.0 {
1
} else if let _h @ 17..=255 = height.0 {
2
} else if let _h @ 256..=65535 = height.0 {
3
} else if let _h @ 65536..=16_777_215 = height.0 {
4
} else if let _h @ 16_777_216..=BlockHeight::MAX_AS_U32 = height.0 {
5
} else {
panic!("Invalid coinbase height");
}
}
fn write_coinbase_height<W: io::Write>(height: BlockHeight, mut w: W) -> Result<(), io::Error> {
// We can't write this as a match statement on stable until exclusive range
// guards are stabilized.
if let 0 = height.0 {
// Genesis block does not include height.
} else if let h @ 1..=16 = height.0 {
w.write_u8(0x50 + (h as u8))?;
} else if let h @ 17..=255 = height.0 {
w.write_u8(0x01)?;
w.write_u8(h as u8)?;
} else if let h @ 256..=65535 = height.0 {
w.write_u8(0x02)?;
w.write_u16::<LittleEndian>(h as u16)?;
} else if let h @ 65536..=16_777_215 = height.0 {
w.write_u8(0x03)?;
w.write_u8(h as u8)?;
w.write_u8((h >> 8) as u8)?;
w.write_u8((h >> 16) as u8)?;
} else if let h @ 16_777_216..=BlockHeight::MAX_AS_U32 = height.0 {
w.write_u8(0x04)?;
w.write_u32::<LittleEndian>(h)?;
} else {
panic!("Invalid coinbase height");
}
Ok(())
}
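// A minimal illustrative sketch, not part of the original source: a round-trip
// check of the BIP34 coinbase height encoding implemented above. It assumes the
// inner field of `BlockHeight` and `CoinbaseData::as_ref` are accessible here,
// as they are elsewhere in this module.
#[cfg(test)]
mod coinbase_height_sketch {
    use super::*;

    #[test]
    fn height_500_000_round_trips_through_the_0x03_opcode() {
        // 500_000 = 0x07A120, written little-endian after the 0x03 length opcode.
        let mut buf = Vec::new();
        write_coinbase_height(BlockHeight(500_000), &mut buf).unwrap();
        assert_eq!(buf, vec![0x03u8, 0x20, 0xA1, 0x07]);
        // Parsing the same bytes recovers the height and leaves no coinbase data.
        let (height, data) = parse_coinbase_height(buf).unwrap();
        assert_eq!(height.0, 500_000);
        assert!(data.as_ref().is_empty());
    }
}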
impl ZcashSerialize for TransparentInput {
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
match self {
TransparentInput::PrevOut {
outpoint,
script,
sequence,
} => {
outpoint.zcash_serialize(&mut writer)?;
script.zcash_serialize(&mut writer)?;
writer.write_u32::<LittleEndian>(*sequence)?;
}
TransparentInput::Coinbase {
height,
data,
sequence,
} => {
writer.write_all(&[0; 32][..])?;
writer.write_u32::<LittleEndian>(0xffff_ffff)?;
let height_len = coinbase_height_len(*height);
let total_len = height_len + data.as_ref().len();
writer.write_compactsize(total_len as u64)?;
write_coinbase_height(*height, &mut writer)?;
writer.write_all(&data.as_ref()[..])?;
writer.write_u32::<LittleEndian>(*sequence)?;
}
}
Ok(())
}
}
impl ZcashDeserialize for TransparentInput {
fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
// This inlines the OutPoint deserialization to peek at the hash value
// and detect whether we have a coinbase input.
let bytes = reader.read_32_bytes()?;
if bytes == [0; 32] {
if reader.read_u32::<LittleEndian>()? != 0xffff_ffff {
return Err(SerializationError::Parse("wrong index in coinbase"));
}
let len = reader.read_compactsize()?;
if len > 100 {
return Err(SerializationError::Parse("coinbase has too much data"));
}
let mut data = Vec::with_capacity(len as usize);
(&mut reader).take(len).read_to_end(&mut data)?;
let (height, data) = parse_coinbase_height(data)?;
let sequence = reader.read_u32::<LittleEndian>()?;
Ok(TransparentInput::Coinbase {
height,
data,
sequence,
})
} else {
Ok(TransparentInput::PrevOut {
outpoint: OutPoint {
hash: TransactionHash(bytes),
index: reader.read_u32::<LittleEndian>()?,
},
script: Script::zcash_deserialize(&mut reader)?,
sequence: reader.read_u32::<LittleEndian>()?,
})
}
}
}
impl ZcashSerialize for TransparentOutput {
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
writer.write_u64::<LittleEndian>(self.value.into())?;
self.pk_script.zcash_serialize(&mut writer)?;
Ok(())
}
}
impl ZcashDeserialize for TransparentOutput {
fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
Ok(TransparentOutput {
value: reader.read_u64::<LittleEndian>()?.try_into()?,
pk_script: Script::zcash_deserialize(&mut reader)?,
})
}
}
impl<P: ZkSnarkProof> ZcashSerialize for JoinSplit<P> {
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
writer.write_u64::<LittleEndian>(self.vpub_old.into())?;
writer.write_u64::<LittleEndian>(self.vpub_new.into())?;
writer.write_all(&self.anchor[..])?;
self.nullifiers[0].zcash_serialize(&mut writer)?;
self.nullifiers[1].zcash_serialize(&mut writer)?;
writer.write_all(&self.commitments[0][..])?;
writer.write_all(&self.commitments[1][..])?;
writer.write_all(&self.ephemeral_key.as_bytes()[..])?;
writer.write_all(&self.random_seed[..])?;
self.vmacs[0].zcash_serialize(&mut writer)?;
self.vmacs[1].zcash_serialize(&mut writer)?;
self.zkproof.zcash_serialize(&mut writer)?;
self.enc_ciphertexts[0].zcash_serialize(&mut writer)?;
self.enc_ciphertexts[1].zcash_serialize(&mut writer)?;
Ok(())
}
}
impl<P: ZkSnarkProof> ZcashDeserialize for JoinSplit<P> {
fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
Ok(JoinSplit::<P> {
vpub_old: reader.read_u64::<LittleEndian>()?.try_into()?,
vpub_new: reader.read_u64::<LittleEndian>()?.try_into()?,
anchor: reader.read_32_bytes()?,
nullifiers: [
crate::nullifier::sprout::Nullifier::zcash_deserialize(&mut reader)?,
crate::nullifier::sprout::Nullifier::zcash_deserialize(&mut reader)?,
],
commitments: [reader.read_32_bytes()?, reader.read_32_bytes()?],
ephemeral_key: x25519_dalek::PublicKey::from(reader.read_32_bytes()?),
random_seed: reader.read_32_bytes()?,
vmacs: [
crate::types::MAC::zcash_deserialize(&mut reader)?,
crate::types::MAC::zcash_deserialize(&mut reader)?,
],
zkproof: P::zcash_deserialize(&mut reader)?,
enc_ciphertexts: [
notes::sprout::EncryptedCiphertext::zcash_deserialize(&mut reader)?,
notes::sprout::EncryptedCiphertext::zcash_deserialize(&mut reader)?,
],
})
}
}
impl<P: ZkSnarkProof> ZcashSerialize for JoinSplitData<P> {
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
writer.write_compactsize(self.joinsplits().count() as u64)?;
for joinsplit in self.joinsplits() {
joinsplit.zcash_serialize(&mut writer)?;
}
writer.write_all(&<[u8; 32]>::from(self.pub_key)[..])?;
writer.write_all(&<[u8; 64]>::from(self.sig)[..])?;
Ok(())
}
}
impl<P: ZkSnarkProof> ZcashDeserialize for Option<JoinSplitData<P>> {
fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
let num_joinsplits = reader.read_compactsize()?;
match num_joinsplits {
0 => Ok(None),
n => {
let first = JoinSplit::zcash_deserialize(&mut reader)?;
let mut rest = Vec::with_capacity((n - 1) as usize);
for _ in 0..(n - 1) {
rest.push(JoinSplit::zcash_deserialize(&mut reader)?);
}
let pub_key = reader.read_32_bytes()?.into();
let sig = reader.read_64_bytes()?.into();
Ok(Some(JoinSplitData {
first,
rest,
pub_key,
sig,
}))
}
}
}
}
impl ZcashSerialize for Spend {
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
writer.write_all(&self.cv[..])?;
writer.write_all(&self.anchor.0[..])?;
self.nullifier.zcash_serialize(&mut writer)?;
writer.write_all(&<[u8; 32]>::from(self.rk)[..])?;
self.zkproof.zcash_serialize(&mut writer)?;
writer.write_all(&<[u8; 64]>::from(self.spend_auth_sig)[..])?;
Ok(())
}
}
impl ZcashDeserialize for Spend {
fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
use crate::note_commitment_tree::SaplingNoteTreeRootHash;
Ok(Spend {
cv: reader.read_32_bytes()?,
anchor: SaplingNoteTreeRootHash(reader.read_32_bytes()?),
nullifier: crate::nullifier::sapling::Nullifier::zcash_deserialize(&mut reader)?,
rk: reader.read_32_bytes()?.into(),
zkproof: Groth16Proof::zcash_deserialize(&mut reader)?,
spend_auth_sig: reader.read_64_bytes()?.into(),
})
}
}
impl ZcashSerialize for Output {
fn | <W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
writer.write_all(&self.cv[..])?;
writer.write_all(&self.cmu[..])?;
writer.write_all(&self.ephemeral_key.to_bytes())?;
self.enc_ciphertext.zcash_serialize(&mut writer)?;
self.out_ciphertext.zcash_serialize(&mut writer)?;
self.zkproof.zcash_serialize(&mut writer)?;
Ok(())
}
}
impl ZcashDeserialize for Output {
fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
Ok(Output {
cv: reader.read_32_bytes()?,
cmu: reader.read_32_bytes()?,
ephemeral_key: jubjub::AffinePoint::from_bytes(reader.read_32_bytes()?).unwrap(),
enc_ciphertext: notes::sapling::EncryptedCiphertext::zcash_deserialize(&mut reader)?,
out_ciphertext: notes::sapling::OutCiphertext::zcash_deserialize(&mut reader)?,
zkproof: Groth16Proof::zcash_deserialize(&mut reader)?,
})
}
}
impl ZcashSerialize for Transaction {
fn zcash_serialize<W: io::Write>(&self, mut writer: W) -> Result<(), io::Error> {
// Post-Sapling, transaction size is limited to MAX_BLOCK_BYTES.
// (Strictly, the maximum transaction size is about 1.5 kB less,
// because blocks also include a block header.)
//
// Currently, all transaction structs are parsed as part of a
// block. So we don't need to check transaction size here, until
// we start parsing mempool transactions, or generating our own
// transactions (see #483).
//
// Since we checkpoint on Sapling activation, we won't ever need
// to check the smaller pre-Sapling transaction size limit.
match self {
Transaction::V1 {
inputs,
outputs,
lock_time,
} => {
writer.write_u32::<LittleEndian>(1)?;
inputs.zcash_serialize(&mut writer)?;
outputs.zcash_serialize(&mut writer)?;
lock_time.zcash_serialize(&mut writer)?;
}
Transaction::V2 {
inputs,
outputs,
lock_time,
joinsplit_data,
} => {
writer.write_u32::<LittleEndian>(2)?;
inputs.zcash_serialize(&mut writer)?;
outputs.zcash_serialize(&mut writer)?;
lock_time.zcash_serialize(&mut writer)?;
match joinsplit_data {
// Write 0 for nJoinSplits to signal no JoinSplitData.
None => writer.write_compactsize(0)?,
Some(jsd) => jsd.zcash_serialize(&mut writer)?,
}
}
Transaction::V3 {
inputs,
outputs,
lock_time,
expiry_height,
joinsplit_data,
} => {
// Write version 3 and set the fOverwintered bit.
writer.write_u32::<LittleEndian>(3 | (1 << 31))?;
writer.write_u32::<LittleEndian>(OVERWINTER_VERSION_GROUP_ID)?;
inputs.zcash_serialize(&mut writer)?;
outputs.zcash_serialize(&mut writer)?;
lock_time.zcash_serialize(&mut writer)?;
writer.write_u32::<LittleEndian>(expiry_height.0)?;
match joinsplit_data {
// Write 0 for nJoinSplits to signal no JoinSplitData.
None => writer.write_compactsize(0)?,
Some(jsd) => jsd.zcash_serialize(&mut writer)?,
}
}
Transaction::V4 {
inputs,
outputs,
lock_time,
expiry_height,
value_balance,
shielded_data,
joinsplit_data,
} => {
// Write version 4 and set the fOverwintered bit.
writer.write_u32::<LittleEndian>(4 | (1 << 31))?;
writer.write_u32::<LittleEndian>(SAPLING_VERSION_GROUP_ID)?;
inputs.zcash_serialize(&mut writer)?;
outputs.zcash_serialize(&mut writer)?;
lock_time.zcash_serialize(&mut writer)?;
writer.write_u32::<LittleEndian>(expiry_height.0)?;
writer.write_i64::<LittleEndian>((*value_balance).into())?;
// The previous match arms serialize in one go, because the
// internal structure happens to nicely line up with the
// serialized structure. However, this is not possible for
// version 4 transactions, as the binding_sig for the
// ShieldedData is placed at the end of the transaction. So
// instead we have to interleave serialization of the
// ShieldedData and the JoinSplitData.
match shielded_data {
None => {
// Signal no shielded spends and no shielded outputs.
writer.write_compactsize(0)?;
writer.write_compactsize(0)?;
}
Some(shielded_data) => {
writer.write_compactsize(shielded_data.spends().count() as u64)?;
for spend in shielded_data.spends() {
spend.zcash_serialize(&mut writer)?;
}
writer.write_compactsize(shielded_data.outputs().count() as u64)?;
for output in shielded_data.outputs() {
output.zcash_serialize(&mut writer)?;
}
}
}
match joinsplit_data {
None => writer.write_compactsize(0)?,
Some(jsd) => jsd.zcash_serialize(&mut writer)?,
}
match shielded_data {
Some(sd) => writer.write_all(&<[u8; 64]>::from(sd.binding_sig)[..])?,
None => {}
}
}
}
Ok(())
}
}
impl ZcashDeserialize for Transaction {
fn zcash_deserialize<R: io::Read>(mut reader: R) -> Result<Self, SerializationError> {
let (version, overwintered) = {
const LOW_31_BITS: u32 = (1 << 31) - 1;
let header = reader.read_u32::<LittleEndian>()?;
(header & LOW_31_BITS, header >> 31 != 0)
};
// The overwintered flag MUST NOT be set for version 1 and 2 transactions.
match (version, overwintered) {
(1, false) => Ok(Transaction::V1 {
inputs: Vec::zcash_deserialize(&mut reader)?,
outputs: Vec::zcash_deserialize(&mut reader)?,
lock_time: LockTime::zcash_deserialize(&mut reader)?,
}),
(2, false) => {
// Version 2 transactions use Sprout-on-BCTV14.
type OptV2JSD = Option<JoinSplitData<Bctv14Proof>>;
Ok(Transaction::V2 {
inputs: Vec::zcash_deserialize(&mut reader)?,
outputs: Vec::zcash_deserialize(&mut reader)?,
lock_time: LockTime::zcash_deserialize(&mut reader)?,
joinsplit_data: OptV2JSD::zcash_deserialize(&mut reader)?,
})
}
(3, true) => {
let id = reader.read_u32::<LittleEndian>()?;
if id != OVERWINTER_VERSION_GROUP_ID {
return Err(SerializationError::Parse(
"expected OVERWINTER_VERSION_GROUP_ID",
));
}
// Version 3 transactions use Sprout-on-BCTV14.
type OptV3JSD = Option<JoinSplitData<Bctv14Proof>>;
Ok(Transaction::V3 {
inputs: Vec::zcash_deserialize(&mut reader)?,
outputs: Vec::zcash_deserialize(&mut reader)?,
lock_time: LockTime::zcash_deserialize(&mut reader)?,
expiry_height: BlockHeight(reader.read_u32::<LittleEndian>()?),
joinsplit_data: OptV3JSD::zcash_deserialize(&mut reader)?,
})
}
(4, true) => {
let id = reader.read_u32::<LittleEndian>()?;
if id != SAPLING_VERSION_GROUP_ID {
return Err(SerializationError::Parse(
"expected SAPLING_VERSION_GROUP_ID",
));
}
// Version 4 transactions use Sprout-on-Groth16.
type OptV4JSD = Option<JoinSplitData<Groth16Proof>>;
// The previous match arms deserialize in one go, because the
// internal structure happens to nicely line up with the
// serialized structure. However, this is not possible for
// version 4 transactions, as the binding_sig for the
// ShieldedData is placed at the end of the transaction. So
// instead we have to pull the component parts out manually and
// then assemble them.
let inputs = Vec::zcash_deserialize(&mut reader)?;
let outputs = Vec::zcash_deserialize(&mut reader)?;
let lock_time = LockTime::zcash_deserialize(&mut reader)?;
let expiry_height = BlockHeight(reader.read_u32::<LittleEndian>()?);
let value_balance = reader.read_i64::<LittleEndian>()?.try_into()?;
let mut shielded_spends = Vec::zcash_deserialize(&mut reader)?;
let mut shielded_outputs = Vec::zcash_deserialize(&mut reader)?;
let joinsplit_data = OptV4JSD::zcash_deserialize(&mut reader)?;
use futures::future::Either::*;
let shielded_data = if !shielded_spends.is_empty() {
Some(ShieldedData {
first: Left(shielded_spends.remove(0)),
rest_spends: shielded_spends,
rest_outputs: shielded_outputs,
binding_sig: reader.read_64_bytes()?.into(),
})
} else if !shielded_outputs.is_empty() {
Some(ShieldedData {
first: Right(shielded_outputs.remove(0)),
rest_spends: shielded_spends,
rest_outputs: shielded_outputs,
binding_sig: reader.read_64_bytes()?.into(),
})
} else {
None
};
Ok(Transaction::V4 {
inputs,
outputs,
lock_time,
expiry_height,
value_balance,
shielded_data,
joinsplit_data,
})
}
(_, _) => Err(SerializationError::Parse("bad tx header")),
}
}
}
impl<T> ZcashDeserialize for Arc<T>
where
T: ZcashDeserialize,
{
fn zcash_deserialize<R: io::Read>(reader: R) -> Result<Self, SerializationError> {
Ok(Arc::new(T::zcash_deserialize(reader)?))
}
}
impl<T> ZcashSerialize for Arc<T>
where
T: ZcashSerialize,
{
fn zcash_serialize<W: io::Write>(&self, writer: W) -> Result<(), io::Error> {
T::zcash_serialize(self, writer)
}
}
| zcash_serialize |
index.js | import React, { Component } from "react";
import { Link } from "react-router-dom";
import "./style.css";
import DropDown from "../DropDown";
import Navheadings from "../Navheadings"
class Navbar extends Component{
render(){
return(
<nav className="navbar navbar-expand-lg navbar-dark navbg">
{this.props.id === "1" ?
<div className="collapse navbar-collapse" id="navbarSupportedContent">
<ul className="navbar-nav mr-auto">
<Navheadings href={"/entertainment/"+this.props.id} heading="Entertainment"></Navheadings>
<Navheadings href={"/safety/"+this.props.id} heading="Health"></Navheadings>
<li className="nav-item dropdown">
<DropDown id={this.props.id}><a className="navbar-brand dropdown-toggle" href="#" id="dropdownMenuLink" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Videos</a></DropDown>
</li> | <Link to="/login"><button className="btn btn-outline-light mr-sm-2" type="submit">Sign In</button></Link>
<Link to="/signup"><button className="btn btn-outline-light my-2 my-sm-0" type="submit">Sign Up</button></Link>
</form>
</div>
: <div className="collapse navbar-collapse" id="navbarSupportedContent">
<ul className="navbar-nav mr-auto">
<Navheadings href={"/entertainment/"+this.props.id} heading="Entertainment"></Navheadings>
<Navheadings href={"/safety/"+this.props.id} heading="Health"></Navheadings>
<Navheadings href={"/scheduler/"+this.props.id} heading="Scheduler"></Navheadings>
<Navheadings href={"/tracker/"+this.props.id} heading="Tracker"></Navheadings>
<li className="nav-item dropdown">
<DropDown id={this.props.id}><a className="navbar-brand dropdown-toggle" href="#" id="dropdownMenuLink" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Videos</a></DropDown>
</li>
</ul>
<form className="form-inline my-2 my-lg-0">
<Link to="/"><button className="btn btn-outline-light mr-sm-2" type="submit">Logout</button></Link>
</form>
</div>}
</nav>
);
}
}
export default Navbar; | </ul>
<form className="form-inline my-2 my-lg-0"> |
type.py | # Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ros2cli.node.strategy import NodeStrategy
from ros2topic.api import get_topic_names_and_types
from ros2topic.api import TopicNameCompleter
from ros2topic.verb import VerbExtension
class TypeVerb(VerbExtension):
"""Print a topic's type."""
def | (self, parser, cli_name):
arg = parser.add_argument(
'topic_name',
help="Name of the ROS topic to get type (e.g. '/chatter')")
arg.completer = TopicNameCompleter(
include_hidden_topics_key='include_hidden_topics')
def main(self, *, args):
with NodeStrategy(args) as node:
topic_names_and_types = get_topic_names_and_types(
node=node,
include_hidden_topics=args.include_hidden_topics)
for (topic_name, topic_types) in topic_names_and_types:
if args.topic_name == topic_name:
for topic_type in topic_types:
print(topic_type)
return 0
return 1
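# Hedged usage sketch, not part of the original source: with a publisher of
# std_msgs/msg/String on /chatter, `ros2 topic type /chatter` is expected to
# print `std_msgs/msg/String` and return 0; for a topic with no publishers or
# subscribers it prints nothing and returns 1, per main() above.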
| add_arguments |
__init__.py | from .camera import * | from .ray_tracer import *
from .scene_parser import *
from .world import * | from .obj_file import *
from .obj_parser import * |
index.ts | export * from './SVGContainer';
import './icon.scss';
// icons
import { IconError } from './icon/error';
import { IconInfo } from './icon/info';
import { IconLoading } from './icon/loading';
import { IconSuccess } from './icon/success';
import { IconWarn } from './icon/warn';
export const Icon = { | Info: IconInfo,
Loading: IconLoading,
Success: IconSuccess,
Warn: IconWarn,
};
export default Icon; | Error: IconError, |
partition.go | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ddl
import (
"bytes"
"context"
"fmt"
"strconv"
"strings"
"time"
"github.com/cznic/mathutil"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/parser"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/format"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/opcode"
"github.com/pingcap/tidb/ddl/placement"
"github.com/pingcap/tidb/ddl/util"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/meta"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/sqlexec"
"go.uber.org/zap"
)
const (
partitionMaxValue = "MAXVALUE"
)
func checkAddPartition(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.PartitionInfo, []model.PartitionDefinition, error) {
schemaID := job.SchemaID
tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID)
if err != nil {
return nil, nil, nil, errors.Trace(err)
}
partInfo := &model.PartitionInfo{}
err = job.DecodeArgs(&partInfo)
if err != nil {
job.State = model.JobStateCancelled
return nil, nil, nil, errors.Trace(err)
}
if len(tblInfo.Partition.AddingDefinitions) > 0 {
return tblInfo, partInfo, tblInfo.Partition.AddingDefinitions, nil
}
return tblInfo, partInfo, []model.PartitionDefinition{}, nil
}
func onAddTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
// Handle the rolling back job
if job.IsRollingback() {
ver, err := onDropTablePartition(t, job)
if err != nil {
return ver, errors.Trace(err)
}
return ver, nil
}
tblInfo, partInfo, addingDefinitions, err := checkAddPartition(t, job)
if err != nil {
return ver, err
}
// In order to skip maintaining the state check in partitionDefinition, TiDB uses addingDefinitions instead of a state field.
// So `job.SchemaState` is used here to determine what stage this job is at.
switch job.SchemaState {
case model.StateNone:
// job.SchemaState == model.StateNone means the job is in the initial state of add partition.
// Here we should use partInfo from the job directly and do some checks.
err = checkAddPartitionTooManyPartitions(uint64(len(tblInfo.Partition.Definitions) + len(partInfo.Definitions)))
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
err = checkAddPartitionValue(tblInfo, partInfo)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
err = checkAddPartitionNameUnique(tblInfo, partInfo)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
// none -> replica only
job.SchemaState = model.StateReplicaOnly
// move the adding definition into tableInfo.
updateAddingPartitionInfo(partInfo, tblInfo)
ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, true)
case model.StateReplicaOnly:
// replica only -> public
// Here we need to do some TiFlash replica completion checks.
// TODO: If a table has no TiFlashReplica, or the replica is not available, the replica-only state can be eliminated.
if tblInfo.TiFlashReplica != nil && tblInfo.TiFlashReplica.Available {
// In the available state, the newly added partition should wait for its replica to
// be finished. Otherwise queries to this partition will be blocked.
needWait, err := checkPartitionReplica(addingDefinitions, d)
if err != nil {
ver, err = convertAddTablePartitionJob2RollbackJob(t, job, err, tblInfo)
return ver, err
}
if needWait {
// The newly added partition hasn't been replicated yet.
// Do nothing for this job this time; wait for the next worker round.
time.Sleep(tiflashCheckTiDBHTTPAPIHalfInterval)
return ver, nil
}
}
// For normal tables and tables whose replica is finished, move the `addingDefinitions` into `Definitions`.
updatePartitionInfo(tblInfo)
ver, err = updateVersionAndTableInfo(t, job, tblInfo, true)
if err != nil {
return ver, errors.Trace(err)
}
// Finish this job.
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
asyncNotifyEvent(d, &util.Event{Tp: model.ActionAddTablePartition, TableInfo: tblInfo, PartInfo: partInfo})
default:
err = ErrInvalidDDLState.GenWithStackByArgs("partition", job.SchemaState)
}
return ver, errors.Trace(err)
}
// updatePartitionInfo merges `addingDefinitions` into `Definitions` in the tableInfo.
func updatePartitionInfo(tblInfo *model.TableInfo) {
parInfo := &model.PartitionInfo{}
oldDefs, newDefs := tblInfo.Partition.Definitions, tblInfo.Partition.AddingDefinitions
parInfo.Definitions = make([]model.PartitionDefinition, 0, len(newDefs)+len(oldDefs))
parInfo.Definitions = append(parInfo.Definitions, oldDefs...)
parInfo.Definitions = append(parInfo.Definitions, newDefs...)
tblInfo.Partition.Definitions = parInfo.Definitions
tblInfo.Partition.AddingDefinitions = nil
}
// updateAddingPartitionInfo writes the adding partitions into the `addingDefinitions` field in the tableInfo.
func updateAddingPartitionInfo(partitionInfo *model.PartitionInfo, tblInfo *model.TableInfo) {
newDefs := partitionInfo.Definitions
tblInfo.Partition.AddingDefinitions = make([]model.PartitionDefinition, 0, len(newDefs))
tblInfo.Partition.AddingDefinitions = append(tblInfo.Partition.AddingDefinitions, newDefs...)
}
// rollbackAddingPartitionInfo removes the `addingDefinitions` from the tableInfo.
func rollbackAddingPartitionInfo(tblInfo *model.TableInfo) []int64 {
physicalTableIDs := make([]int64, 0, len(tblInfo.Partition.AddingDefinitions))
for _, one := range tblInfo.Partition.AddingDefinitions {
physicalTableIDs = append(physicalTableIDs, one.ID)
}
tblInfo.Partition.AddingDefinitions = nil
return physicalTableIDs
}
// checkAddPartitionValue checks that the `less than` values of the new range partitions are strictly increasing.
func checkAddPartitionValue(meta *model.TableInfo, part *model.PartitionInfo) error {
if meta.Partition.Type == model.PartitionTypeRange && len(meta.Partition.Columns) == 0 {
newDefs, oldDefs := part.Definitions, meta.Partition.Definitions
rangeValue := oldDefs[len(oldDefs)-1].LessThan[0]
if strings.EqualFold(rangeValue, "MAXVALUE") {
return errors.Trace(ErrPartitionMaxvalue)
}
currentRangeValue, err := strconv.Atoi(rangeValue)
if err != nil {
return errors.Trace(err)
}
for i := 0; i < len(newDefs); i++ {
ifMaxvalue := strings.EqualFold(newDefs[i].LessThan[0], "MAXVALUE")
if ifMaxvalue && i == len(newDefs)-1 {
return nil
} else if ifMaxvalue && i != len(newDefs)-1 {
return errors.Trace(ErrPartitionMaxvalue)
}
nextRangeValue, err := strconv.Atoi(newDefs[i].LessThan[0])
if err != nil {
return errors.Trace(err)
}
if nextRangeValue <= currentRangeValue {
return errors.Trace(ErrRangeNotIncreasing)
}
currentRangeValue = nextRangeValue
}
}
return nil
}
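// Illustrative sketch, not from the original source, of how checkAddPartitionValue
// behaves for a RANGE table partitioned by an expression; the literals are
// assumptions for the example only:
//
//	existing: PARTITION p0 VALUES LESS THAN (100)
//	adding:   PARTITION p1 VALUES LESS THAN (50)   -> ErrRangeNotIncreasing (50 <= 100)
//	adding:   PARTITION p1 VALUES LESS THAN (200)  -> ok
//	existing: PARTITION p0 VALUES LESS THAN (MAXVALUE)
//	adding:   any new partition                    -> ErrPartitionMaxvalue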
func checkPartitionReplica(addingDefinitions []model.PartitionDefinition, d *ddlCtx) (needWait bool, err error) {
ctx := context.Background()
pdCli := d.store.(tikv.Storage).GetRegionCache().PDClient()
stores, err := pdCli.GetAllStores(ctx)
if err != nil {
return needWait, errors.Trace(err)
}
for _, pd := range addingDefinitions {
startKey, endKey := tablecodec.GetTableHandleKeyRange(pd.ID)
regions, err := pdCli.ScanRegions(ctx, startKey, endKey, -1)
if err != nil {
return needWait, errors.Trace(err)
}
// For every region in the partition, if it has some corresponding peers and
// no pending peers, that means the replication has completed.
for _, region := range regions {
regionState, err := pdCli.GetRegionByID(ctx, region.Meta.Id)
if err != nil {
return needWait, errors.Trace(err)
}
tiflashPeerAtLeastOne := checkTiFlashPeerStoreAtLeastOne(stores, regionState.Meta.Peers)
// It's unnecessary to wait for all TiFlash peers to be replicated.
// Here we only make sure that the TiFlash peer count > 0 (at least one).
if tiflashPeerAtLeastOne {
continue
}
needWait = true
logutil.BgLogger().Info("[ddl] partition replicas check failed in replica-only DDL state", zap.Int64("pID", pd.ID), zap.Uint64("wait region ID", region.Meta.Id), zap.Bool("tiflash peer at least one", tiflashPeerAtLeastOne), zap.Time("check time", time.Now()))
return needWait, nil
}
}
logutil.BgLogger().Info("[ddl] partition replicas check ok in replica-only DDL state")
return needWait, nil
}
func checkTiFlashPeerStoreAtLeastOne(stores []*metapb.Store, peers []*metapb.Peer) bool {
for _, peer := range peers {
for _, store := range stores {
if peer.StoreId == store.Id && storeHasEngineTiFlashLabel(store) {
return true
}
}
}
return false
}
func storeHasEngineTiFlashLabel(store *metapb.Store) bool {
for _, label := range store.Labels {
if label.Key == "engine" && label.Value == "tiflash" {
return true
}
}
return false
}
// buildTablePartitionInfo builds partition info and checks for some errors.
func buildTablePartitionInfo(ctx sessionctx.Context, s *ast.CreateTableStmt) (*model.PartitionInfo, error) {
if s.Partition == nil {
return nil, nil
}
if ctx.GetSessionVars().EnableTablePartition == "off" {
ctx.GetSessionVars().StmtCtx.AppendWarning(errTablePartitionDisabled)
return nil, nil
}
var enable bool
// When tidb_enable_table_partition is 'on' or 'auto'.
if s.Partition.Tp == model.PartitionTypeRange {
if s.Partition.Sub == nil {
// Partition by range expression is enabled by default.
if s.Partition.ColumnNames == nil {
enable = true
}
// Partition by range columns and just one column.
if len(s.Partition.ColumnNames) == 1 {
enable = true
}
}
}
// Partition by hash is enabled by default.
// Note that linear hash is not enabled.
if s.Partition.Tp == model.PartitionTypeHash {
if !s.Partition.Linear && s.Partition.Sub == nil {
enable = true
}
}
if !enable {
ctx.GetSessionVars().StmtCtx.AppendWarning(errUnsupportedCreatePartition)
return nil, nil
}
pi := &model.PartitionInfo{
Type: s.Partition.Tp,
Enable: enable,
Num: s.Partition.Num,
}
if s.Partition.Expr != nil {
buf := new(bytes.Buffer)
restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags, buf)
if err := s.Partition.Expr.Restore(restoreCtx); err != nil {
return nil, err
}
pi.Expr = buf.String()
} else if s.Partition.ColumnNames != nil {
// TODO: Support multiple columns for 'PARTITION BY RANGE COLUMNS'.
if len(s.Partition.ColumnNames) != 1 {
pi.Enable = false
ctx.GetSessionVars().StmtCtx.AppendWarning(ErrUnsupportedPartitionByRangeColumns)
}
pi.Columns = make([]model.CIStr, 0, len(s.Partition.ColumnNames))
for _, cn := range s.Partition.ColumnNames {
pi.Columns = append(pi.Columns, cn.Name)
}
}
if s.Partition.Tp == model.PartitionTypeRange {
if err := buildRangePartitionDefinitions(ctx, s, pi); err != nil {
return nil, errors.Trace(err)
}
} else if s.Partition.Tp == model.PartitionTypeHash {
if err := buildHashPartitionDefinitions(ctx, s, pi); err != nil {
return nil, errors.Trace(err)
}
}
return pi, nil
}
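// Illustrative sketch, not from the original source, of which CREATE TABLE
// partition clauses the checks above accept, assuming tidb_enable_table_partition
// is not set to "off":
//
//	PARTITION BY RANGE (id) (...)               -> partition info is built
//	PARTITION BY RANGE COLUMNS (dt) (...)       -> partition info is built (single column only)
//	PARTITION BY HASH (id) PARTITIONS 4         -> partition info is built
//	PARTITION BY LINEAR HASH (id) PARTITIONS 4  -> warning, table is created unpartitioned
//	any clause with subpartitions               -> warning, table is created unpartitioned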
func buildHashPartitionDefinitions(ctx sessionctx.Context, s *ast.CreateTableStmt, pi *model.PartitionInfo) error {
if err := checkAddPartitionTooManyPartitions(pi.Num); err != nil {
return err
}
defs := make([]model.PartitionDefinition, pi.Num)
for i := 0; i < len(defs); i++ {
if len(s.Partition.Definitions) == 0 {
defs[i].Name = model.NewCIStr(fmt.Sprintf("p%v", i))
} else {
def := s.Partition.Definitions[i]
defs[i].Name = def.Name
defs[i].Comment, _ = def.Comment()
}
}
pi.Definitions = defs
return nil
}
func buildRangePartitionDefinitions(ctx sessionctx.Context, s *ast.CreateTableStmt, pi *model.PartitionInfo) (err error) {
for _, def := range s.Partition.Definitions {
comment, _ := def.Comment()
err = checkTooLongTable(def.Name)
if err != nil {
return err
}
piDef := model.PartitionDefinition{
Name: def.Name,
Comment: comment,
}
buf := new(bytes.Buffer)
// Range columns partitions support multi-column partitions.
for _, expr := range def.Clause.(*ast.PartitionDefinitionClauseLessThan).Exprs {
expr.Format(buf)
piDef.LessThan = append(piDef.LessThan, buf.String())
buf.Reset()
}
pi.Definitions = append(pi.Definitions, piDef)
}
return nil
}
func checkPartitionNameUnique(pi *model.PartitionInfo) error {
newPars := pi.Definitions
partNames := make(map[string]struct{}, len(newPars))
for _, newPar := range newPars {
if _, ok := partNames[newPar.Name.L]; ok {
return ErrSameNamePartition.GenWithStackByArgs(newPar.Name)
}
partNames[newPar.Name.L] = struct{}{}
}
return nil
}
func checkAddPartitionNameUnique(tbInfo *model.TableInfo, pi *model.PartitionInfo) error {
partNames := make(map[string]struct{})
if tbInfo.Partition != nil {
oldPars := tbInfo.Partition.Definitions
for _, oldPar := range oldPars {
partNames[oldPar.Name.L] = struct{}{}
}
}
newPars := pi.Definitions
for _, newPar := range newPars {
if _, ok := partNames[newPar.Name.L]; ok {
return ErrSameNamePartition.GenWithStackByArgs(newPar.Name)
}
partNames[newPar.Name.L] = struct{}{}
}
return nil
}
func checkAndOverridePartitionID(newTableInfo, oldTableInfo *model.TableInfo) error {
// If any old partitionInfo has been lost, the partition ID has been lost too, and so has the data, so the repair fails.
if newTableInfo.Partition == nil {
return nil
}
if oldTableInfo.Partition == nil {
return ErrRepairTableFail.GenWithStackByArgs("Old table doesn't have partitions")
}
if newTableInfo.Partition.Type != oldTableInfo.Partition.Type {
return ErrRepairTableFail.GenWithStackByArgs("Partition type should be the same")
}
// Check whether partitionType is hash partition.
if newTableInfo.Partition.Type == model.PartitionTypeHash {
if newTableInfo.Partition.Num != oldTableInfo.Partition.Num {
return ErrRepairTableFail.GenWithStackByArgs("Hash partition num should be the same")
}
}
for i, newOne := range newTableInfo.Partition.Definitions {
found := false
for _, oldOne := range oldTableInfo.Partition.Definitions {
// Fix issue 17952, which wants to substitute the partition range expression.
// So the stringSliceEqual(newOne.LessThan, oldOne.LessThan) check is eliminated here.
if newOne.Name.L == oldOne.Name.L {
newTableInfo.Partition.Definitions[i].ID = oldOne.ID
found = true
break
}
}
if !found {
return ErrRepairTableFail.GenWithStackByArgs("Partition " + newOne.Name.L + " has lost")
}
}
return nil
}
func stringSliceEqual(a, b []string) bool {
if len(a) != len(b) {
return false
}
if len(a) == 0 {
return true
}
// Accelerate the comparison by eliminating the index bound check.
b = b[:len(a)]
for i, v := range a {
if v != b[i] {
return false
}
}
return true
}
// hasTimestampField derives from https://github.com/mysql/mysql-server/blob/5.7/sql/item_func.h#L387
func hasTimestampField(ctx sessionctx.Context, tblInfo *model.TableInfo, expr ast.ExprNode) (bool, error) {
partCols, err := checkPartitionColumns(tblInfo, expr)
if err != nil {
return false, err
}
for _, c := range partCols {
if c.FieldType.Tp == mysql.TypeTimestamp {
return true, nil
}
}
return false, nil
}
// hasDateField derives from https://github.com/mysql/mysql-server/blob/5.7/sql/item_func.h#L399
func hasDateField(ctx sessionctx.Context, tblInfo *model.TableInfo, expr ast.ExprNode) (bool, error) {
partCols, err := checkPartitionColumns(tblInfo, expr)
if err != nil {
return false, err
}
for _, c := range partCols {
if c.FieldType.Tp == mysql.TypeDate || c.FieldType.Tp == mysql.TypeDatetime {
return true, nil
}
}
return false, nil
}
// hasTimeField derives from https://github.com/mysql/mysql-server/blob/5.7/sql/item_func.h#L412
func hasTimeField(ctx sessionctx.Context, tblInfo *model.TableInfo, expr ast.ExprNode) (bool, error) {
partCols, err := checkPartitionColumns(tblInfo, expr)
if err != nil {
return false, err
}
for _, c := range partCols {
if c.FieldType.Tp == mysql.TypeDatetime || c.FieldType.Tp == mysql.TypeDuration {
return true, nil
}
}
return false, nil
}
// defaultTimezoneDependent derives from https://github.com/mysql/mysql-server/blob/5.7/sql/item_func.h#L445
// We assume the result of any function that has a TIMESTAMP argument to be
// timezone-dependent, since a TIMESTAMP value in both numeric and string
// contexts is interpreted according to the current timezone.
// The only exception is UNIX_TIMESTAMP() which returns the internal
// representation of a TIMESTAMP argument verbatim, and thus does not depend on
// the timezone.
func defaultTimezoneDependent(ctx sessionctx.Context, tblInfo *model.TableInfo, expr ast.ExprNode) (bool, error) {
v, err := hasTimestampField(ctx, tblInfo, expr)
if err != nil {
return false, err
}
return !v, nil
}
func checkPartitionFuncCallValid(ctx sessionctx.Context, tblInfo *model.TableInfo, expr *ast.FuncCallExpr) error {
// We assume the result of any function that has a TIMESTAMP argument to be
// timezone-dependent, since a TIMESTAMP value in both numeric and string
// contexts is interpreted according to the current timezone.
// The only exception is UNIX_TIMESTAMP() which returns the internal
// representation of a TIMESTAMP argument verbatim, and thus does not depend on
// the timezone.
// See https://github.com/mysql/mysql-server/blob/5.7/sql/item_func.h#L445
if expr.FnName.L != ast.UnixTimestamp {
for _, arg := range expr.Args {
if colName, ok := arg.(*ast.ColumnNameExpr); ok {
col := findColumnByName(colName.Name.Name.L, tblInfo)
if col == nil {
return ErrBadField.GenWithStackByArgs(colName.Name.Name.O, "expression")
}
if ok && col.FieldType.Tp == mysql.TypeTimestamp {
return errors.Trace(errWrongExprInPartitionFunc)
}
}
}
}
// check function which allowed in partitioning expressions
// see https://dev.mysql.com/doc/mysql-partitioning-excerpt/5.7/en/partitioning-limitations-functions.html
switch expr.FnName.L {
// MySQL doesn't allow creating partitions with expressions with non-matching
// arguments as a (sub)partitioning function,
// but we want to allow such expressions when opening existing tables for
// easier maintenance. This exception should be deprecated at some point in the future so that we always throw an error.
// See https://github.com/mysql/mysql-server/blob/5.7/sql/sql_partition.cc#L1072
case ast.Day, ast.DayOfMonth, ast.DayOfWeek, ast.DayOfYear, ast.Month, ast.Quarter, ast.ToDays, ast.ToSeconds,
ast.Weekday, ast.Year, ast.YearWeek:
return checkResultOK(hasDateField(ctx, tblInfo, expr))
case ast.Hour, ast.MicroSecond, ast.Minute, ast.Second, ast.TimeToSec:
return checkResultOK(hasTimeField(ctx, tblInfo, expr))
case ast.UnixTimestamp:
if len(expr.Args) != 1 {
return errors.Trace(errWrongExprInPartitionFunc)
}
col, err := expression.RewriteSimpleExprWithTableInfo(ctx, tblInfo, expr.Args[0])
if err != nil {
return errors.Trace(err)
}
if col.GetType().Tp != mysql.TypeTimestamp {
return errors.Trace(errWrongExprInPartitionFunc)
}
return nil
case ast.Abs, ast.Ceiling, ast.DateDiff, ast.Extract, ast.Floor, ast.Mod:
for _, arg := range expr.Args {
if err := checkPartitionExprValid(ctx, tblInfo, arg); err != nil {
return err
}
}
return nil
}
return errors.Trace(ErrPartitionFunctionIsNotAllowed)
}
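// Illustrative behaviour of the function-call check above, not from the original
// source; the column types are assumptions for the example only:
//
//	UNIX_TIMESTAMP(ts)  with ts TIMESTAMP  -> allowed
//	YEAR(ts)            with ts TIMESTAMP  -> errWrongExprInPartitionFunc (timezone-dependent)
//	TO_DAYS(d)          with d DATE        -> allowed
//	RAND()                                 -> ErrPartitionFunctionIsNotAllowed (not in the whitelist)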
// checkPartitionExprValid checks whether the partition expression is valid.
func checkPartitionExprValid(ctx sessionctx.Context, tblInfo *model.TableInfo, expr ast.ExprNode) error {
switch v := expr.(type) {
case *ast.FuncCastExpr, *ast.CaseExpr, *ast.SubqueryExpr, *ast.WindowFuncExpr, *ast.RowExpr, *ast.DefaultExpr, *ast.ValuesExpr:
return errors.Trace(ErrPartitionFunctionIsNotAllowed)
case *ast.FuncCallExpr:
return checkPartitionFuncCallValid(ctx, tblInfo, v)
case *ast.BinaryOperationExpr:
// The DIV operator (opcode.IntDiv) is also supported; the / operator (opcode.Div) is not permitted.
// see https://dev.mysql.com/doc/refman/5.7/en/partitioning-limitations.html
switch v.Op {
case opcode.Or, opcode.And, opcode.Xor, opcode.LeftShift, opcode.RightShift, opcode.BitNeg, opcode.Div:
return errors.Trace(ErrPartitionFunctionIsNotAllowed)
default:
if err := checkPartitionExprValid(ctx, tblInfo, v.L); err != nil {
return errors.Trace(err)
}
if err := checkPartitionExprValid(ctx, tblInfo, v.R); err != nil {
return errors.Trace(err)
}
}
return nil
case *ast.UnaryOperationExpr:
if v.Op == opcode.BitNeg {
return errors.Trace(ErrPartitionFunctionIsNotAllowed)
}
if err := checkPartitionExprValid(ctx, tblInfo, v.V); err != nil {
return errors.Trace(err)
}
return nil
case *ast.ParenthesesExpr:
return checkPartitionExprValid(ctx, tblInfo, v.Expr)
}
return nil
}
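// Illustrative behaviour of the expression check above, not from the original
// source; the expressions are assumptions for the example only:
//
//	id DIV 2       -> allowed  (opcode.IntDiv is not rejected)
//	id / 2         -> rejected (opcode.Div)
//	id | 4, ~id    -> rejected (bit operators)
//	CASE WHEN ...  -> rejected (*ast.CaseExpr)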
// checkPartitionFuncValid checks whether the partition function is valid.
func checkPartitionFuncValid(ctx sessionctx.Context, tblInfo *model.TableInfo, expr ast.ExprNode) error {
err := checkPartitionExprValid(ctx, tblInfo, expr)
if err != nil {
return err
}
// check constant.
_, err = checkPartitionColumns(tblInfo, expr)
return err
}
// checkResultOK derives from https://github.com/mysql/mysql-server/blob/5.7/sql/item_timefunc
// For partitioned tables, MySQL does not support constant, random, or timezone-dependent expressions.
// Based on the MySQL code that checks whether a field is valid, every time-related type has a check_valid_arguments_processor function.
func checkResultOK(ok bool, err error) error {
if err != nil {
return err
}
if !ok {
return errors.Trace(errWrongExprInPartitionFunc)
}
return nil
}
func checkPartitionColumns(tblInfo *model.TableInfo, expr ast.ExprNode) ([]*model.ColumnInfo, error) {
var buf strings.Builder
restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags, &buf)
err := expr.Restore(restoreCtx)
if err != nil {
return nil, errors.Trace(err)
}
partCols, err := extractPartitionColumns(buf.String(), tblInfo)
if err != nil {
return nil, err
}
if len(partCols) == 0 {
return nil, errors.Trace(errWrongExprInPartitionFunc)
}
return partCols, nil
}
// checkPartitionFuncType checks partition function return type.
func checkPartitionFuncType(ctx sessionctx.Context, s *ast.CreateTableStmt, tblInfo *model.TableInfo) error {
if s.Partition.Expr == nil {
return nil
}
var buf strings.Builder
restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags, &buf)
if err := s.Partition.Expr.Restore(restoreCtx); err != nil {
return errors.Trace(err)
}
exprStr := buf.String()
if s.Partition.Tp == model.PartitionTypeRange || s.Partition.Tp == model.PartitionTypeHash {
// if partition by columnExpr, check the column type
if _, ok := s.Partition.Expr.(*ast.ColumnNameExpr); ok {
for _, col := range tblInfo.Columns {
name := strings.Replace(col.Name.String(), ".", "`.`", -1)
// Range partitioning key supported types: tinyint, smallint, mediumint, int and bigint.
if !validRangePartitionType(col) && fmt.Sprintf("`%s`", name) == exprStr {
return errors.Trace(ErrNotAllowedTypeInPartition.GenWithStackByArgs(exprStr))
}
}
}
}
e, err := expression.ParseSimpleExprWithTableInfo(ctx, exprStr, tblInfo)
if err != nil {
return errors.Trace(err)
}
if e.GetType().EvalType() == types.ETInt {
return nil
}
if s.Partition.Tp == model.PartitionTypeHash {
if _, ok := s.Partition.Expr.(*ast.ColumnNameExpr); ok {
return ErrNotAllowedTypeInPartition.GenWithStackByArgs(exprStr)
}
}
return ErrPartitionFuncNotAllowed.GenWithStackByArgs("PARTITION")
}
// checkCreatePartitionValue checks whether `less than value` is strictly increasing for each partition.
// Side effect: it may simplify the partition range definition from a constant expression to an integer.
func checkCreatePartitionValue(ctx sessionctx.Context, tblInfo *model.TableInfo) error {
pi := tblInfo.Partition
defs := pi.Definitions
if len(defs) == 0 {
return nil
}
cols := tblInfo.Columns
if strings.EqualFold(defs[len(defs)-1].LessThan[0], partitionMaxValue) {
defs = defs[:len(defs)-1]
}
isUnsignedBigint := isRangePartitionColUnsignedBigint(cols, pi)
var prevRangeValue interface{}
for i := 0; i < len(defs); i++ {
if strings.EqualFold(defs[i].LessThan[0], partitionMaxValue) {
return errors.Trace(ErrPartitionMaxvalue)
}
currentRangeValue, fromExpr, err := getRangeValue(ctx, defs[i].LessThan[0], isUnsignedBigint)
if err != nil {
return errors.Trace(err)
}
if fromExpr {
// Constant fold the expression.
defs[i].LessThan[0] = fmt.Sprintf("%d", currentRangeValue)
}
if i == 0 {
prevRangeValue = currentRangeValue
continue
}
if isUnsignedBigint {
if currentRangeValue.(uint64) <= prevRangeValue.(uint64) {
return errors.Trace(ErrRangeNotIncreasing)
}
} else {
if currentRangeValue.(int64) <= prevRangeValue.(int64) {
return errors.Trace(ErrRangeNotIncreasing)
}
}
prevRangeValue = currentRangeValue
}
return nil
}
// getRangeValue gets an integer from the range value string.
// The returned boolean value indicates whether the input string is a constant expression.
func getRangeValue(ctx sessionctx.Context, str string, unsignedBigint bool) (interface{}, bool, error) {
// Unsigned bigint was converted to uint64 handle.
if unsignedBigint {
if value, err := strconv.ParseUint(str, 10, 64); err == nil {
return value, false, nil
}
e, err1 := expression.ParseSimpleExprWithTableInfo(ctx, str, &model.TableInfo{})
if err1 != nil {
return 0, false, err1
}
res, isNull, err2 := e.EvalInt(ctx, chunk.Row{})
if err2 == nil && !isNull {
return uint64(res), true, nil
}
} else {
if value, err := strconv.ParseInt(str, 10, 64); err == nil {
return value, false, nil
}
// The range value may not be an integer; it could be a constant expression.
// For example, the following two cases are the same:
// PARTITION p0 VALUES LESS THAN (TO_SECONDS('2004-01-01'))
// PARTITION p0 VALUES LESS THAN (63340531200)
e, err1 := expression.ParseSimpleExprWithTableInfo(ctx, str, &model.TableInfo{})
if err1 != nil {
return 0, false, err1
}
res, isNull, err2 := e.EvalInt(ctx, chunk.Row{})
if err2 == nil && !isNull {
return res, true, nil
}
}
return 0, false, ErrNotAllowedTypeInPartition.GenWithStackByArgs(str)
}
// validRangePartitionType checks the type supported by the range partitioning key.
func validRangePartitionType(col *model.ColumnInfo) bool {
switch col.FieldType.EvalType() {
case types.ETInt:
return true
default:
return false
}
}
// checkDropTablePartition checks if the partition exists and does not allow deleting the last existing partition in the table.
func checkDropTablePartition(meta *model.TableInfo, partLowerNames []string) error {
pi := meta.Partition
if pi.Type != model.PartitionTypeRange && pi.Type != model.PartitionTypeList {
return errOnlyOnRangeListPartition.GenWithStackByArgs("DROP")
}
oldDefs := pi.Definitions
for _, pn := range partLowerNames {
found := false
for _, def := range oldDefs {
if def.Name.L == pn {
found = true
break
}
}
if !found {
return errors.Trace(ErrDropPartitionNonExistent.GenWithStackByArgs(pn))
}
}
if len(oldDefs) == len(partLowerNames) {
return errors.Trace(ErrDropLastPartition)
}
return nil
}
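// Illustrative behaviour of the drop check above, not from the original source,
// for a RANGE table whose only partitions are p0 and p1:
//
//	DROP PARTITION p2      -> ErrDropPartitionNonExistent
//	DROP PARTITION p0, p1  -> ErrDropLastPartition (cannot drop every partition)
//	DROP PARTITION p0      -> ok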
// removePartitionInfo removes the dropped partitions from the table info within one DDL job.
func removePartitionInfo(tblInfo *model.TableInfo, partLowerNames []string) []int64 {
oldDefs := tblInfo.Partition.Definitions
newDefs := make([]model.PartitionDefinition, 0, len(oldDefs)-len(partLowerNames))
pids := make([]int64, 0, len(partLowerNames))
// Consider using a map to probe partLowerNames if there are too many of them.
for i := range oldDefs {
found := false
for _, partName := range partLowerNames {
if oldDefs[i].Name.L == partName {
found = true
break
}
}
if found {
pids = append(pids, oldDefs[i].ID)
} else {
newDefs = append(newDefs, oldDefs[i])
}
}
tblInfo.Partition.Definitions = newDefs
return pids
}
func getPartitionDef(tblInfo *model.TableInfo, partName string) (index int, def *model.PartitionDefinition, _ error) {
defs := tblInfo.Partition.Definitions
for i := 0; i < len(defs); i++ {
if strings.EqualFold(defs[i].Name.L, strings.ToLower(partName)) {
return i, &(defs[i]), nil
}
}
return index, nil, table.ErrUnknownPartition.GenWithStackByArgs(partName, tblInfo.Name.O)
}
func buildPlacementDropRules(schemaID, tableID int64, partitionIDs []int64) []*placement.RuleOp {
rules := make([]*placement.RuleOp, 0, len(partitionIDs))
for _, partitionID := range partitionIDs {
rules = append(rules, &placement.RuleOp{
Action: placement.RuleOpDel,
DeleteByIDPrefix: true,
Rule: &placement.Rule{
GroupID: placement.RuleDefaultGroupID,
ID: fmt.Sprintf("%d_t%d_p%d", schemaID, tableID, partitionID),
},
})
}
return rules
}
// onDropTablePartition deletes old partition meta.
func onDropTablePartition(t *meta.Meta, job *model.Job) (ver int64, _ error) {
var partNames []string
if err := job.DecodeArgs(&partNames); err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)
if err != nil {
return ver, errors.Trace(err)
}
var physicalTableIDs []int64
if job.Type == model.ActionAddTablePartition {
// It is rolled back from adding a table partition; just remove addingDefinitions from the tableInfo.
physicalTableIDs = rollbackAddingPartitionInfo(tblInfo)
} else {
// If an error occurs, it returns that it cannot delete all partitions or that the partition doesn't exist.
err = checkDropTablePartition(tblInfo, partNames)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
physicalTableIDs = removePartitionInfo(tblInfo, partNames)
}
rules := buildPlacementDropRules(job.SchemaID, tblInfo.ID, physicalTableIDs)
err = infosync.UpdatePlacementRules(nil, rules)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Wrapf(err, "failed to notify PD the placement rules")
}
ver, err = updateVersionAndTableInfo(t, job, tblInfo, true)
if err != nil {
return ver, errors.Trace(err)
}
// Finish this job.
if job.IsRollingback() {
job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo)
} else {
job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo)
}
// A background job will be created to delete old partition data.
job.Args = []interface{}{physicalTableIDs}
return ver, nil
}
func buildPlacementTruncateRules(rules []*placement.RuleOp, schemaID, tableID, jobID int64, oldIDs []int64, newPartitions []model.PartitionDefinition) []*placement.RuleOp {
newRules := make([]*placement.RuleOp, 0, len(oldIDs))
for i, oldID := range oldIDs {
prefix := fmt.Sprintf("%d_t%d_p%d", schemaID, tableID, oldID)
for _, rule := range rules {
if strings.HasPrefix(rule.ID, prefix) {
// delete the old rule
newRules = append(newRules, &placement.RuleOp{
Action: placement.RuleOpDel,
Rule: &placement.Rule{
GroupID: placement.RuleDefaultGroupID,
ID: rule.ID,
},
})
// add the new rule
rule.Action = placement.RuleOpAdd
rule.ID = fmt.Sprintf("%d_t%d_p%d_%s_%d_%d", schemaID, tableID, newPartitions[i].ID, rule.Role, jobID, i)
newRules = append(newRules, rule)
break
}
}
}
return newRules
}
// onTruncateTablePartition truncates old partition meta.
func onTruncateTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (int64, error) {
var ver int64
var oldIDs []int64
if err := job.DecodeArgs(&oldIDs); err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)
if err != nil {
return ver, errors.Trace(err)
}
pi := tblInfo.GetPartitionInfo()
if pi == nil {
return ver, errors.Trace(ErrPartitionMgmtOnNonpartitioned)
}
newPartitions := make([]model.PartitionDefinition, 0, len(oldIDs))
for _, oldID := range oldIDs {
for i := 0; i < len(pi.Definitions); i++ {
def := &pi.Definitions[i]
if def.ID == oldID {
pid, err1 := t.GenGlobalID()
if err1 != nil {
return ver, errors.Trace(err1)
}
def.ID = pid
// Shallow copy; only def.ID is used in the event handler.
newPartitions = append(newPartitions, *def)
break
}
}
} | // Clear the tiflash replica available status.
if tblInfo.TiFlashReplica != nil {
tblInfo.TiFlashReplica.Available = false
// Set partition replica become unavailable.
for _, oldID := range oldIDs {
for i, id := range tblInfo.TiFlashReplica.AvailablePartitionIDs {
if id == oldID {
newIDs := tblInfo.TiFlashReplica.AvailablePartitionIDs[:i]
newIDs = append(newIDs, tblInfo.TiFlashReplica.AvailablePartitionIDs[i+1:]...)
tblInfo.TiFlashReplica.AvailablePartitionIDs = newIDs
break
}
}
}
}
var rules []*placement.RuleOp
// TODO: maybe add a middle state
rules, err = infosync.GetPlacementRules(nil)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Wrapf(err, "failed to retrieve placement rules from PD")
}
// TODO: simplify the definition and logic use new PD group bundle API
rules = buildPlacementTruncateRules(rules, job.SchemaID, tblInfo.ID, job.ID, oldIDs, newPartitions)
err = infosync.UpdatePlacementRules(nil, rules)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Wrapf(err, "failed to notify PD the placement rules")
}
ver, err = updateVersionAndTableInfo(t, job, tblInfo, true)
if err != nil {
return ver, errors.Trace(err)
}
// Finish this job.
job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo)
asyncNotifyEvent(d, &util.Event{Tp: model.ActionTruncateTablePartition, TableInfo: tblInfo, PartInfo: &model.PartitionInfo{Definitions: newPartitions}})
// A background job will be created to delete old partition data.
job.Args = []interface{}{oldIDs}
return ver, nil
}
// onExchangeTablePartition exchanges partition data.
func (w *worker) onExchangeTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
var (
// defID only for updateSchemaVersion
defID int64
ptSchemaID int64
ptID int64
partName string
withValidation bool
)
if err := job.DecodeArgs(&defID, &ptSchemaID, &ptID, &partName, &withValidation); err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
ntDbInfo, err := checkSchemaExistAndCancelNotExistJob(t, job)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
nt, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)
if err != nil {
return ver, errors.Trace(err)
}
pt, err := getTableInfo(t, ptID, ptSchemaID)
if err != nil {
if infoschema.ErrDatabaseNotExists.Equal(err) || infoschema.ErrTableNotExists.Equal(err) {
job.State = model.JobStateCancelled
}
return ver, errors.Trace(err)
}
if pt.State != model.StatePublic {
job.State = model.JobStateCancelled
return ver, ErrInvalidDDLState.GenWithStack("table %s is not in public, but %s", pt.Name, pt.State)
}
err = checkExchangePartition(pt, nt)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
err = checkTableDefCompatible(pt, nt)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
index, _, err := getPartitionDef(pt, partName)
if err != nil {
return ver, errors.Trace(err)
}
if withValidation {
err = checkExchangePartitionRecordValidation(w, pt, index, ntDbInfo.Name, nt.Name)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
}
// partition table base auto id
ptBaseID, err := t.GetAutoTableID(ptSchemaID, pt.ID)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
ptRandID, err := t.GetAutoRandomID(ptSchemaID, pt.ID)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
// non-partition table base auto id
ntBaseID, err := t.GetAutoTableID(job.SchemaID, nt.ID)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
ntRandID, err := t.GetAutoRandomID(job.SchemaID, nt.ID)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
_, partDef, err := getPartitionDef(pt, partName)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
tempID := partDef.ID
// exchange table meta id
partDef.ID = nt.ID
if pt.TiFlashReplica != nil {
for i, id := range pt.TiFlashReplica.AvailablePartitionIDs {
if id == tempID {
pt.TiFlashReplica.AvailablePartitionIDs[i] = partDef.ID
break
}
}
}
err = t.UpdateTable(ptSchemaID, pt)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
failpoint.Inject("exchangePartitionErr", func(val failpoint.Value) {
if val.(bool) {
job.State = model.JobStateCancelled
failpoint.Return(ver, errors.New("occur an error after updating partition id"))
}
})
// recreate non-partition table meta info
err = t.DropTableOrView(job.SchemaID, nt.ID, true)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
nt.ID = tempID
err = t.CreateTableOrView(job.SchemaID, nt)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
// Set both pt and nt to the maximum auto ID of ptBaseID and ntBaseID.
if ntBaseID > ptBaseID {
_, err = t.GenAutoTableID(ptSchemaID, pt.ID, ntBaseID-ptBaseID)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
}
_, err = t.GenAutoTableID(job.SchemaID, nt.ID, mathutil.MaxInt64(ptBaseID, ntBaseID))
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
if ntRandID != 0 || ptRandID != 0 {
if ntRandID > ptRandID {
_, err = t.GenAutoRandomID(ptSchemaID, pt.ID, ntRandID-ptRandID)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
}
_, err = t.GenAutoRandomID(job.SchemaID, nt.ID, mathutil.MaxInt64(ptRandID, ntRandID))
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Trace(err)
}
}
ver, err = updateSchemaVersion(t, job)
if err != nil {
return ver, errors.Trace(err)
}
job.FinishTableJob(model.JobStateDone, model.StateNone, ver, pt)
return ver, nil
}
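// checkExchangePartitionRecordValidation checks that every row in the non-partitioned table fits the
// definition of the target partition; it returns ErrRowDoesNotMatchPartition if any row falls outside it.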
func checkExchangePartitionRecordValidation(w *worker, pt *model.TableInfo, index int, schemaName, tableName model.CIStr) error {
var sql string
pi := pt.Partition
switch pi.Type {
case model.PartitionTypeHash:
if pi.Num == 1 {
return nil
}
sql = fmt.Sprintf("select 1 from `%s`.`%s` where mod(%s, %d) != %d limit 1", schemaName.L, tableName.L, pi.Expr, pi.Num, index)
case model.PartitionTypeRange:
// The table has only one partition and it is defined with MAXVALUE.
if len(pi.Definitions) == 1 && strings.EqualFold(pi.Definitions[index].LessThan[0], partitionMaxValue) {
return nil
}
// For range expression and range columns
if len(pi.Columns) == 0 {
sql = buildCheckSQLForRangeExprPartition(pi, index, schemaName, tableName)
} else if len(pi.Columns) == 1 {
sql = buildCheckSQLForRangeColumnsPartition(pi, index, schemaName, tableName)
}
default:
return errUnsupportedPartitionType.GenWithStackByArgs(pt.Name.O)
}
var ctx sessionctx.Context
ctx, err := w.sessPool.get()
if err != nil {
return errors.Trace(err)
}
defer w.sessPool.put(ctx)
rows, _, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(sql)
if err != nil {
return errors.Trace(err)
}
rowCount := len(rows)
if rowCount != 0 {
return errors.Trace(ErrRowDoesNotMatchPartition)
}
return nil
}
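// buildCheckSQLForRangeExprPartition builds the validation SQL for a RANGE partition defined by an
// expression; the query returns a row if any record lies outside the partition's boundaries.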
func buildCheckSQLForRangeExprPartition(pi *model.PartitionInfo, index int, schemaName, tableName model.CIStr) string {
if index == 0 {
return fmt.Sprintf("select 1 from `%s`.`%s` where %s >= %s limit 1", schemaName.L, tableName.L, pi.Expr, pi.Definitions[index].LessThan[0])
} else if index == len(pi.Definitions)-1 && strings.EqualFold(pi.Definitions[index].LessThan[0], partitionMaxValue) {
return fmt.Sprintf("select 1 from `%s`.`%s` where %s < %s limit 1", schemaName.L, tableName.L, pi.Expr, pi.Definitions[index-1].LessThan[0])
} else {
return fmt.Sprintf("select 1 from `%s`.`%s` where %s < %s or %s >= %s limit 1", schemaName.L, tableName.L, pi.Expr, pi.Definitions[index-1].LessThan[0], pi.Expr, pi.Definitions[index].LessThan[0])
}
}
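// buildCheckSQLForRangeColumnsPartition is the RANGE COLUMNS (single column) counterpart of
// buildCheckSQLForRangeExprPartition.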
func buildCheckSQLForRangeColumnsPartition(pi *model.PartitionInfo, index int, schemaName, tableName model.CIStr) string {
colName := pi.Columns[0].L
if index == 0 {
return fmt.Sprintf("select 1 from `%s`.`%s` where `%s` >= %s limit 1", schemaName.L, tableName.L, colName, pi.Definitions[index].LessThan[0])
} else if index == len(pi.Definitions)-1 && strings.EqualFold(pi.Definitions[index].LessThan[0], partitionMaxValue) {
return fmt.Sprintf("select 1 from `%s`.`%s` where `%s` < %s limit 1", schemaName.L, tableName.L, colName, pi.Definitions[index-1].LessThan[0])
} else {
return fmt.Sprintf("select 1 from `%s`.`%s` where `%s` < %s or `%s` >= %s limit 1", schemaName.L, tableName.L, colName, pi.Definitions[index-1].LessThan[0], colName, pi.Definitions[index].LessThan[0])
}
}
func checkAddPartitionTooManyPartitions(piDefs uint64) error {
if piDefs > uint64(PartitionCountLimit) {
return errors.Trace(ErrTooManyPartitions)
}
return nil
}
func checkNoHashPartitions(ctx sessionctx.Context, partitionNum uint64) error {
if partitionNum == 0 {
return ast.ErrNoParts.GenWithStackByArgs("partitions")
}
return nil
}
func checkNoRangePartitions(partitionNum int) error {
if partitionNum == 0 {
return ast.ErrPartitionsMustBeDefined.GenWithStackByArgs("RANGE")
}
return nil
}
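// getPartitionIDs returns the physical IDs of all partition definitions, or an empty slice for a
// non-partitioned table.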
func getPartitionIDs(table *model.TableInfo) []int64 {
if table.GetPartitionInfo() == nil {
return []int64{}
}
physicalTableIDs := make([]int64, 0, len(table.Partition.Definitions))
for _, def := range table.Partition.Definitions {
physicalTableIDs = append(physicalTableIDs, def.ID)
}
return physicalTableIDs
}
// checkPartitioningKeysConstraints checks that the range partitioning key is included in the table constraint.
func checkPartitioningKeysConstraints(sctx sessionctx.Context, s *ast.CreateTableStmt, tblInfo *model.TableInfo) error {
// Returns directly if there are no unique keys in the table.
if len(tblInfo.Indices) == 0 && !tblInfo.PKIsHandle {
return nil
}
var partCols stringSlice
if s.Partition.Expr != nil {
// Parse partitioning key, extract the column names in the partitioning key to slice.
buf := new(bytes.Buffer)
s.Partition.Expr.Format(buf)
partColumns, err := extractPartitionColumns(buf.String(), tblInfo)
if err != nil {
return err
}
partCols = columnInfoSlice(partColumns)
} else if len(s.Partition.ColumnNames) > 0 {
partCols = columnNameSlice(s.Partition.ColumnNames)
} else {
// TODO: Check keys constraints for list, key partition type and so on.
return nil
}
// Checks that the partitioning key is included in the constraint.
// Every unique key on the table must use every column in the table's partitioning expression.
// See https://dev.mysql.com/doc/refman/5.7/en/partitioning-limitations-partitioning-keys-unique-keys.html
for _, index := range tblInfo.Indices {
if index.Unique && !checkUniqueKeyIncludePartKey(partCols, index.Columns) {
if index.Primary {
return ErrUniqueKeyNeedAllFieldsInPf.GenWithStackByArgs("PRIMARY KEY")
}
return ErrUniqueKeyNeedAllFieldsInPf.GenWithStackByArgs("UNIQUE INDEX")
}
}
// when PKIsHandle, tblInfo.Indices will not contain the primary key.
if tblInfo.PKIsHandle {
indexCols := []*model.IndexColumn{{
Name: tblInfo.GetPkName(),
Length: types.UnspecifiedLength,
}}
if !checkUniqueKeyIncludePartKey(partCols, indexCols) {
return ErrUniqueKeyNeedAllFieldsInPf.GenWithStackByArgs("PRIMARY KEY")
}
}
return nil
}
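// checkPartitionKeysConstraint reports whether the given index columns cover every column used in the
// table's partitioning expression or column list.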
func checkPartitionKeysConstraint(pi *model.PartitionInfo, indexColumns []*model.IndexColumn, tblInfo *model.TableInfo) (bool, error) {
var (
partCols []*model.ColumnInfo
err error
)
// The expr will be an empty string if the partition is defined by:
// CREATE TABLE t (...) PARTITION BY RANGE COLUMNS(...)
if partExpr := pi.Expr; partExpr != "" {
// Parse partitioning key, extract the column names in the partitioning key to slice.
partCols, err = extractPartitionColumns(partExpr, tblInfo)
if err != nil {
return false, err
}
} else {
partCols = make([]*model.ColumnInfo, 0, len(pi.Columns))
for _, col := range pi.Columns {
colInfo := getColumnInfoByName(tblInfo, col.L)
if colInfo == nil {
return false, infoschema.ErrColumnNotExists.GenWithStackByArgs(col, tblInfo.Name)
}
partCols = append(partCols, colInfo)
}
}
// In MySQL, every unique key on the table must use every column in the table's partitioning expression.(This
// also includes the table's primary key.)
// In TiDB, global index will be built when this constraint is not satisfied and EnableGlobalIndex is set.
// See https://dev.mysql.com/doc/refman/5.7/en/partitioning-limitations-partitioning-keys-unique-keys.html
return checkUniqueKeyIncludePartKey(columnInfoSlice(partCols), indexColumns), nil
}
type columnNameExtractor struct {
extractedColumns []*model.ColumnInfo
tblInfo *model.TableInfo
err error
}
func (cne *columnNameExtractor) Enter(node ast.Node) (ast.Node, bool) {
return node, false
}
func (cne *columnNameExtractor) Leave(node ast.Node) (ast.Node, bool) {
if c, ok := node.(*ast.ColumnNameExpr); ok {
info := findColumnByName(c.Name.Name.L, cne.tblInfo)
if info != nil {
cne.extractedColumns = append(cne.extractedColumns, info)
return node, true
}
cne.err = ErrBadField.GenWithStackByArgs(c.Name.Name.O, "expression")
return nil, false
}
return node, true
}
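// findColumnByName returns the column in tblInfo whose lower-case name equals colName, or nil if no
// such column exists.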
func findColumnByName(colName string, tblInfo *model.TableInfo) *model.ColumnInfo {
for _, info := range tblInfo.Columns {
if info.Name.L == colName {
return info
}
}
return nil
}
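// extractPartitionColumns parses the partitioning expression and returns the column infos it
// references; an unknown column name yields ErrBadField.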
func extractPartitionColumns(partExpr string, tblInfo *model.TableInfo) ([]*model.ColumnInfo, error) {
partExpr = "select " + partExpr
stmts, _, err := parser.New().Parse(partExpr, "", "")
if err != nil {
return nil, errors.Trace(err)
}
extractor := &columnNameExtractor{
tblInfo: tblInfo,
extractedColumns: make([]*model.ColumnInfo, 0),
}
stmts[0].Accept(extractor)
if extractor.err != nil {
return nil, errors.Trace(extractor.err)
}
return extractor.extractedColumns, nil
}
// stringSlice is defined for checkUniqueKeyIncludePartKey.
// If Go supported covariance, this code wouldn't need to be so complex.
type stringSlice interface {
Len() int
At(i int) string
}
// checkUniqueKeyIncludePartKey checks that the partitioning key is included in the constraint.
func checkUniqueKeyIncludePartKey(partCols stringSlice, idxCols []*model.IndexColumn) bool {
for i := 0; i < partCols.Len(); i++ {
partCol := partCols.At(i)
idxCol := findColumnInIndexCols(partCol, idxCols)
if idxCol == nil {
// Partition column is not found in the index columns.
return false
}
if idxCol.Length > 0 {
// The partition column is found in the index columns, but the index column is a prefix index
return false
}
}
return true
}
// columnInfoSlice implements the stringSlice interface.
type columnInfoSlice []*model.ColumnInfo
func (cis columnInfoSlice) Len() int {
return len(cis)
}
func (cis columnInfoSlice) At(i int) string {
return cis[i].Name.L
}
// columnNameSlice implements the stringSlice interface.
type columnNameSlice []*ast.ColumnName
func (cns columnNameSlice) Len() int {
return len(cns)
}
func (cns columnNameSlice) At(i int) string {
return cns[i].Name.L
}
// isRangePartitionColUnsignedBigint returns true if the partitioning key column type is unsigned bigint type.
func isRangePartitionColUnsignedBigint(cols []*model.ColumnInfo, pi *model.PartitionInfo) bool {
for _, col := range cols {
isUnsigned := col.Tp == mysql.TypeLonglong && mysql.HasUnsignedFlag(col.Flag)
if isUnsigned && strings.Contains(strings.ToLower(pi.Expr), col.Name.L) {
return true
}
}
return false
}
// truncateTableByReassignPartitionIDs reassigns new partition ids.
func truncateTableByReassignPartitionIDs(t *meta.Meta, tblInfo *model.TableInfo) error {
newDefs := make([]model.PartitionDefinition, 0, len(tblInfo.Partition.Definitions))
for _, def := range tblInfo.Partition.Definitions {
pid, err := t.GenGlobalID()
if err != nil {
return errors.Trace(err)
}
newDef := def
newDef.ID = pid
newDefs = append(newDefs, newDef)
}
tblInfo.Partition.Definitions = newDefs
return nil
}
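// onAlterTablePartition rewrites the placement rule IDs for a single partition and notifies PD of the
// updated rules.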
func onAlterTablePartition(t *meta.Meta, job *model.Job) (int64, error) {
var partitionID int64
var rules []*placement.RuleOp
err := job.DecodeArgs(&partitionID, &rules)
if err != nil {
job.State = model.JobStateCancelled
return 0, errors.Trace(err)
}
tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)
if err != nil {
return 0, err
}
ptInfo := tblInfo.GetPartitionInfo()
if ptInfo.GetNameByID(partitionID) == "" {
job.State = model.JobStateCancelled
return 0, errors.Trace(table.ErrUnknownPartition.GenWithStackByArgs("drop?", tblInfo.Name.O))
}
for i, rule := range rules {
if rule.Action == placement.RuleOpDel {
rule.ID = fmt.Sprintf("%d_t%d_p%d_%s", job.SchemaID, tblInfo.ID, partitionID, rule.Role)
} else {
rule.ID = fmt.Sprintf("%d_t%d_p%d_%s_%d_%d", job.SchemaID, tblInfo.ID, partitionID, rule.Role, job.ID, i)
}
}
ver, err := t.GetSchemaVersion()
if err != nil {
return ver, errors.Trace(err)
}
err = infosync.UpdatePlacementRules(nil, rules)
if err != nil {
job.State = model.JobStateCancelled
return ver, errors.Wrapf(err, "failed to notify PD the placement rules")
}
job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
return ver, nil
} | if len(newPartitions) == 0 {
return ver, table.ErrUnknownPartition.GenWithStackByArgs("drop?", tblInfo.Name.O)
}
|
mod.rs | mod and;
mod and_then;
mod boxed;
mod map;
mod map_err;
mod or;
mod or_else;
mod recover;
mod service;
mod unify;
mod unit;
mod wrap;
use futures::{future, Future, IntoFuture};
pub(crate) use ::generic::{Combine, Either, Func, HList, One, one, Tuple};
use ::reject::{CombineRejection, Reject, Rejection};
use ::route::{self, Route};
pub(crate) use self::and::And;
use self::and_then::AndThen;
pub use self::boxed::BoxedFilter;
pub(crate) use self::map::Map;
pub(crate) use self::map_err::MapErr;
pub(crate) use self::or::Or;
use self::or_else::OrElse;
use self::recover::Recover;
use self::unify::Unify;
use self::unit::Unit;
pub(crate) use self::wrap::{WrapSealed, Wrap};
// A crate-private base trait, allowing the actual `filter` method to change
// signatures without it being a breaking change.
pub trait FilterBase {
type Extract: Tuple; // + Send;
type Error: Reject;
type Future: Future<Item=Self::Extract, Error=Self::Error> + Send;
fn filter(&self) -> Self::Future;
// crate-private for now
fn map_err<F, E>(self, fun: F) -> MapErr<Self, F>
where
Self: Sized,
F: Fn(Self::Error) -> E + Clone,
E: ::std::fmt::Debug + Send,
{
MapErr {
filter: self,
callback: fun,
}
}
fn unit(self) -> Unit<Self>
where
Self: Filter<Extract=((),)> + Sized,
{
Unit {
filter: self,
}
}
}
/// This just makes use of rustdoc's ability to make compile_fail tests.
/// This is specifically testing to make sure `Filter::filter` isn't
/// able to be called from outside the crate (since rustdoc tests are
/// compiled as new crates).
///
/// ```compile_fail
/// use warp::Filter;
///
/// let _ = warp::any().filter();
/// ```
pub fn __warp_filter_compilefail_doctest() {
// Duplicate code to make sure the code is otherwise valid.
let _ = ::any().filter();
}
/// Composable request filters.
///
/// A `Filter` can optionally extract some data from a request, combine
/// it with others, mutate it, and return back some value as a reply. The
/// power of `Filter`s comes from being able to isolate small subsets, and then
/// chain and reuse them in various parts of your app.
///
/// # Extracting Tuples
///
/// You may notice that several of these filters extract some tuple, often
/// times a tuple of just 1 item! Why?
///
/// If a filter extracts a `(String,)`, that simply means that it
/// extracts a `String`. If you were to `map` the filter, the argument type
/// would be exactly that, just a `String`.
///
/// What is it? It's just some type magic that allows for automatic combining
/// and flattening of tuples. Without it, combining two filters together with
/// `and`, where one extracted `()`, and another `String`, would mean the
/// `map` would be given a single argument of `((), String,)`, which is just
/// no fun.
pub trait Filter: FilterBase {
/// Composes a new `Filter` that requires both this and the other to filter a request.
///
/// Additionally, this will join together the extracted values of both
/// filters, so that `map` and `and_then` receive them as separate arguments.
///
/// If a `Filter` extracts nothing (so, `()`), combining with any other
/// filter will simply discard the `()`. If a `Filter` extracts one or
/// more items, combining will mean it extracts the values of itself
/// combined with the other.
///
/// # Example
///
/// ```
/// use warp::Filter;
///
/// // Match `/hello/:name`...
/// warp::path("hello")
/// .and(warp::path::param::<String>());
/// ```
fn and<F>(self, other: F) -> And<Self, F>
where
Self: Sized,
//Self::Extract: HList + Combine<F::Extract>,
<Self::Extract as Tuple>::HList: Combine<<F::Extract as Tuple>::HList>,
F: Filter + Clone,
F::Error: CombineRejection<Self::Error>,
{
And {
first: self,
second: other,
}
}
/// Composes a new `Filter` of either this or the other filter.
///
/// # Example
///
/// ```
/// use std::net::SocketAddr;
/// use warp::Filter;
///
/// // Match either `/:u32` or `/:socketaddr`
/// warp::path::param::<u32>()
/// .or(warp::path::param::<SocketAddr>());
/// ```
fn or<F>(self, other: F) -> Or<Self, F>
where
Self: Sized,
F: Filter,
F::Error: CombineRejection<Self::Error>,
{
Or {
first: self,
second: other,
}
}
/// Composes this `Filter` with a function receiving the extracted value.
///
///
/// # Example
///
/// ```
/// use warp::Filter;
///
/// // Map `/:id`
/// warp::path::param().map(|id: u64| {
/// format!("Hello #{}", id)
/// });
/// ```
///
/// # `Func`
///
/// The generic `Func` trait is implemented for any function that receives
/// the same arguments as this `Filter` extracts. In practice, this
/// shouldn't ever bother you, and simply makes things feel more natural.
///
/// For example, if three `Filter`s were combined together, suppose one
/// extracts nothing (so `()`), and the other two extract two integers,
/// a function that accepts exactly two integer arguments is allowed.
/// Specifically, any `Fn(u32, u32)`.
///
/// Without `Product` and `Func`, this would be a lot messier. First of
/// all, the `()`s couldn't be discarded, and the tuples would be nested.
/// So, instead, you'd need to pass an `Fn(((), (u32, u32)))`. That's just
/// a single argument. Bleck!
///
/// Even worse, the tuples would shuffle the types around depending on
/// the exact invocation of `and`s. So, `unit.and(int).and(int)` would
/// result in a different extracted type from `unit.and(int.and(int))`,
/// or from `int.and(unit).and(int)`. If you changed around the order
/// of filters, while still having them be semantically equivalent, you'd
/// need to update all your `map`s as well.
///
/// `Product`, `HList`, and `Func` do all the heavy work so that none of
/// this is a bother to you. What's more, the types are enforced at
/// compile-time, and tuple flattening is optimized away to nothing by
/// LLVM.
fn map<F>(self, fun: F) -> Map<Self, F>
where
Self: Sized,
F: Func<Self::Extract> + Clone,
{
Map {
filter: self,
callback: fun,
}
}
/// Composes this `Filter` with a function receiving the extracted value.
///
/// The function should return some `IntoFuture` type.
///
/// The `Error` type of the returned `Future` needs to be a `Rejection`, which
/// means most futures will need to have their error mapped into one.
///
/// # Example
///
/// ```
/// use warp::Filter;
///
/// // Validate after `/:id`
/// warp::path::param().and_then(|id: u64| {
/// if id != 0 {
/// Ok(format!("Hello #{}", id))
/// } else {
/// Err(warp::reject())
/// }
/// });
/// ```
fn and_then<F>(self, fun: F) -> AndThen<Self, F>
where
Self: Sized,
F: Func<Self::Extract> + Clone,
F::Output: IntoFuture + Send,
<F::Output as IntoFuture>::Error: CombineRejection<Self::Error>,
<F::Output as IntoFuture>::Future: Send,
{
AndThen {
filter: self,
callback: fun,
}
}
/// Compose this `Filter` with a function receiving an error.
///
/// The function should return some `IntoFuture` type yielding the
/// same item and error types.
fn or_else<F>(self, fun: F) -> OrElse<Self, F>
where
Self: Sized,
F: Func<Self::Error>,
F::Output: IntoFuture<Item=Self::Extract, Error=Self::Error> + Send,
<F::Output as IntoFuture>::Future: Send,
{
OrElse {
filter: self,
callback: fun,
}
}
/// Compose this `Filter` with a function receiving an error and
/// returning a *new* type, instead of the *same* type.
///
/// This is useful for "customizing" rejections into new response types.
/// See also the [errors example][ex].
///
/// [ex]: https://github.com/seanmonstar/warp/blob/master/examples/errors.rs
fn recover<F>(self, fun: F) -> Recover<Self, F>
where
Self: Sized,
F: Func<Self::Error>,
F::Output: IntoFuture<Error=Self::Error> + Send,
<F::Output as IntoFuture>::Future: Send,
{
Recover {
filter: self,
callback: fun,
}
}
/// Unifies the extracted value of `Filter`s composed with `or`.
///
/// When a `Filter` extracts some `Either<T, T>`, where both sides
/// are the same type, this combinator can be used to grab the
/// inner value, regardless of which side of `Either` it was. This
/// is useful for values that could be extracted from multiple parts
/// of a request, and the exact place isn't important.
///
/// # Example
///
/// ```rust
/// use std::net::SocketAddr;
/// use warp::Filter;
///
/// let client_ip = warp::header("x-real-ip")
/// .or(warp::header("x-forwarded-for"))
/// .unify()
/// .map(|ip: SocketAddr| {
/// // Get the IP from either header,
/// // and unify into the inner type.
/// });
/// ```
fn unify<T>(self) -> Unify<Self>
where
Self: Filter<Extract=(Either<T, T>,)> + Sized,
T: Tuple,
{
Unify {
filter: self,
}
}
/// Wraps the current filter with some wrapper.
///
/// The wrapper may do some preparation work before starting this filter,
/// and may do post-processing after the filter completes.
///
/// # Example
///
/// ```
/// use warp::Filter;
///
/// let route = warp::any()
/// .map(warp::reply);
///
/// // Wrap the route with a log wrapper.
/// let route = route.with(warp::log("example"));
/// ```
fn with<W>(self, wrapper: W) -> W::Wrapped
where
Self: Sized,
W: Wrap<Self>,
{
wrapper.wrap(self)
}
/// Boxes this filter into a trait object, making it easier to name the type.
///
/// # Example
///
/// ```
/// use warp::Filter;
///
/// fn impl_reply() -> warp::filters::BoxedFilter<(impl warp::Reply,)> {
/// warp::any()
/// .map(warp::reply)
/// .boxed()
/// }
///
/// fn named_i32() -> warp::filters::BoxedFilter<(i32,)> {
/// warp::path::param::<i32>()
/// .boxed()
/// }
///
/// fn named_and() -> warp::filters::BoxedFilter<(i32, String)> {
/// warp::path::param::<i32>()
/// .and(warp::header::<String>("host"))
/// .boxed()
/// }
/// ```
fn boxed(self) -> BoxedFilter<Self::Extract>
where
Self: Sized + Send + Sync + 'static,
Self::Extract: Send,
Rejection: From<Self::Error>,
{
BoxedFilter::new(self)
}
}
impl<T: FilterBase> Filter for T {}
pub trait FilterClone: Filter + Clone {}
impl<T: Filter + Clone> FilterClone for T {}
fn _assert_object_safe() |
// ===== FilterFn =====
pub(crate) fn filter_fn<F, U>(func: F) -> FilterFn<F>
where
F: Fn(&mut Route) -> U,
U: IntoFuture,
U::Item: Tuple,
U::Error: Reject,
{
FilterFn {
func,
}
}
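// Like `filter_fn`, but wraps the single extracted item into a 1-tuple so it satisfies the `Tuple` bound.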
pub(crate) fn filter_fn_one<F, U>(func: F)
-> FilterFn<impl Fn(&mut Route) -> future::Map<U::Future, fn(U::Item) -> (U::Item,)> + Copy>
where
F: Fn(&mut Route) -> U + Copy,
U: IntoFuture,
U::Error: Reject,
{
filter_fn(move |route| {
func(route)
.into_future()
.map(tup_one as _)
})
}
fn tup_one<T>(item: T) -> (T,) {
(item,)
}
#[derive(Copy, Clone)]
#[allow(missing_debug_implementations)]
pub(crate) struct FilterFn<F> {
// TODO: could include a `debug_str: &'static str` to be used in Debug impl
func: F,
}
impl<F, U> FilterBase for FilterFn<F>
where
F: Fn(&mut Route) -> U,
U: IntoFuture,
U::Future: Send,
U::Item: Tuple,
U::Error: Reject,
{
type Extract = U::Item;
type Error = U::Error;
type Future = U::Future;
#[inline]
fn filter(&self) -> Self::Future {
route::with(|route| {
(self.func)(route).into_future()
})
}
}
| {
fn _assert(_f: &Filter<
Extract=(),
Error=(),
Future=future::FutureResult<(), ()>
>) {}
} |
server.go | package rpcsupport
import (
"log"
"net"
"net/rpc"
)
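// Registers the given service with net/rpc, listens on the host address, and serves each accepted
// connection in its own goroutine.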
func | (host string, service interface{}) error {
if err := rpc.Register(service); err != nil {
return err
}
listener, err := net.Listen("tcp", host)
if err != nil {
return err
}
for {
conn, err := listener.Accept()
if err != nil {
log.Printf("listener.Accept error: %s", err)
continue
}
go rpc.ServeConn(conn)
}
return nil
}
| ServeRpc |
fundrawtransaction.py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=0'], ['-usehd=0'], ['-usehd=0'], ['-usehd=0']])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print "Mining blocks..."
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# If the fee's positive delta is higher than this value, tests will fail;
# a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 22 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx) | fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction which will not get a change output      #
#####################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
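# the substitution above bumps script_len from 0x00 to 0x01 and inserts a single 0x00 byte, giving the input a non-empty scriptSig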
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) # we must now see two vins identical to the vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 10}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
raise AssertionError("Spent more than available")
except JSONRPCException as e:
assert("Insufficient" in e.error['message'])
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 4of5 multisig p2sh transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 12 SUNCOIN to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 12)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=0'], ['-usehd=0'], ['-usehd=0'], ['-usehd=0']])
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
try:
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('Keypool ran out' in e.error['message'])
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].walletlock()
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 12)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('walletpassphrase' in e.error['message'])
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node0
assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
if __name__ == '__main__':
RawTransactionsTest().main() | |
audio_spec.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Audio model specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import csv
import io
import os
import tempfile
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core.api.api_util import mm_export
from tensorflow_examples.lite.model_maker.core.task import model_util
import tensorflow_hub as hub
try:
from tflite_support.metadata_writers import audio_classifier as md_writer # pylint: disable=g-import-not-at-top
from tflite_support.metadata_writers import metadata_info as md_info # pylint: disable=g-import-not-at-top
from tflite_support.metadata_writers import writer_utils # pylint: disable=g-import-not-at-top
ENABLE_METADATA = True
except ImportError:
ENABLE_METADATA = False
class MetadataWriter:
"""Helper class to populate Audio Metadata, to be used in `with` statement.
Simple usage for model with two classification heads.
with MetadataWriter(tflite_path) as writer:
writer.add_input(sample_rate=16000, channels=1)
writer.add_output(name='animal_sound', labels=['dog', 'cat'])
writer.add_output(name='speech_command', labels=['yes', 'no'])
writer.save(tflite_path, json_filepath)
`add_output` can also take an ordered dict for multiple locales, example:
writer.add_output(name='animal_sound', labels=collections.OrderedDict([
('en', ['bird', 'cat']),
('fr', ['oiseau', 'chat']) | ]))
"""
def __init__(self, tflite_filepath, **kwargs):
self._model = writer_utils.load_file(tflite_filepath)
self._general_md = md_info.GeneralMd(**kwargs)
self._inputs = []
self._outputs = []
def __enter__(self):
self._temp_folder = tempfile.TemporaryDirectory()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._temp_folder.cleanup()
# Delete the attribute so that it errors out if not in `with` statement.
delattr(self, '_temp_folder')
def add_input(self, **kwargs):
"""Add metadta for the input tensor."""
self._inputs.append(md_info.InputAudioTensorMd(**kwargs))
def add_output(self, name, labels, **kwargs):
"""Add metadata for output tensor in order."""
if isinstance(labels, list):
default_locale = None
labels = collections.OrderedDict([(default_locale, labels)])
return self.add_output(name, labels, **kwargs)
label_files = []
if isinstance(labels, collections.OrderedDict):
for locale, label_list in labels.items():
full_path = os.path.join(
self._temp_folder.name,
'{}_labels_{}.txt'.format(name, locale or 'default'))
model_util.export_labels(full_path, label_list)
label_files.append(
md_info.LabelFileMd(file_path=full_path, locale=locale))
else:
raise ValueError(
'`labels` should be either a list of labels or an ordered dict mapping `locale` -> list of labels. got: {}'
.format(labels))
idx = len(self._outputs)
self._outputs.append(
md_info.ClassificationTensorMd(
name=name,
label_files=label_files,
tensor_type=writer_utils.get_output_tensor_types(self._model)[idx],
**kwargs))
def save(self, tflite_filepath=None, json_filepath=None):
"""Persist model with metadata."""
if len(self._inputs) > 1:
raise ValueError('Only supports single input, got {}'.format(
len(self._inputs)))
input_md = self._inputs[0]
writer = md_writer.MetadataWriter.create_from_metadata_info_for_multihead(
model_buffer=self._model,
general_md=self._general_md,
input_md=input_md,
output_md_list=self._outputs)
if tflite_filepath:
writer_utils.save_file(writer.populate(), tflite_filepath, mode='wb')
if json_filepath:
writer_utils.save_file(
writer.get_metadata_json(), json_filepath, mode='wt')
def _ensure_tf25(version):
if version < '2.5':
raise RuntimeError(
'Audio Tasks requires TF2.5 or later. For example, you can run the '
'following command to install TF2.5.0rc2:\n\n'
'pip3 install tensorflow==2.5.0rc2\n\n')
def _get_tf_version():
return tf.__version__
class BaseSpec(abc.ABC):
"""Base model spec for audio classification."""
def __init__(self, model_dir=None, strategy=None):
_ensure_tf25(_get_tf_version())
self.model_dir = model_dir
if not model_dir:
self.model_dir = tempfile.mkdtemp()
tf.compat.v1.logging.info('Checkpoints are stored in %s', self.model_dir)
self.strategy = strategy or tf.distribute.get_strategy()
@abc.abstractproperty
def target_sample_rate(self):
pass
@abc.abstractmethod
def create_model(self, num_classes, train_whole_model=False):
pass
@abc.abstractmethod
def run_classifier(self, model, epochs, train_ds, validation_ds, **kwargs):
pass
def preprocess_ds(self, ds, is_training=False, cache_fn=None):
"""Returns a preprocessed dataset."""
_ = is_training
_ = cache_fn
return ds
def get_default_quantization_config(self):
"""Gets the default quantization configuration."""
return None
def _remove_suffix_if_possible(text, suffix):
return text.rsplit(suffix, 1)[0]
TFJS_MODEL_ROOT = 'https://storage.googleapis.com/tfjs-models/tfjs'
def _load_browser_fft_preprocess_model():
"""Load a model replicating WebAudio's AnalyzerNode.getFloatFrequencyData."""
model_name = 'sc_preproc_model'
file_extension = '.tar.gz'
filename = model_name + file_extension
# Load the preprocessing model, which transforms audio waveform into
# spectrograms (2D image-like representation of sound).
# This model replicates WebAudio's AnalyzerNode.getFloatFrequencyData
# (https://developer.mozilla.org/en-US/docs/Web/API/AnalyserNode/getFloatFrequencyData).
# It performs short-time Fourier transform (STFT) using a length-2048 Blackman
# window. It operates on mono audio at the 44100-Hz sample rate.
filepath = tf.keras.utils.get_file(
filename,
f'{TFJS_MODEL_ROOT}/speech-commands/conversion/{filename}',
cache_subdir='model_maker',
extract=True)
model_path = _remove_suffix_if_possible(filepath, file_extension)
return tf.keras.models.load_model(model_path)
def _load_tfjs_speech_command_model():
"""Download TFJS speech command model for fine-tune."""
origin_root = f'{TFJS_MODEL_ROOT}/speech-commands/v0.3/browser_fft/18w'
files_to_download = [
'metadata.json', 'model.json', 'group1-shard1of2', 'group1-shard2of2'
]
for filename in files_to_download:
filepath = tf.keras.utils.get_file(
filename,
f'{origin_root}/{filename}',
cache_subdir='model_maker/tfjs-sc-model')
model_path = os.path.join(os.path.dirname(filepath), 'model.json')
return model_util.load_tfjs_keras_model(model_path)
@mm_export('audio_classifier.BrowserFftSpec')
class BrowserFFTSpec(BaseSpec):
"""Model good at detecting speech commands, using Browser FFT spectrum."""
EXPECTED_WAVEFORM_LENGTH = 44032
# Information used to populate TFLite metadata.
_MODEL_NAME = 'AudioClassifier'
_MODEL_DESCRIPTION = ('Identify the most prominent type in the audio clip '
'from a known set of categories.')
_MODEL_VERSION = 'v1'
_MODEL_AUTHOR = 'TensorFlow Lite Model Maker'
_MODEL_LICENSES = ('Apache License. Version 2.0 '
'http://www.apache.org/licenses/LICENSE-2.0.')
_SAMPLE_RATE = 44100
_CHANNELS = 1
_INPUT_NAME = 'audio_clip'
_INPUT_DESCRIPTION = 'Input audio clip to be classified.'
_OUTPUT_NAME = 'probability'
_OUTPUT_DESCRIPTION = 'Scores of the labels respectively.'
def __init__(self, model_dir=None, strategy=None):
"""Initialize a new instance for BrowserFFT spec.
Args:
model_dir: The location to save the model checkpoint files.
strategy: An instance of TF distribute strategy. If none, it will use the
default strategy (either SingleDeviceStrategy or the current scoped
strategy).
"""
super(BrowserFFTSpec, self).__init__(model_dir, strategy)
self._preprocess_model = _load_browser_fft_preprocess_model()
self._tfjs_sc_model = _load_tfjs_speech_command_model()
@property
def target_sample_rate(self):
return 44100
@tf.function(input_signature=[
tf.TensorSpec(shape=[None], dtype=tf.float32),
tf.TensorSpec([], dtype=tf.int32)
])
def _ensure_length(self, wav, unused_label):
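"""Return True if the waveform is at least EXPECTED_WAVEFORM_LENGTH samples long."""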
return len(wav) >= self.EXPECTED_WAVEFORM_LENGTH
@tf.function(input_signature=[
tf.TensorSpec(shape=[None], dtype=tf.float32),
tf.TensorSpec([], dtype=tf.int32)
])
def _split(self, wav, label):
"""Split the long audio samples into multiple trunks."""
# wav shape: (audio_samples, )
chunks = tf.math.floordiv(len(wav), self.EXPECTED_WAVEFORM_LENGTH)
unused = tf.math.floormod(len(wav), self.EXPECTED_WAVEFORM_LENGTH)
# Drop unused data
wav = wav[:len(wav) - unused]
# Split the audio sample into multiple chunks
wav = tf.reshape(wav, (chunks, 1, self.EXPECTED_WAVEFORM_LENGTH))
return wav, tf.repeat(tf.expand_dims(label, 0), len(wav))
@tf.function(input_signature=[
tf.TensorSpec(shape=[1, EXPECTED_WAVEFORM_LENGTH], dtype=tf.float32),
tf.TensorSpec([], dtype=tf.int32)
])
def _preprocess(self, x, label):
"""Preprocess the dataset to extract the spectrum."""
# x has shape (1, EXPECTED_WAVEFORM_LENGTH)
spectrum = self._preprocess_model(x)
# spectrum has shape (1, embedding_len)
spectrum = tf.squeeze(spectrum, axis=0)
# spectrum has shape (embedding_len,)
return spectrum, label
def preprocess_ds(self, ds, is_training=False, cache_fn=None):
del is_training
autotune = tf.data.AUTOTUNE
ds = ds.filter(self._ensure_length)
ds = ds.map(self._split, num_parallel_calls=autotune).unbatch()
ds = ds.map(self._preprocess, num_parallel_calls=autotune)
if cache_fn:
ds = cache_fn(ds)
return ds
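# Pipeline sketch: clips shorter than EXPECTED_WAVEFORM_LENGTH are filtered out,
# longer clips are split into fixed-length chunks, and each chunk is mapped
# through the Browser FFT preprocess model into a spectrum feature vector.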
def create_model(self, num_classes, train_whole_model=False):
if num_classes <= 1:
raise ValueError(
'AudioClassifier expects `num_classes` to be greater than 1')
model = tf.keras.Sequential()
for layer in self._tfjs_sc_model.layers[:-1]:
model.add(layer)
model.add(
tf.keras.layers.Dense(
name='classification_head', units=num_classes,
activation='softmax'))
if not train_whole_model:
# Freeze all but the last layer of the model. The last layer will be
# fine-tuned during transfer learning.
for layer in model.layers[:-1]:
layer.trainable = False
return model
def run_classifier(self, model, epochs, train_ds, validation_ds, **kwargs):
model.compile(
optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
hist = model.fit(
train_ds, validation_data=validation_ds, epochs=epochs, **kwargs)
return hist
def create_serving_model(self, training_model):
"""Create a model for serving."""
combined = tf.keras.Sequential()
combined.add(self._preprocess_model)
combined.add(training_model)
# Build the model.
combined.build([None, self.EXPECTED_WAVEFORM_LENGTH])
return combined
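# Illustrative only (variable names are assumptions): the serving model consumes
# raw waveform batches of shape [batch, EXPECTED_WAVEFORM_LENGTH], e.g.
#   serving = spec.create_serving_model(trained_model)
#   scores = serving(tf.zeros([1, BrowserFFTSpec.EXPECTED_WAVEFORM_LENGTH]))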
def _export_metadata(self, tflite_filepath, index_to_label,
export_metadata_json_file):
"""Export TFLite metadata."""
with MetadataWriter(
tflite_filepath,
name=self._MODEL_NAME,
description=self._MODEL_DESCRIPTION,
version=self._MODEL_VERSION,
author=self._MODEL_AUTHOR,
licenses=self._MODEL_LICENSES) as writer:
writer.add_input(
name=self._INPUT_NAME,
description=self._INPUT_DESCRIPTION,
sample_rate=self._SAMPLE_RATE,
channels=self._CHANNELS)
writer.add_output(
labels=index_to_label,
name=self._OUTPUT_NAME,
description=self._OUTPUT_DESCRIPTION)
json_filepath = (os.path.splitext(tflite_filepath)[0] +
'.json') if export_metadata_json_file else None
writer.save(tflite_filepath, json_filepath)
def export_tflite(self,
model,
tflite_filepath,
with_metadata=True,
export_metadata_json_file=True,
index_to_label=None):
"""Converts the retrained model to tflite format and saves it.
This method overrides the default `CustomModel._export_tflite` method, and
includes the pre-processing in the exported TFLite model since the support
library can't handle audio tasks yet.
Args:
model: An instance of the keras classification model to be exported.
tflite_filepath: File path to save tflite model.
with_metadata: Whether the output tflite model contains metadata.
export_metadata_json_file: Whether to export metadata in json file. If
True, export the metadata in the same directory as the tflite model. Used
only if `with_metadata` is True.
index_to_label: A list that maps from index to label class name.
"""
combined = self.create_serving_model(model)
# Sets batch size from None to 1 when converting to tflite.
model_util.set_batch_size(model, batch_size=1)
model_util.export_tflite(
combined, tflite_filepath, quantization_config=None)
# Sets batch size back to None to support retraining later.
model_util.set_batch_size(model, batch_size=None)
if with_metadata:
if not ENABLE_METADATA:
print('Writing Metadata is not supported in the installed tflite-support '
'version. Please use tflite-support >= 0.2.*')
else:
self._export_metadata(tflite_filepath, index_to_label,
export_metadata_json_file)
@mm_export('audio_classifier.YamNetSpec')
class YAMNetSpec(BaseSpec):
"""Model good at detecting environmental sounds, using YAMNet embedding."""
EXPECTED_WAVEFORM_LENGTH = 15600 # effectively 0.975s
EMBEDDING_SIZE = 1024
# Information used to populate TFLite metadata.
_MODEL_NAME = 'yamnet/classification'
_MODEL_DESCRIPTION = 'Recognizes sound events'
_MODEL_VERSION = 'v1'
_MODEL_AUTHOR = 'TensorFlow Lite Model Maker'
_MODEL_LICENSES = ('Apache License. Version 2.0 '
'http://www.apache.org/licenses/LICENSE-2.0.')
_SAMPLE_RATE = 16000
_CHANNELS = 1
_INPUT_NAME = 'audio_clip'
_INPUT_DESCRIPTION = 'Input audio clip to be classified.'
_YAMNET_OUTPUT_NAME = 'yamnet'
_YAMNET_OUTPUT_DESCRIPTION = ('Scores in range 0..1.0 for each of the 521 '
'output classes.')
_CUSTOM_OUTPUT_NAME = 'custom'
_CUSTOM_OUTPUT_DESCRIPTION = (
'Scores in range 0..1.0 for each of the output classes.')
def __init__(
self,
model_dir: None = None,
strategy: None = None,
yamnet_model_handle='https://tfhub.dev/google/yamnet/1',
frame_length=EXPECTED_WAVEFORM_LENGTH, # Window size 0.975 s
frame_step=EXPECTED_WAVEFORM_LENGTH // 2, # Hop of 0.975 /2 s
keep_yamnet_and_custom_heads=True):
"""Initialize a new instance for YAMNet spec.
Args:
model_dir: The location to save the model checkpoint files.
strategy: An instance of TF distribute strategy. If none, it will use the
default strategy (either SingleDeviceStrategy or the current scoped
strategy).
yamnet_model_handle: Path of the TFHub model for retraining.
frame_length: The number of samples in each audio frame. If the audio file
is shorter than `frame_length`, then the audio file will be ignored.
frame_step: The number of samples between two audio frames. This value
should be smaller than `frame_length`.
keep_yamnet_and_custom_heads: Boolean, decides if the final TFLite model
contains both YAMNet and custom trained classification heads. When set
to False, only the trained custom head will be preserved.
"""
super(YAMNetSpec, self).__init__(model_dir, strategy)
self._yamnet_model_handle = yamnet_model_handle
self._yamnet_model = hub.load(yamnet_model_handle)
self._frame_length = frame_length
self._frame_step = frame_step
self._keep_yamnet_and_custom_heads = keep_yamnet_and_custom_heads
@property
def target_sample_rate(self):
return self._SAMPLE_RATE
def create_model(self, num_classes, train_whole_model=False):
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(
input_shape=(YAMNetSpec.EMBEDDING_SIZE),
dtype=tf.float32,
name='embedding'),
tf.keras.layers.Dense(
num_classes, name='classification_head', activation='softmax')
])
return model
def run_classifier(self, model, epochs, train_ds, validation_ds, **kwargs):
model.compile(
optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
hist = model.fit(
train_ds, validation_data=validation_ds, epochs=epochs, **kwargs)
return hist
# Annotate the TF function with input_signature to avoid re-tracing. Otherwise
# the TF function gets retraced every time the input shape is changed.
# Check https://www.tensorflow.org/api_docs/python/tf/function#args_1 for more
# information.
@tf.function(input_signature=[
tf.TensorSpec(shape=[None], dtype=tf.float32),
tf.TensorSpec([], dtype=tf.int32)
])
def _frame(self, wav, label):
clips = tf.signal.frame(
wav, frame_length=self._frame_length, frame_step=self._frame_step)
batch_labels = tf.repeat(tf.expand_dims(label, 0), len(clips))
return clips, batch_labels
@tf.function(input_signature=[
tf.TensorSpec(shape=[None], dtype=tf.float32),
tf.TensorSpec([], dtype=tf.int32)
])
def _extract_embedding(self, wav, label):
_, embeddings, _ = self._yamnet_model(wav) # (chunks, EMBEDDING_SIZE)
embedding = tf.reduce_mean(embeddings, axis=0)
return embedding, label
@tf.function(input_signature=[
tf.TensorSpec(shape=[EMBEDDING_SIZE], dtype=tf.float32),
tf.TensorSpec([], dtype=tf.int32)
])
def _add_noise(self, embedding, label):
noise = tf.random.normal(
embedding.shape, mean=0.0, stddev=.2, dtype=tf.dtypes.float32)
return noise + embedding, label
def preprocess_ds(self, ds, is_training=False, cache_fn=None):
autotune = tf.data.AUTOTUNE
ds = ds.map(self._frame, num_parallel_calls=autotune).unbatch()
ds = ds.map(self._extract_embedding, num_parallel_calls=autotune)
# Cache intermediate results right before data augmentation.
if cache_fn:
ds = cache_fn(ds)
if is_training:
ds = ds.map(self._add_noise, num_parallel_calls=autotune)
return ds
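# Pipeline sketch: each clip is framed into windows of `frame_length` samples,
# every window is run through YAMNet and its patch embeddings are mean-pooled
# into a single 1024-dim vector, and Gaussian noise is added as augmentation
# only when is_training=True.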
def _yamnet_labels(self):
class_map_path = self._yamnet_model.class_map_path().numpy()
class_map_csv_text = tf.io.read_file(class_map_path).numpy().decode('utf-8')
class_map_csv = io.StringIO(class_map_csv_text)
class_names = [
display_name for (class_index, mid,
display_name) in csv.reader(class_map_csv)
]
class_names = class_names[1:] # Skip CSV header
return class_names
def _export_metadata(self, tflite_filepath, index_to_label,
export_metadata_json_file):
"""Export TFLite metadata."""
with MetadataWriter(
tflite_filepath,
name=self._MODEL_NAME,
description=self._MODEL_DESCRIPTION,
version=self._MODEL_VERSION,
author=self._MODEL_AUTHOR,
licenses=self._MODEL_LICENSES) as writer:
writer.add_input(
name=self._INPUT_NAME,
description=self._INPUT_DESCRIPTION,
sample_rate=self._SAMPLE_RATE,
channels=self._CHANNELS)
if self._keep_yamnet_and_custom_heads:
writer.add_output(
labels=self._yamnet_labels(),
name=self._YAMNET_OUTPUT_NAME,
description=self._YAMNET_OUTPUT_DESCRIPTION)
writer.add_output(
labels=index_to_label,
name=self._CUSTOM_OUTPUT_NAME,
description=self._CUSTOM_OUTPUT_DESCRIPTION)
json_filepath = (os.path.splitext(tflite_filepath)[0] +
'.json') if export_metadata_json_file else None
writer.save(tflite_filepath, json_filepath)
def create_serving_model(self, training_model):
"""Create a model for serving."""
embedding_extraction_layer = hub.KerasLayer(
self._yamnet_model_handle, trainable=False)
keras_input = tf.keras.Input(
shape=(YAMNetSpec.EXPECTED_WAVEFORM_LENGTH,),
dtype=tf.float32,
name='audio') # (1, wav)
reshaped_input = tf.reshape(keras_input,
(YAMNetSpec.EXPECTED_WAVEFORM_LENGTH,)) # (wav)
scores, embeddings, _ = embedding_extraction_layer(reshaped_input)
serving_outputs = training_model(embeddings)
if self._keep_yamnet_and_custom_heads:
serving_model = tf.keras.Model(keras_input, [scores, serving_outputs])
else:
serving_model = tf.keras.Model(keras_input, serving_outputs)
return serving_model
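# The serving graph maps a raw waveform of EXPECTED_WAVEFORM_LENGTH samples to
# [YAMNet scores, custom head scores] when keep_yamnet_and_custom_heads is True,
# otherwise only to the custom head scores.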
def export_tflite(self,
model,
tflite_filepath,
with_metadata=True,
export_metadata_json_file=True,
index_to_label=None):
"""Converts the retrained model to tflite format and saves it.
This method overrides the default `CustomModel._export_tflite` method, and
includes the spectrogram extraction in the exported model.
The exported model has input shape (1, number of wav samples).
Args:
model: An instance of the keras classification model to be exported.
tflite_filepath: File path to save tflite model.
with_metadata: Whether the output tflite model contains metadata.
export_metadata_json_file: Whether to export metadata in json file. If
True, export the metadata in the same directory as tflite model. Used
only if `with_metadata` is True.
index_to_label: A list that maps from index to label class name.
"""
serving_model = self.create_serving_model(model)
# TODO(b/164229433): Remove SELECT_TF_OPS once changes in the bug are
# released.
model_util.export_tflite(
serving_model, tflite_filepath, quantization_config=None)
if with_metadata:
if not ENABLE_METADATA:
print('Writing Metadata is not supported in the current tflite-support '
'version. Please use tflite-support >= 0.2.*')
else:
self._export_metadata(tflite_filepath, index_to_label,
export_metadata_json_file) | |
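# Illustrative end-to-end sketch (dataset variables and the output path are
# assumptions, not part of this module):
#   spec = YAMNetSpec()
#   model = spec.create_model(num_classes=5)
#   spec.run_classifier(model, epochs=10, train_ds=train_ds, validation_ds=val_ds)
#   spec.export_tflite(model, '/tmp/yamnet_custom.tflite',
#                      index_to_label=['a', 'b', 'c', 'd', 'e'])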
utils.py | from pathlib import Path
from PIL import Image, ImageOps
def | (file_path, max_height):
size = (max_height, max_height)
thumbnail = ImageOps.fit(Image.open(file_path), size, Image.ANTIALIAS)
thumbnail.save(f'{Path(file_path).stem}_thumb_{max_height}.jpg', 'JPEG')
return thumbnail
| generate_thumbnail |
sphero_adaptor.go | package sphero
import (
"io"
"github.com/Krajiyah/gobot"
"go.bug.st/serial"
)
// Adaptor represents a Connection to a Sphero
type Adaptor struct {
name string
port string
sp io.ReadWriteCloser
connected bool
connect func(string) (io.ReadWriteCloser, error)
}
// NewAdaptor returns a new Sphero Adaptor given a port
func | (port string) *Adaptor {
return &Adaptor{
name: gobot.DefaultName("Sphero"),
port: port,
connect: func(port string) (io.ReadWriteCloser, error) {
return serial.Open(port, &serial.Mode{BaudRate: 115200})
},
}
}
// Name returns the Adaptor's name
func (a *Adaptor) Name() string { return a.name }
// SetName sets the Adaptor's name
func (a *Adaptor) SetName(n string) { a.name = n }
// Port returns the Adaptor's port
func (a *Adaptor) Port() string { return a.port }
// SetPort sets the Adaptor's port
func (a *Adaptor) SetPort(p string) { a.port = p }
// Connect initiates a connection to the Sphero. Returns an error if the connection cannot be established.
func (a *Adaptor) Connect() (err error) {
sp, e := a.connect(a.Port())
if e != nil {
return e
}
a.sp = sp
a.connected = true
return
}
// Reconnect attempts to reconnect to the Sphero. If the Sphero has an active connection
// it will first close that connection and then establish a new connection.
// Returns an error if the reconnection fails.
func (a *Adaptor) Reconnect() (err error) {
if a.connected {
a.Disconnect()
}
return a.Connect()
}
// Disconnect terminates the connection to the Sphero. Returns an error if closing the connection fails.
func (a *Adaptor) Disconnect() error {
if a.connected {
if e := a.sp.Close(); e != nil {
return e
}
a.connected = false
}
return nil
}
// Finalize finalizes the Sphero Adaptor
func (a *Adaptor) Finalize() error {
return a.Disconnect()
}
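// Illustrative usage (the serial port path is an assumption):
//   adaptor := NewAdaptor("/dev/rfcomm0")
//   if err := adaptor.Connect(); err != nil {
//       // handle the connection error
//   }
//   defer adaptor.Finalize()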
| NewAdaptor |
cmdcommon.go | package cmdcommon
import (
"context"
"flag"
"fmt"
"net/http"
"net/http/pprof"
"os"
"runtime"
hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util"
apiruntime "k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/manager"
| )
type HcCmdHelper struct {
Logger logr.Logger
runInLocal bool
Name string
}
func NewHelper(logger logr.Logger, name string) *HcCmdHelper {
return &HcCmdHelper{
Logger: logger,
Name: name,
runInLocal: hcoutil.IsRunModeLocal(),
}
}
// InitiateCommand adds flags registered by imported packages (e.g. glog and
// controller-runtime)
func (h HcCmdHelper) InitiateCommand() {
zapFlagSet := flag.NewFlagSet("zap", flag.ExitOnError)
updateFlagSet(flag.CommandLine, zapFlagSet)
pflag.Parse()
zapLogger := getZapLogger(zapFlagSet)
logf.SetLogger(zapLogger)
h.printVersion()
h.checkNameSpace()
}
const pprofAddrEnvVar = "HCO_PPROF_ADDR"
// RegisterPPROFServer registers a pprof server for CPU and memory profiling of the running operator.
func (h HcCmdHelper) RegisterPPROFServer(mgr manager.Manager) error {
pprofAddr := os.Getenv(pprofAddrEnvVar)
if len(pprofAddr) == 0 {
return nil
}
h.Logger.Info("Registering pprof server.")
mux := http.NewServeMux()
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
s := &http.Server{Addr: pprofAddr, Handler: mux}
return mgr.Add(manager.RunnableFunc(func(ctx context.Context) error {
errCh := make(chan error)
defer func() {
for range errCh {
} // drain errCh for GC
}()
go func() {
// start http Server
defer close(errCh)
errCh <- s.ListenAndServe()
}()
select {
case err := <-errCh:
return err
case <-ctx.Done():
s.Close()
return nil
}
}))
}
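// Note: profiling is opt-in; when HCO_PPROF_ADDR is unset the runnable above is
// never registered. Example (address is an assumption): HCO_PPROF_ADDR=localhost:6060
// exposes the handlers under http://localhost:6060/debug/pprof/.
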
func (h HcCmdHelper) GetWatchNS() string {
if !h.runInLocal {
watchNamespace, err := hcoutil.GetWatchNamespace()
h.ExitOnError(err, "Failed to get watch namespace")
return watchNamespace
}
return ""
}
func (h HcCmdHelper) ExitOnError(err error, message string, keysAndValues ...interface{}) {
if err != nil {
h.Logger.Error(err, message, keysAndValues...)
os.Exit(1)
}
}
func (h HcCmdHelper) IsRunInLocal() bool {
return h.runInLocal
}
func (h HcCmdHelper) AddToScheme(mgr manager.Manager, addToSchemeFuncs []func(*apiruntime.Scheme) error) {
for _, f := range addToSchemeFuncs {
err := f(mgr.GetScheme())
h.ExitOnError(err, "Failed to add to scheme")
}
}
func (h HcCmdHelper) printVersion() {
h.Logger.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
h.Logger.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
}
func (h HcCmdHelper) checkNameSpace() {
// Get the namespace that we should be deployed in.
requiredNS, err := hcoutil.GetOperatorNamespaceFromEnv()
h.ExitOnError(err, "Failed to get namespace from the environment")
// Get the namespace that we are currently deployed in.
var actualNS string
if !h.runInLocal {
var err error
actualNS, err = hcoutil.GetOperatorNamespace(h.Logger)
h.ExitOnError(err, "Failed to get namespace")
} else {
h.Logger.Info("running locally")
actualNS = requiredNS
}
// Allowing the operator to be deployed in OperatorTestNamespace, in addition to OPERATOR_NAMESPACE env var,
// to unblock its publish in OperatorHub.io
nsAllowList := []string{requiredNS, hcoutil.OperatorTestNamespace, hcoutil.OperatorHubNamespace}
if !stringInSlice(actualNS, nsAllowList) {
err := fmt.Errorf("%s is running in different namespace than expected", h.Name)
msg := fmt.Sprintf("Please re-deploy this %s into %v namespace", h.Name, requiredNS)
h.ExitOnError(err, msg, "Expected.Namespace", requiredNS, "Deployed.Namespace", actualNS)
}
}
func getZapLogger(zapFlagSet *flag.FlagSet) logr.Logger {
// Use a zap logr.Logger implementation. If none of the zap
// flags are configured (or if the zap flag set is not being
// used), this defaults to a production zap logger.
zapOpts := &zap.Options{}
zapOpts.BindFlags(zapFlagSet)
return zap.New(zap.UseFlagOptions(zapOpts))
}
func updateFlagSet(flags ...*flag.FlagSet) {
for _, f := range flags {
pflag.CommandLine.AddGoFlagSet(f)
}
}
func stringInSlice(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
} | "github.com/go-logr/logr"
"github.com/spf13/pflag"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap" |
mergeConfig.ts | import { AxiosRequestConfig } from '../types'
import { isPlainObject, deepMerge } from '../helpers/utils'
const strats = Object.create(null)
// Default strategy: prefer config2's value whenever it is defined
function defaultStrat(val1: any, val2: any): any {
return typeof val2 !== 'undefined' ? val2 : val1
}
// Only use the value from config2
function fromVal2Strat(val1: any, val2: any): any {
if (typeof val2 !== 'undefined') {
return val2
}
}
// Complex merge strategy for object values
function deepMergeStrat(val1: any, val2: any): any {
if (isPlainObject(val2)) { | ject, deep-merge val1 and val2
return deepMerge(val1, val2)
} else if (typeof val2 !== 'undefined') {
// If val2 is neither an object nor undefined (i.e. a primitive), return it directly
return val2
} else if (isPlainObject(val1)) {
return deepMerge(val1)
} else if (typeof val1 !== 'undefined') {
return val1
}
}
const stratKeysFromVal2 = ['url', 'params', 'data']
stratKeysFromVal2.forEach(key => {
strats[key] = fromVal2Strat
})
const stratKeyDeepMerge = ['headers', 'auth']
stratKeyDeepMerge.forEach(key => {
strats[key] = deepMergeStrat
})
// Config merging strategy:
// if a key of config2 is listed in stratKeysFromVal2, the latter overrides the former;
// otherwise fall back to the value from config1.
export default function mergeConfig(
config1: AxiosRequestConfig,
config2?: AxiosRequestConfig
): AxiosRequestConfig {
if (!config2) {
config2 = {}
}
// Apply a field-specific merge strategy to each key of the two configs, then assign the result to config
const config = Object.create(null)
// Iterate over the keys/values of config2
for (let key in config2) {
mergeField(key)
}
// Iterate over the keys/values of config1
for (let key in config1) {
// only merging keys of config1 that do not appear in config2
if (!config2[key]) {
mergeField(key)
}
}
// This helper picks the appropriate merge strategy
// based on the type of the given key.
function mergeField(key: string): void {
const strat = strats[key] || defaultStrat
config[key] = strat(config1[key], config2![key])
}
return config
}
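// Illustrative example (values are assumptions):
//   mergeConfig({ headers: { Accept: 'application/json' }, timeout: 0 }, { url: '/get' })
//   // -> url/params/data are taken only from the second config, headers are
//   //    deep-merged, and other keys fall back to the first config when absent
//   //    from the second.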
|
// If val2 is an ob |
docs.py | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/calculation"
# docs_base_url = "https://[org_name].github.io/calculation"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
| context.brand_html = "Calculation" |
|
run_training.py | import argparse
import imp
import importlib
import random
from opentamp.src.policy_hooks.vae.vae_main import MultiProcessMain
def load_config(args, reload_module=None):
config_file = args.config
if config_file != '':
if reload_module is not None:
config_module = reload_module
imp.reload(config_module)
else:
config_module = importlib.import_module('policy_hooks.'+config_file)
config = config_module.config
else:
config_module = None
config = {}
config['use_local'] = not args.remote
config['num_conds'] = args.nconds if args.nconds > 0 else config['num_conds'] if 'num_conds' in config else 1
if 'common' in config:
config['common']['num_conds'] = config['num_conds']
config['num_objs'] = args.nobjs if args.nobjs > 0 else config['num_objs'] if 'num_objs' in config else 1
config['weight_dir'] = config['base_weight_dir'] + str(config['num_objs']) if 'base_weight_dir' in config else args.weight_dir
config['log_timing'] = args.timing
config['hl_timeout'] = 0
config['rollout_server'] = args.rollout_server or args.all_servers
config['vae_server'] = args.vae_server or args.all_servers
config['viewer'] = args.viewer
config['server_id'] = args.server_id if args.server_id != '' else str(random.randint(0,2**32))
config['n_rollout_servers'] = args.n_rollout_servers
config['no_child_process'] = args.no_child_process
config['rollout_len'] = args.rollout_len
config['train_vae'] = args.train_vae
config['unconditional'] = args.unconditional
config['train_reward'] = args.train_reward
config['load_step'] = args.load_step
config['train_params'] = {
'use_recurrent_dynamics': args.use_recurrent_dynamics,
'use_overshooting': args.use_overshooting,
'data_limit': args.train_samples if args.train_samples > 0 else None,
'beta': args.beta,
'overshoot_beta': args.overshoot_beta,
'dist_constraint': args.dist_constraint,
}
return config, config_module
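# Note: numeric flags such as --nconds/--nobjs only override the config module's
# values when they are greater than 0; otherwise the config value (or a default
# of 1) is kept.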
def | (args, reload_module=None):
env_path = args.environment_path
if reload_module is not None:
module = reload_module
imp.reload(module)
else:
module = importlib.import_module(env_path)
env = args.environment
return getattr(module, env)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, default='')
parser.add_argument('-wd', '--weight_dir', type=str, default='')
parser.add_argument('-nf', '--nofull', action='store_true', default=False)
parser.add_argument('-n', '--nconds', type=int, default=0)
parser.add_argument('-o', '--nobjs', type=int, default=0)
# parser.add_argument('-ptt', '--pretrain_timeout', type=int, default=300)
parser.add_argument('-hlt', '--hl_timeout', type=int, default=0)
parser.add_argument('-k', '--killall', action='store_true', default=True)
parser.add_argument('-r', '--remote', action='store_true', default=False)
parser.add_argument('-t', '--timing', action='store_true', default=False)
parser.add_argument('-vae', '--vae_server', action='store_true', default=False)
parser.add_argument('-sim', '--rollout_server', action='store_true', default=False)
parser.add_argument('-all', '--all_servers', action='store_true', default=False)
parser.add_argument('-v', '--viewer', action='store_true', default=False)
parser.add_argument('-id', '--server_id', type=str, default='')
parser.add_argument('-env_path', '--environment_path', type=str, default='')
parser.add_argument('-env', '--environment', type=str, default='')
parser.add_argument('-tamp', '--use_tamp', type=str, default='')
parser.add_argument('-nrs', '--n_rollout_servers', type=int, default=1)
parser.add_argument('-ncp', '--no_child_process', action='store_true', default=False)
parser.add_argument('-rl', '--rollout_len', type=int, default=0)
parser.add_argument('-tv', '--train_vae', action='store_true', default=False)
parser.add_argument('-uncond', '--unconditional', action='store_true', default=False)
parser.add_argument('-tr', '--train_reward', action='store_true', default=False)
parser.add_argument('-loadstep', '--load_step', type=int, default=-1)
parser.add_argument('-beta', '--beta', type=int, default=1)
parser.add_argument('-beta_d', '--overshoot_beta', type=int, default=1)
parser.add_argument('-nts', '--train_samples', type=int, default=-1)
parser.add_argument('-rnn', '--use_recurrent_dynamics', action='store_true', default=False)
parser.add_argument('-over', '--use_overshooting', action='store_true', default=False)
parser.add_argument('-dist', '--dist_constraint', action='store_true', default=False)
args = parser.parse_args()
config, config_module = load_config(args)
if args.config != '':
main = MultiProcessMain(config)
else:
env_cls = load_env(args)
main = MultiProcessMain.no_config_load(env_cls, args.environment, config)
main.start(kill_all=args.killall)
if __name__ == '__main__':
main()
| load_env |
index.js | /**
* The REST entity generator
* @namespace entity
*/
'use strict';
const generator = require ('yeoman-generator'),
to = require ('to-case'),
validators = require ('../util/validators'),
editors = require ('../util/editors');
var index;
require ('harmony-reflect');
module.exports = generator.Base.extend ({
/**
* Generator for adding custom entities
* @class EntityGenerator
* @memberOf entity
*/
constructor: function constructor () {
Reflect.apply (generator.Base, this, arguments);
/**
* Entity definition
* @member {Object} entity.EntityGenerator~entity
* @private
*/
this.entity = {
collectionName: '',
fields: []
};
},
/**
* Initializes items that cannot be initialized in the constructor
* @function entity.EntityGenerator~init
*/
init () {
/**
* camelCase app name
* @member {String} entity.EntityGenerator~appCamel
* @private
*/
this.appCamel = to.camel (this.config.get ('cfgName'));
/**
* slug-case app name
* @member {String} entity.EntityGenerator~appSlug
* @private
*/
this.appSlug = to.slug (this.config.get ('cfgName'));
/**
* moment instance | */
this.moment = require ('moment');
},
/**
* Returns true if the currently prompted field is not a Boolean
* @function entity.EntityGenerator~_isNotBoolean
* @private
* @param {Object} answers - current prompt answers
* @returns {Boolean} true if field is not a Boolean
*/
_isNotBoolean (answers) {
return 'Boolean' !== answers [`type${ index }`];
},
/**
* Returns true if the currently prompted field is a Date
* @function entity.EntityGenerator~_isDate
* @private
* @param {Object} answers - current prompt answers
* @returns {Boolean} true if field is a Date
*/
_isDate (answers) {
return 'Date' === answers [`type${ index }`];
},
/**
* Returns true if the currently prompted field is a Number (and not an Integer)
* @function entity.EntityGenerator~_isNumber
* @private
* @param {Object} answers - current prompt answers
* @returns {Boolean} true if field is a Number
*/
_isNumber (answers) {
return 'Number' === answers [`type${ index }`] && !answers [`integer${ index }`];
},
/**
* Returns true if the currently prompted field is an Integer
* @function entity.EntityGenerator~_isInteger
* @private
* @param {Object} answers - current prompt answers
* @returns {Boolean} true if field is an Integer
*/
_isInteger (answers) {
return 'Number' === answers [`type${ index }`] && answers [`integer${ index }`];
},
/**
* Returns true if the currently prompted field is a String
* @function entity.EntityGenerator~_isString
* @private
* @param {Object} answers - current prompt answers
* @returns {Boolean} true if field is a String
*/
_isString (answers) {
return 'String' === answers [`type${ index }`];
},
/**
* Prompts user for a field
* @function entity.EntityGenerator~_promptField
* @private
* @param {Function} done - async done callback
*/
_promptField (done) {
var prompt = [
{ name: `name${ index }`, message: 'Field Name (leave empty if done)', validate: validators.fieldName }
],
field = {};
this.prompt (prompt, answers => {
if (answers [`name${ index }`]) {
field.name = answers [`name${ index }`];
field.slug = to.slug (answers [`name${ index }`]);
field.camel = to.camel (answers [`name${ index }`]);
prompt = [
{ name: `type${ index }`, message: 'Field Type', type: 'list', choices: [ 'Boolean', 'Date', 'Number', 'String' ] },
{ name: `required${ index }`, message: 'Required', type: 'confirm', when: this._isNotBoolean },
{ name: `min${ index }`, message: 'Minimum Date (MM-DD-YYYY) (leave empty for no minimum)', validate: validators.date, when: this._isDate },
{ name: `max${ index }`, message: 'Maximum Date (MM-DD-YYYY) (leave empty for no maximum)', validate: validators.date, when: this._isDate },
{ name: `integer${ index }`, message: 'Integer', type: 'confirm', when: this._isNumber },
{ name: `min${ index }`, message: 'Minimum Value (leave empty for no minimum)', validate: validators.integer, when: this._isInteger },
{ name: `max${ index }`, message: 'Maximum Value (leave empty for no maximum)', validate: validators.integer, when: this._isInteger },
{ name: `min${ index }`, message: 'Minimum Value (leave empty for no minimum)', validate: validators.number, when: this._isNumber },
{ name: `max${ index }`, message: 'Maximum Value (leave empty for no maximum)', validate: validators.number, when: this._isNumber },
{ name: `min${ index }`, message: 'Minimum Length (leave empty for no minimum)', validate: validators.integer, when: this._isString },
{ name: `max${ index }`, message: 'Maximum Length (leave empty for no maximum)', validate: validators.integer, when: this._isString }
];
this.prompt (prompt, fieldAnswers => {
field.type = fieldAnswers [`type${ index }`];
field.required = fieldAnswers [`required${ index }`];
field.min = fieldAnswers [`min${ index }`];
field.max = fieldAnswers [`max${ index }`];
field.integer = fieldAnswers [`integer${ index }`];
this.entity.fields.push (field);
index++;
this._promptField (done);
});
} else {
done ();
}
});
},
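// Note: _promptField recurses, collecting one field definition per iteration,
// until the name prompt is left empty, at which point the async `done`
// callback is invoked.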
/**
* Generates locale strings for a Date
* @function entity.EntityGenerator~_dateStrings
* @private
* @param {Object} json - object to store strings in
* @param {Object} field - field to generate strings for
*/
_dateStrings (json, field) {
if (field.min) {
json.msg.validate [field.camel].min = `${ field.name } is outside of the allowed range.`;
}
if (field.max) {
json.msg.validate [field.camel].max = `${ field.name } is outside of the allowed range.`;
}
json.msg.validate [field.camel].date = 'Invalid date.';
},
/**
* Generates locale strings for a Number
* @function entity.EntityGenerator~_numberStrings
* @private
* @param {Object} json - object to store strings in
* @param {Object} field - field to generate strings for
*/
_numberStrings (json, field) {
if (field.min) {
json.msg.validate [field.camel].min = `${ field.name } must be greater than or equal to ${ field.min }.`;
}
if (field.max) {
json.msg.validate [field.camel].max = `${ field.name } must be less than or equal to ${ field.max }.`;
}
if (field.integer) {
json.msg.validate [field.camel].pattern = `${ field.name } must be an integer.`;
}
},
/**
* Generates locale strings for a String
* @function entity.EntityGenerator~_stringStrings
* @private
* @param {Object} json - object to store strings in
* @param {Object} field - field to generate strings for
*/
_stringStrings (json, field) {
if (field.min) {
json.msg.validate [field.camel].minlength = `${ field.name } must be greater than or equal to ${ field.min } characters.`;
}
if (field.max) {
json.msg.validate [field.camel].maxlength = `${ field.name } must be less than or equal to ${ field.max } characters.`;
}
},
/**
* Generates entity files for AngularJS
* @function entity.EntityGenerator~_angular
* @private
*/
_angular () {
var json = {
nav: this.entity.collectionName,
title: this.entity.collectionName,
titleNew: `New ${ this.entity.collectionName }`,
titleEdit: `Edit ${ this.entity.collectionName }`,
field: {},
msg: {
error: {
createFailed: 'Create Failed!',
updateFailed: 'Update Failed!',
updateSuccess: 'Update Succeeded!'
},
validate: {
}
},
header: {},
btn: {
create: 'Create',
search: 'Search',
cancel: 'Cancel',
save: 'Save',
back: 'Back',
update: 'Update',
delete: 'Delete'
}
},
data = {};
this.entity.fields.forEach (field => {
json.field [field.camel] = field.name;
json.header [field.camel] = field.name;
json.msg.validate [field.camel] = {};
switch (field.type) {
case 'Date':
this._dateStrings (json, field);
break;
case 'Number':
this._numberStrings (json, field);
break;
default:
case 'String':
this._stringStrings (json, field);
break;
}
if (field.required) {
json.msg.validate [field.camel].required = `${ field.name } is required.`;
}
});
editors.appendHtml (this, 'src/web/index.html', '<!-- build:js app/app.min.js -->', '<!-- /build -->', `<script src="app/components/${ this.entity.collectionSlug }/${ this.entity.collectionSlug }.component.js"></script>`);
editors.appendHtml (this,
'src/web/app/components/topnav/topnav.view.html',
'<!-- entity -->',
'<!-- /entity -->',
`<li><a ${ this.appSlug }-authenticated ui-sref="${ this.entity.collectionSlug }" translate="${ this.entity.collectionSlug }.nav">${ this.entity.collectionName }</a></li>`
);
data [this.entity.collectionCamel] = json;
editors.appendJSON (this, 'src/web/assets/locale/locale-en.json', data);
this.template ('component.js', `src/web/app/components/${ this.entity.collectionSlug }/${ this.entity.collectionSlug }.component.js`);
this.template ('view.html', `src/web/app/components/${ this.entity.collectionSlug }/${ this.entity.collectionSlug }.view.html`);
this.template ('test.angular.js', `test/unit/web/app/components/${ this.entity.collectionSlug }/${ this.entity.collectionSlug }.component.js`);
},
/**
* Initial set of user prompts
* @function entity.EntityGenerator~askFor
*/
askFor () {
const done = this.async (),
prompts = [
{ name: 'collectionName', message: 'Collection Name', validate: validators.collectionName }
];
index = 0;
this.prompt (prompts, answers => {
this.entity.collectionName = answers.collectionName;
this.entity.collectionSlug = to.slug (answers.collectionName);
this.entity.collectionPascal = to.pascal (answers.collectionName);
this.entity.collectionCamel = to.camel (answers.collectionName);
this._promptField (done);
});
},
/**
* Generates the entity based on prompt values
* @function entity.EntityGenerator~app
*/
app () {
this.template ('model.js', `src/server/models/${ this.entity.collectionSlug }.js`);
this.template ('route.js', `src/server/routes/${ this.entity.collectionSlug }.js`);
this.template ('test.js', `test/unit/server/routes/${ this.entity.collectionSlug }.js`);
this._angular ();
}
}); | * @member {Object} entity.EntityGenerator~moment
* @private |
mamUtils.js | const Mam = require('./mam.node.js');
const iota = require('./iotaSetup.js');
const Cert = require('./cert.js');
const tools = require('./tools.js');
const now = require('moment');
const debug = process.env.DEBUG || 0;
const localStore = require('./localStore.js');
const AccountStore = localStore.Account;
const ContactStore = localStore.Contact;
const MessageStore = localStore.Message;
const path = require('path');
const mainPath = path.dirname(require.main.filename);
class | {
/* make the callback function into promise */
constructor(callback) {
const self = this;
const databaseInitialize = () => {
Mam.init(iota);
self.accountStore = new AccountStore(path.resolve(mainPath, 'json', 'account.json'), () => {
self.contactStore = new ContactStore(path.resolve(mainPath, 'json', 'contact.json'), () => {
self.messageStore = new MessageStore(path.resolve(mainPath, 'json', 'message.json'), () => {
callback();
});
});
});
};
databaseInitialize();
}
async publishMessage(message, params) {
const packet = { msg: message };
/* add timestamp */
packet.timestamp = now.utc().unix();
const mamState = await this.getActiveMamState(params);
await this.sendMamMsg(packet, params, mamState);
this.messageUpdate(packet, params);
}
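// Flow sketch: stamp the message with a UTC unix timestamp, load the active MAM
// state for this sender/receiver pair, attach the message to the Tangle via
// sendMamMsg, then mirror it into the local message store.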
async recoverMamMsg(params) {
console.log('message recovering');
const { initRoot, sideKey } = this.getRecoveryProps(params);
const messages = [];
await Mam.fetch(initRoot, 'restricted', sideKey, (tryte) => {
const data = JSON.parse(iota.utils.fromTrytes(tryte));
data.fromSelf = true;
messages.push(data); /* data wrapped message and other things */
});
const store = this.messageStore.findOnly({ id: params.sender });
store.messages[params.receiver] = [].concat(messages);
this.messageStore.update(store);
}
getRecoveryProps(params) {
/* make it restart from first root */
let store = this.contactStore.findOnly({ id: params.sender });
let receiver = store.contacts[params.receiver];
const mamState = deepCopy(receiver.activeMamState);
mamState.channel.start = 0;
Mam.create(mamState, 'INITIALMESSAGE');
return {
initRoot: mamState.channel.next_root,
sideKey: mamState.channel.side_key
};
}
async fetchMessages(uuid) {
const recvPriv = this.accountStore.findOnly({ id: uuid }).sk;
const roots = await Cert.getBundles(uuid, 'M')
.then(async data =>
await Promise.all(data.map(async item => {
try {
const message = JSON.parse(item.message);
const msg = JSON.parse(tools.decryptUTF(message.msg, recvPriv));
const bundles = await Cert.getBundles(msg.id, 'I');
const sendPub = bundles[0].message.pk;
const verified = tools.verify(msg.root, message.sign, sendPub);
/* TODO: signature verification */
if (verified) {
return {
id: msg.id,
root: msg.root,
sideKey: msg.sideKey,
};
}
} catch (err) {
console.log(err);
}
}))
);
console.log('Fetched roots:', roots);
const messages = {};
const contacts = this.contactStore.findOnly({ id: uuid }).contacts;
/* TODO: filter messages in contact */
// var contactRoots = roots.filter(msg => msg.id in contacts)
const contactRoots = roots;
/* fetch from local */
const localmsg = this.messageStore.findOnly({ id: uuid }).messages;
// console.log('local:', localmsg)
await Promise.all(contactRoots.map(async (tx) => {
const id = tx.id;
const root = tx.root;
const sideKey = tx.sideKey;
messages[id] = [];
/* fetch from tangle */
await Mam.fetch(root, 'restricted', sideKey, (tryte) => {
const data = JSON.parse(iota.utils.fromTrytes(tryte));
data.fromSelf = false;
messages[id].push(data); /* data wrapped message and other things */
});
}));
Object.keys(localmsg).map((id) => {
if (id in messages) {
/* sort by timestamp */
messages[id] = combineSortedArray(
messages[id], localmsg[id],
(i, j) => i.timestamp >= j.timestamp,
);
} else {
messages[id] = [].concat(localmsg[id]);
}
});
console.log(messages);
return messages;
}
async sendMamMsg(packet, params, mamState) {
console.log('sendMamMsg');
let recvPub = await Cert.getBundles(params.receiver, 'I');
if (recvPub.length !== 1) { console.log('error: Get initial claim failed'); }
recvPub = recvPub[0].message.pk;
const sendPriv = this.accountStore.findOnly({ id: params.sender }).sk;
const msg = JSON.stringify(packet);
const trytes = iota.utils.toTrytes(msg);
const message = Mam.create(mamState, trytes);
this.updateActiveMamState(params, message.state);
if (debug) {
console.log('Root: ', message.root);
console.log('Address: ', message.address);
console.log('length: ', message.payload.length);
}
const tx = await Mam.attach(message.payload, message.address);
console.log('sendMamMsg finished');
}
addContact(userID, contactorID) {
const store = this.contactStore.findOnly({ id: userID });
if (contactorID in store.contacts) {
console.log('[ERROR] error adding exist contact');
return;
}
store.contacts[contactorID] = {};
this.contactStore.update(store);
}
async initMamClaim(params) {
console.log('initMamClaim');
// console.log(params)
const receiverBundles = await Cert.getBundles(params.receiver, 'I');
const receiverPublicKey = receiverBundles[0].message.pk;
// console.log('recvPub:', receiverPublicKey)
const senderPrivateKey = this.accountStore.findOnly({ id: params.sender }).sk;
// console.log('sendPriv:', senderPrivatyKey)
const state = this.getInitMamState(params);
const root = Mam.getRoot(state);
const sideKey = state.channel.side_key;
// console.log(root)
const info = {
id: params.sender,
root,
/* TODO: find proper key length */
sideKey,
};
const enc = tools.encrypt(JSON.stringify(info), receiverPublicKey);
const packet = {
msg: enc,
sign: tools.sign(root, senderPrivateKey)
};
if (debug) {
console.log('id: ', params.sender);
console.log('root: ', root);
console.log(`len:${JSON.stringify(packet).length}`);
}
const tx = await Cert.attach(JSON.stringify(packet), params.receiver, 'M', null);
}
getInitMamState(params) {
console.log('getInitMamState');
const store = this.contactStore.findOnly({ id: params.sender });
/* init mam state */
let mamState = Mam.init(iota);
mamState = Mam.changeMode(mamState, 'restricted', tools.seedGen(56));
/* update next_root, doesn't care return value */
Mam.create(mamState, 'INITIALMESSAGE');
store.contacts[params.receiver] = {
activeMamState: deepCopy(mamState),
};
if (debug) { console.log('Store updated: ', store); }
this.contactStore.update(store);
return deepCopy(mamState);
}
async getActiveMamState(params) {
console.log('getActiveMamState');
let store = this.contactStore.findOnly({ id: params.sender });
// console.log(store)
let receiver = store.contacts[params.receiver];
if (!(params.receiver in store.contacts)) {
await this.initMamClaim(params);
store = this.contactStore.findOnly({ id: params.sender });
receiver = store.contacts[params.receiver];
}
return deepCopy(receiver.activeMamState);
}
updateActiveMamState(params, mamState) {
const store = this.contactStore.findOnly({ id: params.sender });
store.contacts[params.receiver].activeMamState = deepCopy(mamState);
this.contactStore.update(store);
}
updateBackupMamState(params, bkState) {
const store = this.contactStore.findOnly({ id: params.sender });
store.contacts[params.receiver].backupMamState = deepCopy(bkState);
this.contactStore.update(store);
}
/* temporary workaround */
async createIdentity(packet) {
packet = deepCopy(packet);
delete packet.sk;
await Cert.attach(packet, packet.uuid, 'I');
}
accountUpdate(account) {
this.accountStore.insert(account);
this.contactStore.insert({
id: account.id,
contacts: {},
});
this.messageStore.insert({
id: account.id,
messages: {},
});
}
messageUpdate(packet, params) {
const store = this.messageStore.findOnly({ id: params.sender });
packet.fromSelf = true;
if (!(params.receiver in store.messages)) {
store.messages[params.receiver] = [];
}
store.messages[params.receiver].push(packet);
this.messageStore.update(store);
}
}
function deepCopy(data) {
return JSON.parse(JSON.stringify(data));
}
function combineSortedArray(arr1, arr2, cmp) {
let target = deepCopy(arr1);
let idx = 0;
for (let i = 0; i < arr2.length; idx++) {
if (idx === target.length) {
target = target.concat(arr2.slice(i));
break;
}
if (cmp(target[idx], arr2[i])) {
target.splice(idx, 0, arr2[i]);
i++;
}
}
return target;
}
module.exports = mam;
| mam |
window_settings.rs | use bevy::{prelude::*, window::WindowMode};
/// This example illustrates how to customize the default window settings
fn main() | {
App::build()
.add_resource(WindowDescriptor {
title: "I am a window!".to_string(),
width: 300,
height: 300,
vsync: true,
resizable: false,
mode: WindowMode::Fullscreen { use_size: false },
..Default::default()
})
.add_default_plugins()
.run();
} |
|
structs3.rs | // structs3.rs
// Structs contain data, but can also have logic. In this exercise we have
// defined the Package struct and we want to test some logic attached to it.
// Make the code compile and the tests pass!
// If you have issues execute `rustlings hint structs3`
#[derive(Debug)]
struct Package {
sender_country: String,
recipient_country: String,
weight_in_grams: i32,
}
impl Package {
fn new(sender_country: String, recipient_country: String, weight_in_grams: i32) -> Package {
if weight_in_grams <= 0 {
panic!();
} else {
return Package {
sender_country,
recipient_country,
weight_in_grams,
};
}
}
fn is_international(&self) -> bool {
self.sender_country != self.recipient_country
}
fn get_fees(&self, cents_per_gram: i32) -> i32 {
return self.weight_in_grams * cents_per_gram;
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[should_panic]
fn fail_creating_weightless_package() {
let sender_country = String::from("Spain");
let recipient_country = String::from("Austria");
Package::new(sender_country, recipient_country, -2210);
}
#[test]
fn create_international_package() {
let sender_country = String::from("Spain");
let recipient_country = String::from("Russia");
let package = Package::new(sender_country, recipient_country, 1200);
assert!(package.is_international());
}
#[test]
fn create_local_package() {
let sender_country = String::from("Canada");
let recipient_country = sender_country.clone();
let package = Package::new(sender_country, recipient_country, 1200);
assert!(!package.is_international());
}
#[test]
fn calculate_transport_fees() |
}
| {
let sender_country = String::from("Spain");
let recipient_country = String::from("Spain");
let cents_per_gram = 3;
let package = Package::new(sender_country, recipient_country, 1500);
assert_eq!(package.get_fees(cents_per_gram), 4500);
} |
oauth.go | package oauth
import (
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"net/url"
"os"
"strings"
)
type HttpClientFactory func() *http.Client
// TokenResponse represents successful token response
type TokenResponse struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
ExpiresIn int `json:"expires_in"`
RefreshToken string `json:"refresh_token"`
}
// ErrorResponse represents a failed response
type ErrorResponse struct {
Error string `json:"error"`
Description string `json:"error_description"`
}
// OauthClient represents a stateful Oauth client
type OauthClient struct {
Service string
Client *http.Client
Headers map[string]string
ClientID string
ClientSecret string
SourceHeader string
ResponseHeaders http.Header
AppID string
Scope string
Token string
SkipCertVerify bool
}
// OauthConfig represents configuration used to create Oauth clients
type OauthConfig struct {
ClientID string `json:"clientID" structs:"clientID" mapstructure:"clientID"`
ClientSecret string `json:"clientSecret" structs:"clientSecret" mapstructure:"clientSecret"`
ServiceURL string `json:"serviceUrl" structs:"serviceUrl" mapstructure:"serviceUrl"`
AppID string `json:"appID" structs:"appID" mapstructure:"appID"`
Scope string `json:"scope,omitempty" structs:"scope" mapstructure:"scope"`
Policies []string `json:"policies,omitempty" structs:"policies" mapstructure:"policies"`
SkipCertVerify bool `json:"skipCertVerify,omitempty" structs:"skipCertVerify" mapstructure:"skipCertVerify"`
}
// GetConfig reads config from a file specified as arg or from the environment
func GetConfig(configPath string) (*OauthConfig, error) {
var config OauthConfig
// No path specified; use environment
if configPath == "" {
path, found := os.LookupEnv("CENTRIFY_OAUTH_CONFIGPATH")
if found {
configPath = path
}
}
if configPath != "" {
fileBytes, err := ioutil.ReadFile(configPath)
if err != nil {
return nil, fmt.Errorf("error reading configuration from path '%s': %s", configPath, err)
}
err = json.Unmarshal(fileBytes, &config)
if err != nil {
return nil, fmt.Errorf("error demarshalling configuration from path '%s': %s", configPath, err)
}
}
return &config, nil
}
// GetNewClient creates a new client for the specified endpoint
func GetNewClient(service string, httpFactory HttpClientFactory) (*OauthClient, error) {
jar, err := cookiejar.New(nil)
if err != nil {
return nil, err
}
// Munge on the service a little bit, force it to have no trailing / and always start with https://
url, err := url.Parse(service)
if err != nil {
return nil, err
}
url.Scheme = "https"
url.Path = ""
client := &OauthClient{}
client.Service = url.String()
if httpFactory != nil {
client.Client = httpFactory()
} else {
client.Client = &http.Client{}
}
client.Client.Jar = jar
client.Headers = make(map[string]string)
client.SourceHeader = "cloud-golang-sdk"
return client, err
}
// GetNewConfidentialClient creates a new client for the specified endpoint
func GetNewConfidentialClient(service string, clientID string, clientSecret string, httpFactory HttpClientFactory) (*OauthClient, error) {
client, err := GetNewClient(service, httpFactory)
if err != nil {
return nil, err
}
client.ClientID = clientID
client.ClientSecret = clientSecret
return client, nil
}
// ResourceOwner implements the ResourceOwner flow
func (c *OauthClient) ResourceOwner(appID string, scope string, owner string, ownerPassword string) (*TokenResponse, *ErrorResponse, error) {
args := make(map[string]string)
args["grant_type"] = "password"
args["username"] = owner
args["password"] = ownerPassword
args["scope"] = scope
return c.postAndGetResponse("/oauth2/token/"+appID, args)
}
func (c *OauthClient) ClientCredentials(appID string, scope string) (*TokenResponse, *ErrorResponse, error) {
args := make(map[string]string)
args["grant_type"] = "client_credentials"
args["scope"] = scope
return c.postAndGetResponse("/oauth2/token/"+appID, args)
}
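// Illustrative usage (endpoint, app ID and scope are assumptions):
//   client, _ := GetNewConfidentialClient("https://tenant.example.com", "myClientID", "myClientSecret", nil)
//   token, errResp, err := client.ClientCredentials("myapp", "all")
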
func (c *OauthClient) RefreshToken(appID string, refreshToken string) (*TokenResponse, *ErrorResponse, error) {
args := make(map[string]string)
args["grant_type"] = "refresh_token"
args["refresh_token"] = refreshToken
return c.postAndGetResponse("/oauth2/token/"+appID, args)
}
func (c *OauthClient) postAndGetResponse(method string, args map[string]string) (*TokenResponse, *ErrorResponse, error) {
body, status, err := c.postAndGetBody(method, args)
if err != nil {
return nil, nil, err
}
if status == 200 {
response, err := bodyToTokenResponse(body)
if err != nil {
return nil, nil, err
}
return response, nil, nil
}
response, err := bodyToErrorResponse(body)
if err != nil {
return nil, nil, err
}
return nil, response, nil
}
func (c *OauthClient) postAndGetBody(method string, args map[string]string) ([]byte, int, error) {
postdata := strings.NewReader(payloadFromMap(args))
postreq, err := http.NewRequest("POST", c.Service+method, postdata)
if err != nil {
return nil, 0, err
}
if c.ClientID != "" && c.ClientSecret != "" {
postreq.Header.Add("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(c.ClientID+":"+c.ClientSecret)))
} |
postreq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
postreq.Header.Add("X-CENTRIFY-NATIVE-CLIENT", "Yes")
postreq.Header.Add("X-CFY-SRC", c.SourceHeader)
for k, v := range c.Headers {
postreq.Header.Add(k, v)
}
httpresp, err := c.Client.Do(postreq)
if err != nil {
c.ResponseHeaders = nil
return nil, 0, err
}
defer httpresp.Body.Close()
c.ResponseHeaders = httpresp.Header
body, err := ioutil.ReadAll(httpresp.Body)
if err != nil {
return nil, httpresp.StatusCode, err
}
return body, httpresp.StatusCode, nil
}
// GetLastResponseHeaders returns the response header for the previous REST call
func (c *OauthClient) GetLastResponseHeaders() http.Header {
return c.ResponseHeaders
}
func payloadFromMap(input map[string]string) string {
data := url.Values{}
for i, v := range input {
data.Add(i, v)
}
return data.Encode()
}
func bodyToTokenResponse(body []byte) (*TokenResponse, error) {
reply := &TokenResponse{}
err := json.Unmarshal(body, &reply)
if err != nil {
return nil, err
}
return reply, nil
}
func bodyToErrorResponse(body []byte) (*ErrorResponse, error) {
reply := &ErrorResponse{}
err := json.Unmarshal(body, &reply)
if err != nil {
return nil, err
}
return reply, nil
} | |
form_component.rs | use wasm_bindgen_test::*;
use yew::prelude::*;
use yew::{utils, App};
/// # Form
///
/// ## Example
///
/// ```rust
/// use std::collections::HashMap;
/// use wasm_bindgen::JsCast;
/// use web_sys::{HtmlOptionElement, HtmlSelectElement};
/// use yew::prelude::*;
/// use yew::utils;
/// use yew_styles::forms::{
/// form_component::Form,
/// form_group::{FormGroup, Orientation},
/// form_input::{FormInput, InputType},
/// form_label::FormLabel,
/// form_select::FormSelect,
/// form_submit::FormSubmit,
/// form_textarea::FormTextArea,
/// };
/// use yew_styles::layouts::{
/// container::{Container, Direction, Wrap},
/// item::{Item, ItemLayout},
/// };
/// use yew_styles::styles::{Palette, Style};
///
/// #[derive(Clone)]
/// struct Fields {
/// first_name: String,
/// last_name: String,
/// email: String,
/// specialty: String,
/// skills: Vec<String>,
/// cover_letter: String,
/// }
///
/// pub struct BasicFormPage {
/// link: ComponentLink<Self>,
/// fields: HashMap<String, String>,
/// skills: Vec<String>,
/// empty_fields: Vec<String>,
/// result: Option<Fields>,
/// }
///
/// pub enum Msg {
/// FirstName(String),
/// LastName(String),
/// Email(String),
/// Specialty(String),
/// Skills(Vec<String>),
/// CoverLetter(String),
/// Submit,
/// }
///
/// impl Component for BasicFormPage {
/// type Message = Msg;
/// type Properties = ();
///
/// fn create(_props: Self::Properties, link: ComponentLink<Self>) -> Self {
/// Self {
/// link,
/// fields: HashMap::new(),
/// skills: vec![],
/// empty_fields: vec![],
/// result: None,
/// }
/// }
///
/// fn update(&mut self, msg: Self::Message) -> ShouldRender {
/// match msg {
/// Msg::FirstName(first_name) => {
/// self.fields.insert("first_name".to_string(), first_name);
/// }
/// Msg::LastName(last_name) => {
/// self.fields.insert("last_name".to_string(), last_name);
/// }
/// Msg::Email(email) => {
/// self.fields.insert("email".to_string(), email);
/// }
/// Msg::Specialty(specialty) => {
/// self.fields.insert("specialty".to_string(), specialty);
/// }
/// Msg::Skills(skills) => {
/// self.skills = skills;
/// }
/// Msg::CoverLetter(cover_letter) => {
/// self.fields.insert("cover_letter".to_string(), cover_letter);
/// }
/// Msg::Submit => {
/// self.empty_fields = get_empty_fields(self.fields.clone());
///
/// if self.empty_fields.is_empty() {
/// self.result = Some(Fields {
/// first_name: self.fields.get("first_name").unwrap().to_string(),
/// last_name: self.fields.get("last_name").unwrap().to_string(),
/// email: self.fields.get("email").unwrap().to_string(),
/// specialty: self.fields.get("specialty").unwrap().to_string(),
/// skills: self.skills.clone(),
/// cover_letter: self.fields.get("cover_letter").unwrap().to_string(),
/// });
/// self.fields.drain();
///
/// self.skills = vec![];
///
/// set_default_selected("specialty");
/// remove_all_selected("skills");
/// }
/// }
/// }
///
/// true
/// }
///
/// fn change(&mut self, _props: Self::Properties) -> ShouldRender {
/// false
/// }
///
/// fn view(&self) -> Html {
/// html! {
/// <Container wrap=Wrap::Wrap direction=Direction::Row>
/// <Item layouts=vec!(ItemLayout::ItXs(12))>
/// <h1>{"Basic Form"}</h1>
/// </Item>
/// <Item layouts=vec!(ItemLayout::ItXs(12))>
/// <Form onsubmit_signal=self.link.callback(|e| Msg::Submit)>
/// <Container wrap=Wrap::Wrap direction=Direction::Row>
/// <Item layouts=vec!(ItemLayout::ItM(6), ItemLayout::ItXs(12))>
/// <FormGroup orientation=Orientation::Horizontal>
/// <FormLabel text="First name: "/>
/// <FormInput
/// value=match self.fields.get("first_name") {
/// Some(value) => value,
/// None => ""
/// }
/// error_state=self.empty_fields.iter().any(|field| field == "first_name")
/// error_message="First name field is required"
/// input_type=InputType::Text
/// oninput_signal=self.link.callback(|e: InputData| Msg::FirstName(e.value))
/// />
/// </FormGroup>
/// <FormGroup orientation=Orientation::Horizontal>
/// <FormLabel text="Last name: "/>
/// <FormInput
/// value=match self.fields.get("last_name") {
/// Some(value) => value,
/// None => ""
/// }
/// error_state=self.empty_fields.iter().any(|field| field == "last_name")
/// error_message="Last name field is required"
/// input_type=InputType::Text
/// oninput_signal=self.link.callback(|e: InputData| Msg::LastName(e.value))
/// />
/// </FormGroup>
/// <FormGroup orientation=Orientation::Horizontal>
/// <FormLabel text="Email: "/>
/// <FormInput
/// value=match self.fields.get("email") {
/// Some(value) => value,
/// None => ""
/// }
/// error_state=self.empty_fields.iter().any(|field| field == "email")
/// error_message="Email field is required"
/// input_type=InputType::Email
/// oninput_signal=self.link.callback(|e: InputData| Msg::Email(e.value))
/// />
/// </FormGroup>
/// </Item>
/// <Item layouts=vec!(ItemLayout::ItM(6), ItemLayout::ItXs(12))>
/// <FormGroup orientation=Orientation::Vertical>
/// <FormLabel text="Specialty:"/>
/// <FormSelect
/// id="specialty"
/// error_state=self.empty_fields.iter().any(|field| field == "specialty")
/// error_message="Select specialty is required"
/// onchange_signal=self.link.callback(|e: ChangeData| {
/// match e {
/// ChangeData::Select(element) => {
/// let value = element.value();
/// Msg::Specialty(value)
/// },
/// _ => unreachable!()
/// }
/// })
/// options=html!{
/// <>
/// <option value="" disabled=true>{"Choose specialty"}</option>
/// <option value="frontend">{"Frontend"}</option>
/// <option value="backend">{"Backend"}</option>
/// </>
/// }
/// />
/// </FormGroup>
/// <FormGroup orientation=Orientation::Vertical>
/// <FormLabel text="Skills:"/>
/// <FormSelect
/// id="skills"
/// multiple=true
/// onchange_signal=self.link.callback(|e: ChangeData| {
/// match e {
/// ChangeData::Select(element) => {
/// let mut values = vec![];
/// let options = element.options();
///
/// for i in 0..options.length() {
/// let option = options
/// .get_with_index(i)
/// .unwrap()
/// .dyn_into::<HtmlOptionElement>()
/// .unwrap();
/// if option.selected() {
/// values.push(option.value());
/// }
/// }
/// Msg::Skills(values)
/// },
/// _ => unreachable!()
/// }
/// })
/// options=html!{
/// <>
/// <option value="yew">{"Yew.rs"}</option>
/// <option value="rustwasm">{"Rustwasm"}</option>
/// <option value="rust">{"Rust"}</option>
/// <option value="warp">{"Warp"}</option>
/// <option value="tokio">{"Tokio"}</option>
/// </>
/// }
/// />
/// </FormGroup>
/// </Item>
/// <Item layouts=vec!(ItemLayout::ItXs(12))>
/// <FormGroup orientation=Orientation::Vertical>
/// <FormLabel text="Cover letter:"/>
/// <FormTextArea
/// value=match self.fields.get("cover_letter") {
/// Some(value) => value,
/// None => ""
/// }
/// error_state=self.empty_fields.iter().any(|field| field == "cover_letter")
/// error_message="cover letter is required"
/// oninput_signal=self.link.callback(|e: InputData| Msg::CoverLetter(e.value))/>
/// </FormGroup>
/// </Item>
/// <Item layouts=vec!(ItemLayout::ItXs(12), ItemLayout::ItM(3))>
/// <FormGroup>
/// <FormSubmit
/// value="Submit application"
/// submit_type=Palette::Success
/// submit_style=Style::Outline
/// />
/// </FormGroup>
/// </Item>
/// </Container>
/// </Form>
/// </Item>
/// <Item layouts=vec!(ItemLayout::ItXs(12))>
/// {get_result(self.result.clone())}
/// </Item>
/// </Container>
/// }
/// }
/// }
///
/// fn get_result(result: Option<Fields>) -> Html {
/// if let Some(form) = result {
/// html! {
/// <Container wrap=Wrap::Wrap direction=Direction::Row>
/// <Item layouts=vec!(ItemLayout::ItXs(12))>
/// <p><b>{"First name: "}</b>{form.first_name.clone()}</p>
/// <p><b>{"last name: "}</b>{form.last_name.clone()}</p>
/// <p><b>{"email: "}</b>{form.email.clone()}</p>
/// <p><b>{"Specialty: "}</b>{form.specialty.clone()}</p>
/// <p><b>{"Skills: "}</b>{form.skills.join(", ")}</p>
/// <p><b>{"Cover letter: "}</b>{form.cover_letter}</p>
/// </Item>
/// </Container>
/// }
/// } else {
/// html! {}
/// }
/// }
///
/// fn get_empty_fields(fields: HashMap<String, String>) -> Vec<String> {
/// let total_fields = vec![
/// "first_name",
/// "last_name",
/// "email",
/// "specialty",
/// "cover_letter",
/// ];
/// let mut empty_fields: Vec<String> = vec![];
///
/// for field in total_fields {
/// let is_filled = fields
/// .iter()
/// .any(|(key, value)| key == field && !value.is_empty());
/// if !is_filled {
/// empty_fields.push(field.to_string());
/// }
/// }
///
/// empty_fields
/// }
///
/// fn remove_all_selected(select: &str) {
/// let specialty_form_element = utils::document()
/// .get_element_by_id(select)
/// .unwrap()
/// .dyn_into::<HtmlSelectElement>()
/// .unwrap();
/// let specialty_options = specialty_form_element.options();
///
/// for i in 0..specialty_options.length() {
/// let option = specialty_options
/// .get_with_index(i)
/// .unwrap()
/// .dyn_into::<HtmlOptionElement>()
/// .unwrap();
///
/// option.set_selected(false);
/// }
/// }
///
/// fn set_default_selected(select: &str) {
/// let specialty_form_element = utils::document()
/// .get_element_by_id(select)
/// .unwrap()
/// .dyn_into::<HtmlSelectElement>()
/// .unwrap();
/// let specialty_options = specialty_form_element.options();
///
/// let option = specialty_options
/// .get_with_index(0)
/// .unwrap()
/// .dyn_into::<HtmlOptionElement>()
/// .unwrap();
///
/// option.set_selected(true);
/// }
/// ```
pub struct Form {
link: ComponentLink<Self>,
props: Props,
}
#[derive(Clone, Properties)]
pub struct Props {
/// Signal to emit the event submit.
#[prop_or(Callback::noop())]
pub onsubmit_signal: Callback<Event>,
pub children: Children,
/// The URL that processes the form submission
#[prop_or_default]
pub action: String,
/// The HTTP method to submit the form
#[prop_or(Method::Post)]
pub method: Method,
/// The name of the form
#[prop_or_default]
pub name: String,
/// General property to add custom class styles
#[prop_or_default]
pub class_name: String,
/// General property to add custom id
#[prop_or_default]
pub id: String,
}
#[derive(Clone)]
pub enum Method {
Post,
Get,
Dialog,
}
pub enum Msg {
Submitted(Event),
}
impl Component for Form {
type Message = Msg;
type Properties = Props;
fn create(props: Self::Properties, link: ComponentLink<Self>) -> Self {
Form { link, props }
}
fn update(&mut self, msg: Self::Message) -> ShouldRender |
fn change(&mut self, props: Self::Properties) -> ShouldRender {
self.props = props;
true
}
fn view(&self) -> Html {
html! {
<form
onsubmit=self.link.callback(|e: Event| Msg::Submitted(e))
action=self.props.action
method=get_method(self.props.method.clone())
name=self.props.name
class=format!("form {}", self.props.class_name)
id=format!("{}", self.props.id)
>
{ self.props.children.render() }
</form>
}
}
}
fn get_method(method: Method) -> String {
match method {
Method::Get => "get".to_string(),
Method::Post => "post".to_string(),
Method::Dialog => "dialog".to_string(),
}
}
#[wasm_bindgen_test]
fn should_create_form_component() {
let props = Props {
class_name: "form-test".to_string(),
id: "form-test-id".to_string(),
onsubmit_signal: Callback::noop(),
method: Method::Post,
action: "".to_string(),
name: "form-test".to_string(),
children: Children::new(vec![html! {<input id="result"/>}]),
};
let form_component: App<Form> = App::new();
form_component.mount_with_props(
utils::document().get_element_by_id("output").unwrap(),
props,
);
let form_element = utils::document().get_element_by_id("result").unwrap();
assert_eq!(form_element.tag_name(), "INPUT");
}
#[wasm_bindgen_test]
fn should_submit_the_form() {
let body = utils::document().body().unwrap();
let element = utils::document().create_element("div").unwrap();
element.set_text_content(Some("fill the form"));
element.set_id("form");
body.append_child(&element).unwrap();
let onsubmit = Callback::from(|_| {
let content = utils::document().get_element_by_id("form").unwrap();
content.set_text_content(Some("form submitted"));
});
let props = Props {
class_name: "form-test".to_string(),
id: "form-test-id".to_string(),
onsubmit_signal: onsubmit,
method: Method::Post,
action: "".to_string(),
name: "form-test".to_string(),
children: Children::new(vec![html! {<input/>}]),
};
let event = Event::new("Submit").unwrap();
props.onsubmit_signal.emit(event);
let form_element = utils::document().get_element_by_id("form").unwrap();
assert_eq!(
form_element.text_content().unwrap(),
"form submitted".to_string()
);
}
| {
match msg {
Msg::Submitted(value) => {
value.prevent_default();
self.props.onsubmit_signal.emit(value);
}
};
true
} |
pessoa.py | MAIOR_IDADE = 18
class Pessoa:
def __init__(self, nome, idade):
self.nome = nome
self.idade = idade
def __str__(self):
|
def is_adult(self):
return (self.idade or 0) >= MAIOR_IDADE
| if not self.idade:
return self.nome
return f'{self.nome} - {self.idade}' |
215_test.go | package medium
import "testing"
func TestFindKthLargest(t *testing.T) {
arr := []int{3, 2, 1, 5, 6, 4}
k := 2
result := FindKthLargest(arr, k)
if result != 5 {
t.Logf("except %d, actual %d", 5, result)
t.FailNow() | }
} |
|
dicomstoreiampolicy.go | /*
Copyright AppsCode Inc. and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1alpha1 "kubeform.dev/provider-google-api/apis/healthcare/v1alpha1"
scheme "kubeform.dev/provider-google-api/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// DicomStoreIamPoliciesGetter has a method to return a DicomStoreIamPolicyInterface.
// A group's client should implement this interface.
type DicomStoreIamPoliciesGetter interface {
DicomStoreIamPolicies(namespace string) DicomStoreIamPolicyInterface
}
// DicomStoreIamPolicyInterface has methods to work with DicomStoreIamPolicy resources.
type DicomStoreIamPolicyInterface interface {
Create(ctx context.Context, dicomStoreIamPolicy *v1alpha1.DicomStoreIamPolicy, opts v1.CreateOptions) (*v1alpha1.DicomStoreIamPolicy, error)
Update(ctx context.Context, dicomStoreIamPolicy *v1alpha1.DicomStoreIamPolicy, opts v1.UpdateOptions) (*v1alpha1.DicomStoreIamPolicy, error)
UpdateStatus(ctx context.Context, dicomStoreIamPolicy *v1alpha1.DicomStoreIamPolicy, opts v1.UpdateOptions) (*v1alpha1.DicomStoreIamPolicy, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.DicomStoreIamPolicy, error)
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.DicomStoreIamPolicyList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.DicomStoreIamPolicy, err error)
DicomStoreIamPolicyExpansion
}
// dicomStoreIamPolicies implements DicomStoreIamPolicyInterface
type dicomStoreIamPolicies struct {
client rest.Interface
ns string
}
// newDicomStoreIamPolicies returns a DicomStoreIamPolicies
func newDicomStoreIamPolicies(c *HealthcareV1alpha1Client, namespace string) *dicomStoreIamPolicies {
return &dicomStoreIamPolicies{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the dicomStoreIamPolicy, and returns the corresponding dicomStoreIamPolicy object, and an error if there is any.
func (c *dicomStoreIamPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.DicomStoreIamPolicy, err error) {
result = &v1alpha1.DicomStoreIamPolicy{}
err = c.client.Get().
Namespace(c.ns).
Resource("dicomstoreiampolicies").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of DicomStoreIamPolicies that match those selectors.
func (c *dicomStoreIamPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.DicomStoreIamPolicyList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.DicomStoreIamPolicyList{}
err = c.client.Get().
Namespace(c.ns).
Resource("dicomstoreiampolicies").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested dicomStoreIamPolicies.
func (c *dicomStoreIamPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil |
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("dicomstoreiampolicies").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a dicomStoreIamPolicy and creates it. Returns the server's representation of the dicomStoreIamPolicy, and an error, if there is any.
func (c *dicomStoreIamPolicies) Create(ctx context.Context, dicomStoreIamPolicy *v1alpha1.DicomStoreIamPolicy, opts v1.CreateOptions) (result *v1alpha1.DicomStoreIamPolicy, err error) {
result = &v1alpha1.DicomStoreIamPolicy{}
err = c.client.Post().
Namespace(c.ns).
Resource("dicomstoreiampolicies").
VersionedParams(&opts, scheme.ParameterCodec).
Body(dicomStoreIamPolicy).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a dicomStoreIamPolicy and updates it. Returns the server's representation of the dicomStoreIamPolicy, and an error, if there is any.
func (c *dicomStoreIamPolicies) Update(ctx context.Context, dicomStoreIamPolicy *v1alpha1.DicomStoreIamPolicy, opts v1.UpdateOptions) (result *v1alpha1.DicomStoreIamPolicy, err error) {
result = &v1alpha1.DicomStoreIamPolicy{}
err = c.client.Put().
Namespace(c.ns).
Resource("dicomstoreiampolicies").
Name(dicomStoreIamPolicy.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(dicomStoreIamPolicy).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *dicomStoreIamPolicies) UpdateStatus(ctx context.Context, dicomStoreIamPolicy *v1alpha1.DicomStoreIamPolicy, opts v1.UpdateOptions) (result *v1alpha1.DicomStoreIamPolicy, err error) {
result = &v1alpha1.DicomStoreIamPolicy{}
err = c.client.Put().
Namespace(c.ns).
Resource("dicomstoreiampolicies").
Name(dicomStoreIamPolicy.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(dicomStoreIamPolicy).
Do(ctx).
Into(result)
return
}
// Delete takes name of the dicomStoreIamPolicy and deletes it. Returns an error if one occurs.
func (c *dicomStoreIamPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("dicomstoreiampolicies").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *dicomStoreIamPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("dicomstoreiampolicies").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched dicomStoreIamPolicy.
func (c *dicomStoreIamPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.DicomStoreIamPolicy, err error) {
result = &v1alpha1.DicomStoreIamPolicy{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("dicomstoreiampolicies").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
| {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
} |
main.go | package main
import (
"log"
"net/http"
"net/url"
"github.com/TerminusDeus/bybit-api/rest"
)
// HttpProxy = "http://127.0.0.1:6152"
// SocksProxy = "socks5://127.0.0.1:6153"
func newClient(proxyURL string) *http.Client {
if proxyURL == "" {
return nil
}
proxy := func(_ *http.Request) (*url.URL, error) {
return url.Parse(proxyURL)
}
httpTransport := &http.Transport{
Proxy: proxy,
}
httpClient := &http.Client{
Transport: httpTransport,
}
return httpClient
}
func main() | {
	//baseURL := "https://api.bybit.com/" // mainnet (production)
	baseURL := "https://api-testnet.bybit.com/" // testnet
client := newClient("socks5://127.0.0.1:1080")
b := rest.New(client,
baseURL, "rwEwhfC6mDFYIGfcyb", "yfNJSzGapfFwbJyvguAyVXLJSIOCIegBg42Z", true)
	// Get positions
_, _, positions, err := b.GetPositions()
if err != nil {
log.Printf("%v", err)
return
}
log.Printf("positions: %#v", positions)
	// Create an order
symbol := "BTCUSD"
side := "Buy"
orderType := "Limit"
qty := 10
price := 35000.0
timeInForce := "GoodTillCancel"
_, _, order, err := b.CreateOrder(side, orderType, price, qty, timeInForce, 0, 0, false, false, "", symbol)
if err != nil {
log.Println(err)
return
}
log.Printf("Create order: %#v", order)
	// Get orders
}
|
|
config.go | /*
* Copyright (C) 2019-2020 Red Dove Consultants Ltd. All rights reserved.
*/
package config
import (
"bufio"
"fmt"
"io"
"math"
"math/cmplx"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
)
type Mapping map[string]Any
type MapWrapper struct {
data *Mapping
config *Config
}
func newMapWrapper(config *Config, data *Mapping) MapWrapper {
return MapWrapper{data, config}
}
func (self MapWrapper) String() string {
parts := make([]string, 0)
for k, _ := range *self.data {
var s string
v, err := self.Get(k)
if err != nil {
s = "???"
} else {
s = fmt.Sprintf("%s: %s", k, v)
}
parts = append(parts, s)
}
return fmt.Sprintf("{%s}", strings.Join(parts, ", "))
}
var identifierPattern = regexp.MustCompile(`^([\p{L}_]([\p{L}\p{N}_])*)$`)
func isIdentifier(s string) bool {
parts := identifierPattern.FindStringSubmatch(s)
return parts != nil
}
func (self *MapWrapper) Get(key string) (Any, error) {
var result Any
var err error
var ok bool
if result, ok = (*self.data)[key]; ok {
result, err = self.config.evaluated(result)
} else if !isIdentifier(key) {
result, err = self.config.getFromPath(key)
} else {
return nil, errFmt(nil, "Not found in configuration: %s", key)
}
return result, err
}
func (self *MapWrapper) baseGet(key string) (Any, error) {
if result, ok := (*self.data)[key]; ok {
return result, nil
} else {
return nil, errFmt(nil, "Not found in configuration: %s", key)
}
}
func (self *MapWrapper) AsDict() (Mapping, error) {
var err error
var key string
var item Any
result := make(Mapping)
for key, item = range *self.data {
item, err = self.config.evaluated(item)
if err != nil {
break
}
switch v := item.(type) {
case SeqWrapper:
item, err = v.AsList()
case MapWrapper:
item, err = v.AsDict()
case *Config:
item, err = v.AsDict()
}
if err != nil {
break
}
result[key] = item
}
return result, err
}
type SeqWrapper struct {
data *Sequence
config *Config
}
func newSeqWrapper(config *Config, data *Sequence) SeqWrapper {
return SeqWrapper{data, config}
}
func (self SeqWrapper) String() string {
parts := make([]string, 0)
for i, _ := range *self.data {
var s string
v, err := self.Get(i)
if err != nil {
s = "???"
} else {
s = fmt.Sprintf("%s", v)
}
parts = append(parts, s)
}
return fmt.Sprintf("[%s]", strings.Join(parts, ", "))
}
func (self *SeqWrapper) baseGet(index int) Any {
return (*self.data)[index]
}
func (self *SeqWrapper) Get(index int) (Any, error) {
v := self.baseGet(index)
return self.config.evaluated(v)
}
func (self *SeqWrapper) AsList() (Sequence, error) {
var err error
var item Any
result := make(Sequence, 0)
for _, item = range *self.data {
item, err = self.config.evaluated(item)
if err != nil {
break
}
switch v := item.(type) {
case SeqWrapper:
item, err = v.AsList()
case MapWrapper:
item, err = v.AsDict()
case *Config:
item, err = v.AsDict()
}
if err != nil {
break
}
result = append(result, item)
}
return result, err
}
type StringConverter func(string, *Config) Any
type Config struct {
NoDuplicates bool
StrictConversions bool
IncludePath []string
RootDir string
Path string
parent *Config
data *MapWrapper
cache *Mapping
Context *Mapping
evaluator *evaluator
converter StringConverter
}
func (self *Config) String() string {
n := 0
suffix := "s"
if self.data != nil {
n = len(*(*self.data).data)
if n == 1 {
suffix = ""
}
}
return fmt.Sprintf("Config(\"%s\" [%d item%s])", filepath.Base(self.Path), n, suffix)
}
var isoDatetimePattern = regexp.MustCompile(`^(\d{4})-(\d{2})-(\d{2})(([ T])(((\d{2}):(\d{2}):(\d{2}))(\.\d{1,6})?(([+-])(\d{2}):(\d{2})(:(\d{2})(\.\d{1,6})?)?)?))?$`)
var envValuePattern = regexp.MustCompile(`^\$(\w+)(\|(.*))?$`)
var interpolationPattern = regexp.MustCompile(`\$\{([^}]+)\}`)
// var colonObjectPattern = regexp.MustCompile(`^([A-Za-z_]\w*([/.][A-Za-z_]\w*)*)(:([A-Za-z_]\w*))?$`)
func defaultStringConverter(s string, cfg *Config) Any {
var result Any = s
var parts []string
parts = isoDatetimePattern.FindStringSubmatch(s)
if parts != nil {
year, _ := strconv.Atoi(parts[1])
month, _ := strconv.Atoi(parts[2])
day, _ := strconv.Atoi(parts[3])
var hour = 0
var minute = 0
var second = 0
var nanosecond = 0
var offsetHour = 0
var offsetMinute = 0
var offsetSecond = 0
hasTime := parts[5] != ""
loc, _ := time.LoadLocation("UTC")
if hasTime {
hour, _ = strconv.Atoi(parts[8])
minute, _ = strconv.Atoi(parts[9])
second, _ = strconv.Atoi(parts[10])
if parts[11] != "" {
fv, _ := strconv.ParseFloat(parts[11], 64)
nanosecond = int(fv * 1.0e9)
}
hasOffset := parts[13] != ""
if hasOffset {
var sign int
if parts[13] == "-" {
sign = -1
} else {
sign = 1
}
offsetHour, _ = strconv.Atoi(parts[14])
offsetMinute, _ = strconv.Atoi(parts[15])
if parts[17] == "" {
offsetSecond = 0
} else {
offsetSecond, _ = strconv.Atoi(parts[17])
}
loc = time.FixedZone("", sign*(offsetHour*3600+offsetMinute*60+offsetSecond))
}
}
result = time.Date(year, time.Month(month), day, hour, minute, second, nanosecond, loc)
} else {
parts = envValuePattern.FindStringSubmatch(s)
if parts != nil {
name := parts[1]
hasPipe := parts[2] != ""
if value, ok := os.LookupEnv(name); ok {
result = value
} else {
if hasPipe {
result = parts[3]
}
}
} else {
matches := interpolationPattern.FindAllStringSubmatchIndex(s, -1)
if matches != nil {
cp := 0
failed := false
sparts := make([]string, 0)
for _, match := range matches {
sp := match[0]
ep := match[1]
path := s[match[2]:match[3]]
if cp < sp {
sparts = append(sparts, s[cp:sp])
}
v, err := cfg.Get(path)
if err != nil {
failed = true
break
}
sparts = append(sparts, fmt.Sprintf("%v", v))
cp = ep
}
if !failed {
if cp < len(s) {
sparts = append(sparts, s[cp:])
}
result = strings.Join(sparts, "")
}
}
}
}
return result
}
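// Illustrative sketch, not part of the original source: given the regexes defined
// above, defaultStringConverter is expected to behave roughly as follows for
// hypothetical backtick strings:
//
//	"2019-03-28T23:27:04.314159"  -> a time.Time value (UTC when no offset is given)
//	"$HOME|/tmp"                  -> the value of $HOME, or "/tmp" if the variable is unset
//	"a ${logging.level} b"        -> "a " + the configuration value at path "logging.level" + " b"
//
// A string matching none of the patterns is returned unchanged; with
// StrictConversions enabled, convertString then reports that as a conversion error.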
type evaluator struct {
config *Config
refsSeen *map[UnaryNode]bool
}
func newEvaluator(config *Config) evaluator {
rs := make(map[UnaryNode]bool)
return evaluator{config, &rs}
}
var scalarTokens = map[tokenKind]bool{
Integer: true,
Float: true,
Complex: true,
True: true,
False: true,
None: true,
String: true,
}
func sameFile(s1 string, s2 string) bool {
var info1 os.FileInfo
var info2 os.FileInfo
var err error
if info1, err = os.Stat(s1); err != nil {
return false
}
if info2, err = os.Stat(s2); err != nil {
return false
}
return os.SameFile(info1, info2)
}
func (self *evaluator) evalAt(node UnaryNode) (Any, error) {
var result Any
var operand Any
var err error
if operand, err = self.evaluate(node.operand); err == nil {
if p, ok := operand.(string); !ok {
err = errFmt(nil, "@ operand must be a string, but is: %#v", operand)
} else {
var fn string
found := false
if filepath.IsAbs(p) {
if _, err = os.Stat(p); err == nil {
fn = p
found = true
}
} else {
fn = filepath.Join(self.config.RootDir, p)
if _, err = os.Stat(fn); err == nil {
found = true
} else {
for _, dn := range self.config.IncludePath {
fn = filepath.Join(dn, p)
if _, err = os.Stat(fn); err == nil {
found = true
break
}
}
}
}
if !found {
err = errFmt(nil, "Unable to locate %s", p)
} else {
if self.config.Path != "" && sameFile(self.config.Path, fn) {
err = errFmt(nil, "Configuration cannot include itself: %s", filepath.Base(fn))
} else {
var f *os.File
f, err = os.Open(fn)
if err == nil {
var reader io.Reader
var parser Parser
defer closeFile(f)
reader = bufio.NewReader(f)
parser, err = NewParser(&reader)
if err == nil {
var node Any
node, err = parser.Container()
if err == nil {
switch v := node.(type) {
case []keyValue:
var mapping MapWrapper
cfg := NewConfig()
cfg.NoDuplicates = self.config.NoDuplicates
cfg.StrictConversions = self.config.StrictConversions
mapping, err = cfg.wrapMapping(v)
if err == nil {
err = cfg.setPath(fn)
if err == nil {
cfg.data = &mapping
cfg.parent = self.config
cfg.Context = self.config.Context
cfg.IncludePath = self.config.IncludePath[:]
if self.config.cache != nil {
cache := make(Mapping)
cfg.cache = &cache
}
result = cfg
}
}
case Sequence:
result = newSeqWrapper(self.config, &v)
default:
result = v
}
}
}
}
}
}
}
}
return result, err
}
func isInteger(v Any) bool {
switch v.(type) {
case int64:
return true
default:
return false
}
}
func isFloat(v Any) bool {
switch v.(type) {
case float64:
return true
default:
return false
}
}
func isString(v Any) bool {
switch v.(type) {
case string:
return true
default:
return false
}
}
func isComplex(v Any) bool {
switch v.(type) {
case complex128:
return true
default:
return false
}
}
func isMapping(v Any) bool {
switch v.(type) {
case MapWrapper:
return true
default:
return false
}
}
func isSequence(v Any) bool {
switch v.(type) {
case SeqWrapper:
return true
default:
return false
}
}
func toFloat(v Any) float64 {
switch v.(type) {
case int64:
return float64(v.(int64))
case float64:
return v.(float64)
default:
panic(fmt.Sprintf("unable to convert %v to float64", v))
}
}
func toComplex(v Any) complex128 {
switch v.(type) {
case complex128:
return v.(complex128)
case int64:
return complex(float64(v.(int64)), 0.0)
case float64:
return complex(v.(float64), 0.0)
default:
panic(fmt.Sprintf("unable to convert %v to float64", v))
}
}
func mergeDicts(target *Mapping, source *Mapping) {
for k, v := range *source {
if tv, ok := (*target)[k]; ok {
if tvm, ok := tv.(Mapping); ok {
if vm, ok := v.(Mapping); ok {
mergeDicts(&tvm, &vm)
continue
}
}
}
(*target)[k] = v
}
}
func mergeMappings(map1 MapWrapper, map2 MapWrapper) (MapWrapper, error) {
var result MapWrapper
var err error
var mm Mapping
mm, err = map1.AsDict()
if err == nil {
var m2 Mapping
m2, err = map2.AsDict()
if err == nil {
mergeDicts(&mm, &m2)
result = newMapWrapper(map1.config, &mm)
}
}
return result, err
}
func (self *evaluator) addValues(lhs Any, rhs Any) (Any, error) {
var result Any
var err error
if isFloat(lhs) || isFloat(rhs) {
result = toFloat(lhs) + toFloat(rhs)
} else if isComplex(lhs) || isComplex(rhs) {
result = toComplex(lhs) + toComplex(rhs)
} else if isInteger(lhs) && isInteger(rhs) {
result = lhs.(int64) + rhs.(int64)
} else if isString(lhs) && isString(rhs) {
result = fmt.Sprintf("%v%v", lhs, rhs)
} else if isSequence(lhs) && isSequence(rhs) {
s := lhs.(SeqWrapper)
data, err := s.AsList()
if err == nil {
var lv Sequence
s := rhs.(SeqWrapper)
lv, err = s.AsList()
if err == nil {
data = append(data, lv...)
result = newSeqWrapper(s.config, &data)
}
}
} else if isMapping(lhs) && isMapping(rhs) {
result, err = mergeMappings(lhs.(MapWrapper), rhs.(MapWrapper))
} else {
err = errFmt(nil, "unable to add %s and %s", lhs, rhs)
}
return result, err
}
func (self *evaluator) subtractValues(lhs Any, rhs Any) (Any, error) {
var result Any
var err error
if isFloat(lhs) || isFloat(rhs) {
result = toFloat(lhs) - toFloat(rhs)
} else if isInteger(lhs) && isInteger(rhs) {
result = lhs.(int64) - rhs.(int64)
} else if isComplex(lhs) || isComplex(rhs) {
result = toComplex(lhs) - toComplex(rhs)
} else if isMapping(lhs) && isMapping(rhs) {
var md = make(Mapping)
m := newMapWrapper(lhs.(MapWrapper).config, &md)
for k, v := range *lhs.(MapWrapper).data {
if _, ok := (*rhs.(MapWrapper).data)[k]; !ok {
md[k] = v
}
}
result = m
} else {
err = errFmt(nil, "unable to subtract %s from %s", rhs, lhs)
}
return result, err
}
func (self *evaluator) multiplyValues(lhs Any, rhs Any) (Any, error) {
var result Any
var err error
if isFloat(lhs) || isFloat(rhs) {
result = toFloat(lhs) * toFloat(rhs)
} else if isInteger(lhs) && isInteger(rhs) {
result = lhs.(int64) * rhs.(int64)
} else {
err = errFmt(nil, "unable to multiply %s and %s", lhs, rhs)
}
return result, err
}
func (self *evaluator) divideValues(lhs Any, rhs Any) (Any, error) {
var result Any
var err error
if isFloat(lhs) || isFloat(rhs) {
result = toFloat(lhs) / toFloat(rhs)
} else if isInteger(lhs) && isInteger(rhs) {
result = float64(lhs.(int64)) / float64(rhs.(int64))
} else {
err = errFmt(nil, "unable to divide %s by %s", lhs, rhs)
}
return result, err
}
func (self *evaluator) integerDivideValues(lhs Any, rhs Any) (Any, error) {
var result Any
var err error
if isFloat(lhs) || isFloat(rhs) {
result = int64(toFloat(lhs) / toFloat(rhs))
} else if isInteger(lhs) && isInteger(rhs) {
result = lhs.(int64) / rhs.(int64)
} else {
err = errFmt(nil, "unable to integer divide %s by %s", lhs, rhs)
}
return result, err
}
func (self *evaluator) moduloValues(lhs Any, rhs Any) (Any, error) {
var result Any
var err error
if isInteger(lhs) && isInteger(rhs) {
result = lhs.(int64) % rhs.(int64)
} else {
err = errFmt(nil, "unable to compute %s modulo %s", lhs, rhs)
}
return result, err
}
func (self *evaluator) leftShiftValues(lhs Any, rhs Any) (Any, error) {
var result Any
var err error
if isInteger(lhs) && isInteger(rhs) {
result = lhs.(int64) << rhs.(int64)
} else {
err = errFmt(nil, "unable to left-shift %s and %s", lhs, rhs)
}
return result, err
}
func (self *evaluator) rightShiftValues(lhs Any, rhs Any) (Any, error) {
var result Any
var err error
if isInteger(lhs) && isInteger(rhs) {
result = lhs.(int64) >> rhs.(int64)
} else {
err = errFmt(nil, "unable to right-shift %s and %s", lhs, rhs)
}
return result, err
}
func (self *evaluator) bitOrValues(lhs Any, rhs Any) (Any, error) {
var result Any
var err error
if isInteger(lhs) && isInteger(rhs) {
result = lhs.(int64) | rhs.(int64)
} else if isMapping(lhs) && isMapping(rhs) {
result, err = mergeMappings(lhs.(MapWrapper), rhs.(MapWrapper))
} else {
err = errFmt(nil, "unable to bitwise-or %s and %s", lhs, rhs)
}
return result, err
}
func (self *evaluator) bitAndValues(lhs Any, rhs Any) (Any, error) {
var result Any
var err error
if isInteger(lhs) && isInteger(rhs) {
result = lhs.(int64) & rhs.(int64)
} else {
err = errFmt(nil, "unable to bitwise-and %s and %s", lhs, rhs)
}
return result, err
}
func (self *evaluator) bitXorValues(lhs Any, rhs Any) (Any, error) {
var result Any
var err error
if isInteger(lhs) && isInteger(rhs) {
result = lhs.(int64) ^ rhs.(int64)
} else {
err = errFmt(nil, "unable to bitwise-xor %s and %s", lhs, rhs)
}
return result, err
}
func (self *evaluator) exponentiate(lhs Any, rhs Any) (Any, error) {
var result Any
var err error
if isFloat(lhs) || isFloat(rhs) {
result = math.Pow(toFloat(lhs), toFloat(rhs))
} else if isComplex(lhs) || isComplex(rhs) {
result = cmplx.Pow(toComplex(lhs), toComplex(rhs))
} else if isInteger(lhs) && isInteger(rhs) {
result = int64(math.Pow(toFloat(lhs), toFloat(rhs)))
} else {
err = errFmt(nil, "unable to exponentiate %s with %s", lhs, rhs)
}
return result, err
}
func (self *evaluator) negate(v Any) (Any, error) {
var result Any
var err error
if isInteger(v) {
result = -v.(int64)
} else if isFloat(v) {
result = -v.(float64)
} else if isComplex(v) {
result = -v.(complex128)
} else {
err = errFmt(nil, "unable to negate %s", v)
}
return result, err
}
func (self *evaluator) evaluate(value Any) (Any, error) {
var result Any
var err error
if value != nil {
switch v := value.(type) {
case bool:
result = v
case int64:
result = v
case string:
result = v
case Token:
if _, ok := scalarTokens[v.kind]; ok {
result = v.value
} else if v.kind == Word | else if v.kind == BackTick {
if sv, ok := v.value.(string); ok {
result, err = self.config.convertString(sv)
} else {
panic(fmt.Sprintf("Unexpected non-string value for backtick token: %#v", v.value))
}
} else {
err = errFmt(nil, "Unable to evaluate '%#v'", v)
}
case []keyValue:
result, err = self.config.wrapMapping(v)
case Mapping:
result = newMapWrapper(self.config, &v)
case Sequence:
result = newSeqWrapper(self.config, &v)
case UnaryNode:
switch v.op {
case At:
result, err = self.evalAt(v)
case Dollar:
result, err = self.getFromPath(v.operand)
case Minus:
var val Any
val, err = self.evaluate(v.operand)
if err == nil {
result, err = self.negate(val)
}
default:
err = errFmt(nil, "Unable to evaluate %#v", v)
}
case BinaryNode:
var lhs Any
lhs, err = self.evaluate(v.left)
if err == nil {
var lhsBool bool
switch v.op {
case And:
lhsBool = lhs.(bool)
if !lhsBool {
result = false
} else {
result, err = self.evaluate(v.right)
if err == nil {
result = result.(bool)
}
}
case Or:
lhsBool = lhs.(bool)
if lhsBool {
result = true
} else {
result, err = self.evaluate(v.right)
if err == nil {
result = result.(bool)
}
}
default:
var rhs Any
rhs, err = self.evaluate(v.right)
if err == nil {
switch v.op {
case Plus:
result, err = self.addValues(lhs, rhs)
case Minus:
result, err = self.subtractValues(lhs, rhs)
case Star:
result, err = self.multiplyValues(lhs, rhs)
case Slash:
result, err = self.divideValues(lhs, rhs)
case SlashSlash:
result, err = self.integerDivideValues(lhs, rhs)
case Modulo:
result, err = self.moduloValues(lhs, rhs)
case LeftShift:
result, err = self.leftShiftValues(lhs, rhs)
case RightShift:
result, err = self.rightShiftValues(lhs, rhs)
case BitwiseOr:
result, err = self.bitOrValues(lhs, rhs)
case BitwiseAnd:
result, err = self.bitAndValues(lhs, rhs)
case BitwiseXor:
result, err = self.bitXorValues(lhs, rhs)
case Power:
result, err = self.exponentiate(lhs, rhs)
default:
err = errFmt(nil, "Unable to evaluate %#v", v)
}
}
}
}
default:
err = errFmt(nil, "Unable to evaluate '%#v'", v)
}
}
return result, err
}
func (self *evaluator) getSliceIndexOrStep(node Any, indexOrStep string) (int, error) {
var result int
var err error
var item Any
item, err = self.evaluate(node)
if err == nil {
if number, ok := item.(int64); ok {
result = int(number)
} else {
err = errFmt(nil, "slice %s must be an integer, but is %s", item, indexOrStep)
}
}
return result, err
}
func (self *evaluator) getSlice(seq SeqWrapper, node SliceNode) (SeqWrapper, error) {
var result SeqWrapper
var err error
var startIndex int
var stopIndex int
var step int
var size = len(*seq.data)
if node.step == nil {
step = 1
} else {
step, err = self.getSliceIndexOrStep(node.step, "step")
if err == nil && step == 0 {
err = errFmt(nil, "slice step cannot be zero")
}
}
if node.start == nil {
startIndex = 0
} else {
startIndex, err = self.getSliceIndexOrStep(node.start, "index")
if err == nil {
if startIndex < 0 {
if startIndex >= -size {
startIndex += size
} else {
startIndex = 0
}
} else if startIndex >= size {
startIndex = size - 1
}
}
}
if err == nil {
if node.stop == nil {
stopIndex = size - 1
} else {
stopIndex, err = self.getSliceIndexOrStep(node.stop, "index")
if err == nil {
if stopIndex < 0 {
if stopIndex >= -size {
stopIndex += size
} else {
stopIndex = 0
}
}
if stopIndex > size {
stopIndex = size
}
if step < 0 {
stopIndex++
} else {
stopIndex--
}
}
}
}
if err == nil {
if step < 0 && startIndex < stopIndex {
tmp := stopIndex
stopIndex = startIndex
startIndex = tmp
}
items := make(Sequence, 0)
i := startIndex
var notDone bool
if step > 0 {
notDone = i <= stopIndex
} else {
notDone = i >= stopIndex
}
for notDone {
items = append(items, (*seq.data)[i])
i += step
if step > 0 {
notDone = i <= stopIndex
} else {
notDone = i >= stopIndex
}
}
result = newSeqWrapper(self.config, &items)
}
return result, err
}
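// Illustrative sketch, not part of the original source: getSlice implements
// Python-style slice semantics over a wrapped sequence, so for a hypothetical
// list value foo = [1, 2, 3, 4, 5] referenced from a path:
//
//	foo[1:4]  -> [2, 3, 4]
//	foo[::2]  -> [1, 3, 5]
//	foo[::-1] -> [5, 4, 3, 2, 1]
//
// A step of zero is rejected with "slice step cannot be zero", as checked above.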
func isRef(node Any) bool {
if un, ok := node.(UnaryNode); !ok {
return false
} else {
return un.op == Dollar
}
}
func (self *evaluator) getFromPath(node Any) (Any, error) {
var result Any
var err error
pi := pathIterator(node)
first := (<-pi).(Token).value.(string)
result, err = self.config.Get(first)
if err == nil {
var currentEvaluator *evaluator
var ok bool
var cfg *Config
if cfg, ok = result.(*Config); !ok {
currentEvaluator = self
} else {
currentEvaluator = cfg.evaluator
}
for item := range pi {
var operand Any
var doEval = false
pv := item.(pathValue)
operand = pv.operand
sn, sliced := operand.(SliceNode)
if !sliced && pv.op != Dot {
operand, err = currentEvaluator.evaluate(operand)
}
// do checks on operand types
if sliced {
if _, ok := result.(SeqWrapper); !ok {
err = errFmt(nil, "slices can only operate on lists")
break
}
} else {
}
switch rv := result.(type) {
case *Config:
currentEvaluator = cfg.evaluator
if s, ok := operand.(string); ok {
result, err = rv.Get(s)
} else {
err = errFmt(nil, "string required, but found %T(%v)", operand, operand)
}
case MapWrapper:
if s, ok := operand.(string); ok {
result, err = rv.baseGet(s)
} else {
err = errFmt(nil, "string required, but found %v", operand)
}
doEval = true
case SeqWrapper:
if sliced {
result, err = self.getSlice(result.(SeqWrapper), sn)
} else {
if n, ok := operand.(int64); !ok {
err = errFmt(nil, "integer required, but found '%v'", operand)
} else {
intN := int(n)
size := len(*rv.data)
if intN < 0 {
if intN >= -size {
intN += size
}
}
if intN < 0 || intN >= size {
err = errFmt(nil, "index out of range: is %d, must b between 0 and %d", intN, size-1)
} else {
result = rv.baseGet(intN)
doEval = true
}
}
}
default:
path := toSource(node)
err = errFmt(nil, "Not found in configuration: %s", path)
}
if err != nil {
break
}
if isRef(result) {
un := result.(UnaryNode)
if _, ok = (*currentEvaluator.refsSeen)[un]; ok {
parts := make([]string, 0)
for k := range *currentEvaluator.refsSeen {
loc := k.operand.(BinaryNode).left.(Token).start
s := fmt.Sprintf("%s %s", toSource(k.operand), loc)
parts = append(parts, s)
}
if len(parts) > 1 {
sort.Strings(parts)
}
err = errFmt(nil, "Circular reference: %s", strings.Join(parts, ", "))
break
}
(*currentEvaluator.refsSeen)[un] = true
}
if doEval {
result, err = currentEvaluator.evaluate(result)
}
}
}
return result, err
}
func NewConfig() *Config {
var result = Config{true, true, make([]string, 0), "", "", nil, nil, nil,
nil, nil, defaultStringConverter}
var e = newEvaluator(&result)
result.evaluator = &e
return &result
}
func FromFile(path string) (*Config, error) {
result := NewConfig()
err := result.LoadFile(path)
return result, err
}
/*func unwrap(o Any) (Any, error) {
var result = o
var err error
switch v := o.(type) {
case SeqWrapper:
result, err = v.AsList()
case MapWrapper:
result, err = v.AsDict()
}
return result, err
}
*/
func (self *Config) setPath(path string) error {
var err error
path, err = filepath.Abs(path)
if err == nil {
self.Path = path
if _, err = os.Stat(path); err == nil {
self.RootDir = filepath.Dir(path)
}
}
return err
}
func (self *Config) wrapMapping(items []keyValue) (MapWrapper, error) {
var result MapWrapper
var seen = make(map[string]Location)
var err error
var data = make(Mapping)
for _, item := range items {
var key = item.key.value.(string)
if self.NoDuplicates {
if _, ok := seen[key]; ok {
err = errFmt(nil, "Duplicate key %v at %v (previously at %v)",
key, item.key.start, seen[key])
break
}
seen[key] = item.key.start
}
data[key] = item.value
}
if err == nil {
result = newMapWrapper(self, &data)
}
return result, err
}
func (self *Config) convertString(s string) (Any, error) {
var result Any
var err error
result = self.converter(s, self)
if self.StrictConversions && (result == s) {
err = errFmt(nil, "Unable to convert string %v", s)
}
return result, err
}
func (self *Config) Load(reader *io.Reader) error {
parser, err := NewParser(reader)
if err == nil {
var node Any
node, err = parser.Container()
mb, ok := node.([]keyValue)
if !ok {
err = errFmt(nil, "Root configuration must be a mapping")
} else {
var data MapWrapper
data, err = self.wrapMapping(mb)
if err == nil {
self.data = &data
if self.cache != nil {
var c = make(Mapping)
self.cache = &c
}
}
}
}
return err
}
func closeFile(file *os.File) {
_ = file.Close()
}
func (self *Config) LoadFile(path string) error {
var reader io.Reader
f, err := os.Open(path)
if err == nil {
defer closeFile(f)
reader = bufio.NewReader(f)
err = self.Load(&reader)
if err == nil {
err = self.setPath(path)
}
}
return err
}
func (self *Config) Get(key string) (Any, error) {
var result Any
var err error
if self.data == nil {
err = errFmt(nil, "No data in configuration")
} else {
if self.cache != nil {
if r, ok := (*self.cache)[key]; ok {
result = r
}
}
if result == nil {
result, err = self.data.Get(key)
}
if err == nil {
if self.cache != nil {
(*self.cache)[key] = result
}
}
}
//if err == nil {
// result, err = unwrap(result)
//}
return result, err
}
func (self *Config) GetWithDefault(key string, defaultValue Any) (Any, error) {
result, err := self.Get(key)
if err != nil {
result = defaultValue
err = nil
}
return result, err
}
func parsePath(s string) (Any, error) {
var result Any
var err error
var parser Parser
parser, err = makeParser(s)
if err != nil {
err = errFmt(&parser.next.start, "Invalid Path: %s", s)
} else {
var failed = false
if parser.next.kind != Word {
failed = true
} else {
var node Any
node, err = parser.primary()
if err == nil {
if parser.atEnd() {
result = node
} else {
failed = true
}
}
}
if failed {
err = errFmt(&parser.next.start, "Invalid Path: %s", s)
}
}
return result, err
}
type pathValue struct {
op tokenKind
operand Any
}
func tokenValue(node Any) Any {
if t, ok := node.(Token); ok {
return t.value
}
panic(fmt.Sprintf("Token was expected, but got %#v", node))
}
func visit(node Any) <-chan Any {
ch := make(chan Any)
go func() {
switch v := node.(type) {
case Token:
ch <- v
case UnaryNode:
for item := range visit(v.operand) {
ch <- item
}
case BinaryNode:
for item := range visit(v.left) {
ch <- item
}
switch v.op {
case Dot:
ch <- pathValue{Dot, tokenValue(v.right)}
case Colon:
ch <- pathValue{Colon, v.right}
default:
ch <- pathValue{v.op, tokenValue(v.right)}
}
}
close(ch)
}()
return ch
}
func pathIterator(start Any) <-chan Any {
ch := make(chan Any)
go func() {
for item := range visit(start) {
ch <- item
}
close(ch)
}()
return ch
}
func toSource(o Any) string {
var result string
switch v := o.(type) {
case Token:
result = fmt.Sprintf("%v", v.value)
case BinaryNode:
pi := pathIterator(v)
parts := make([]string, 1)
parts[0] = (<-pi).(Token).value.(string)
for item := range pi {
pv := item.(pathValue)
switch pv.op {
case Dot:
parts = append(parts, ".")
parts = append(parts, pv.operand.(string))
case LeftBracket:
parts = append(parts, "[")
parts = append(parts, toSource(pv.operand))
parts = append(parts, "]")
case Colon:
sn := pv.operand.(SliceNode)
parts = append(parts, "[")
if sn.start != nil {
parts = append(parts, toSource(sn.start))
}
parts = append(parts, ":")
if sn.stop != nil {
parts = append(parts, toSource(sn.stop))
}
if sn.step != nil {
parts = append(parts, ":")
parts = append(parts, toSource(sn.step))
}
parts = append(parts, "]")
}
}
result = strings.Join(parts, "")
default:
result = fmt.Sprintf("%v", v)
}
return result
}
func (self *Config) getFromPath(key string) (Any, error) {
var result Any
var err error
if self.data == nil {
err = errFmt(nil, "No data in configuration")
} else {
var node Any
node, err = parsePath(key)
if err == nil {
rs := make(map[UnaryNode]bool)
// clear out any existing refs
self.evaluator.refsSeen = &rs
result, err = self.evaluator.getFromPath(node)
}
}
return result, err
}
func (self *Config) AsDict() (Mapping, error) {
return self.data.AsDict()
}
func (self *Config) evaluated(v Any) (Any, error) {
return self.evaluator.evaluate(v)
}
| {
if self.config.Context == nil {
err = errFmt(&v.start, "No context to look up variables'")
} else if lv, ok := (*self.config.Context)[v.text]; ok {
result = lv
} else {
err = errFmt(&v.start, "Unknown variable '%s'", v.text)
}
} |
Login.js | function solve(str) {
let i = 0;
let user = str[0].toString();
let count = 0;
let password = '';
for (let index = user.length - 1; index >= 0; index--) {
password += user[index];
}
while (true) {
if (str[i + 1] === password) {
console.log(`User ${user} logged in.`);
break;
} else if (count >= 3) {
console.log(`User ${user} blocked!`);
break;
} else {
console.log('Incorrect password. Try again.');
}
count++;
i++;
}
}
function solve2(input) { | let count = 0;
while (true) {
let enter = input.shift();
if (enter === password) {
console.log(`User ${user} logged in.`);
break;
} else if (count >= 3) {
console.log(`User ${user} blocked!`);
break;
} else {
console.log('Incorrect password. Try again.');
count++;
}
}
}
// solve(['momi','imom']);
// solve(['Acer','login','go','let me in','recA']);
// solve(['sunny', 'rainy', 'cloudy', 'sunny', 'not sunny']);
// solve(['aca','login','go','let me in','recA']); | let user = input.shift();
let password = user.split('').reverse().join(''); |
multi_file_input.py | # Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterator, Sequence, Tuple
from bentoml.adapters.base_input import BaseInputAdapter, parse_cli_inputs
from bentoml.adapters.utils import decompress_gzip_request
from bentoml.types import AwsLambdaEvent, FileLike, HTTPRequest, InferenceTask
ApiFuncArgs = Tuple[Sequence[FileLike], ...]
MultiFileTask = InferenceTask[Tuple[FileLike, ...]]
class MultiFileInput(BaseInputAdapter):
""" Low level input adapters that transform incoming files data from http request,
CLI or AWS lambda event into binary stream objects, then pass down to user defined
API functions.
Parameters
----------
input_names : List[str]
list of input names. For HTTP they are form input names. For CLI
they are CLI args --input-<name1> or --input-file-<name1>
allow_none : bool
accept HTTP requests or AWS Lambda events without all files
provided. Does not take effect on CLI.
Examples
----------
Service using MultiFileInput:
.. code-block:: python
        from typing import List
        import json
        from PIL import Image
        import numpy as np
        import bentoml
from bentoml.types import FileLike
        from bentoml.frameworks.pytorch import PytorchModelArtifact
from bentoml.adapters import MultiFileInput
@bentoml.env(pip_packages=['torch', 'pillow', 'numpy'])
@bentoml.artifacts([PytorchModelArtifact('classifier')])
class PyTorchFashionClassifier(bentoml.BentoService):
@bentoml.api(
input=MultiFileInput(input_names=['image', 'json']), batch=True)
def predict(self, image_list: List[FileLike], json_list: List[FileLike]):
inputs = []
for img_io, json_io in zip(image_list, json_list):
img = Image.open(img_io)
json_obj = json.load(json_io)
inputs.append([img, json_obj])
outputs = self.artifacts.classifier(inputs)
return outputs
Query with HTTP request performed by cURL::
curl -i \\
-F [email protected] \\
-F [email protected] \\
localhost:5000/predict
OR by an HTML form that sends multipart data:
.. code-block:: html
<form action="http://localhost:8000" method="POST"
enctype="multipart/form-data">
<input name="image" type="file">
<input name="json" type="file">
<input type="submit">
</form>
Query with CLI command::
bentoml run PyTorchFashionClassifier:latest predict \\
--input-file-image test.jpg \\
--input-file-json test.json
OR infer all file pairs under a folder with ten pairs each batch::
bentoml run PyTorchFashionClassifier:latest predict --max-batch-size 10 \\
--input-file-image folder/*.jpg \\
--input-file-json folder/*.json
    Note: jpg files and json files should share the same filename prefix, like this::
folder:
- apple.jpg
- apple.json
- banana.jpg
- banana.json
...
"""
HTTP_METHODS = ["POST"]
BATCH_MODE_SUPPORTED = True
def __init__(
self, input_names: Sequence[str], allow_none: bool = False, **base_kwargs,
):
super().__init__(**base_kwargs)
self.input_names = input_names
self.allow_none = allow_none
@property
def config(self):
|
@property
def request_schema(self):
return {
"multipart/form-data": {
"schema": {
"type": "object",
"properties": {
k: {"type": "string", "format": "binary"}
for k in self.input_names
},
}
},
}
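    # Illustrative sketch, not part of the original source: with a hypothetical
    # MultiFileInput(input_names=['image', 'json']), request_schema resolves to
    #   {"multipart/form-data": {"schema": {"type": "object", "properties": {
    #       "image": {"type": "string", "format": "binary"},
    #       "json": {"type": "string", "format": "binary"}}}}}
    # i.e. the OpenAPI description of the multipart form shown in the class docstring.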
@decompress_gzip_request
def from_http_request(self, req: HTTPRequest) -> MultiFileTask:
if req.headers.content_type != 'multipart/form-data':
task = InferenceTask(data=None)
task.discard(
http_status=400,
err_msg=f"BentoML#{self.__class__.__name__} only accepts requests "
"with Content-Type: multipart/form-data",
)
else:
_, _, files = HTTPRequest.parse_form_data(req)
files = tuple(files.get(k) for k in self.input_names)
if not any(files):
task = InferenceTask(data=None)
task.discard(
http_status=400,
err_msg=f"BentoML#{self.__class__.__name__} requires inputs "
f"fields {self.input_names}",
)
elif not all(files) and not self.allow_none:
task = InferenceTask(data=None)
task.discard(
http_status=400,
err_msg=f"BentoML#{self.__class__.__name__} requires inputs "
f"fields {self.input_names}",
)
else:
task = InferenceTask(http_headers=req.headers, data=files,)
return task
def from_aws_lambda_event(self, event: AwsLambdaEvent) -> MultiFileTask:
request = HTTPRequest(
headers=tuple((k, v) for k, v in event.get('headers', {}).items()),
body=event['body'],
)
return self.from_http_request(request)
def from_cli(self, cli_args: Sequence[str]) -> Iterator[MultiFileTask]:
for inputs in parse_cli_inputs(cli_args, self.input_names):
yield InferenceTask(cli_args=cli_args, data=inputs)
def extract_user_func_args(self, tasks: Sequence[MultiFileTask]) -> ApiFuncArgs:
args = tuple(map(tuple, zip(*map(lambda t: t.data, tasks))))
if not args:
args = (tuple(),) * len(self.input_names)
return args
| return {
# Converting to list, google.protobuf.Struct does not work with tuple type
"input_names": list(self.input_names)
} |
script.js | /*
Copyright 2015 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
"use strict";
function | () {
document.getElementById("demo").innerHTML = "Eat my feet!";
}
| fetchMessage |
tst_wavelengths.py | from __future__ import division
from cctbx.eltbx import wavelengths
from libtbx.test_utils import approx_equal
def exercise():
from cctbx import factor_kev_angstrom
w = wavelengths.characteristic("CU")
assert w.label() == "Cu"
assert approx_equal(w.as_angstrom(), 1.5418)
assert approx_equal(w.as_kev(), factor_kev_angstrom / 1.5418)
assert approx_equal(w.as_ev() / 1000, factor_kev_angstrom / 1.5418)
n = 0
for w in wavelengths.characteristic_iterator():
n += 1
uu = wavelengths.characteristic(w.label())
assert uu.label() == w.label()
assert uu.as_ev() == w.as_ev()
assert n == 15 | print "OK"
if (__name__ == "__main__"):
run() |
def run():
exercise() |
kindck-owned-trait-contains-1.rs | // run-pass
#![allow(non_snake_case)]
#![allow(non_camel_case_types)]
#![feature(box_syntax)]
trait repeat<A> { fn get(&self) -> A; }
impl<A:Clone + 'static> repeat<A> for Box<A> {
fn get(&self) -> A |
}
fn repeater<A:Clone + 'static>(v: Box<A>) -> Box<repeat<A>+'static> {
box v as Box<repeat<A>+'static> // No
}
pub fn main() {
let x = 3;
let y = repeater(box x);
assert_eq!(x, y.get());
}
| {
(**self).clone()
} |
runner_test.rs | use super::*;
use std::env::current_dir;
use std::path::{Path, PathBuf};
#[test]
fn create_script_file_and_delete() {
let file = create_script_file(&"test".to_string()).unwrap();
assert!(Path::new(&file).exists());
fsio::file::delete_ignore_error(&file);
assert!(!Path::new(&file).exists());
}
#[test]
fn modify_script_no_shebang_default_options() {
let options = ScriptOptions::new();
let cwd = current_dir().unwrap();
let mut expected_script = "".to_string();
expected_script.push_str("cd \"");
expected_script.push_str(cwd.to_str().unwrap());
expected_script.push_str("\"\necho test\n\n");
let script = modify_script(&"echo test".to_string(), &options).unwrap();
assert_eq!(script, expected_script);
}
#[test]
fn modify_script_with_shebang_default_options() {
let options = ScriptOptions::new();
let cwd = current_dir().unwrap();
let mut expected_script = "#!/bin/bash\n".to_string();
expected_script.push_str("cd \"");
expected_script.push_str(cwd.to_str().unwrap());
expected_script.push_str("\"\necho test\n\n");
let script = modify_script(&"#!/bin/bash\necho test".to_string(), &options).unwrap();
assert_eq!(script, expected_script);
}
#[test]
fn modify_script_exit_on_error() {
let mut options = ScriptOptions::new();
options.exit_on_error = true;
let cwd = current_dir().unwrap();
let mut expected_script = "".to_string();
if !cfg!(windows) {
expected_script.push_str("set -e\n");
}
expected_script.push_str("cd \"");
expected_script.push_str(cwd.to_str().unwrap());
expected_script.push_str("\"\necho test\n\n");
let script = modify_script(&"echo test".to_string(), &options).unwrap();
assert_eq!(script, expected_script);
}
#[test]
fn modify_script_working_directory() |
#[test]
fn modify_script_print_commands() {
let mut options = ScriptOptions::new();
options.print_commands = true;
let cwd = current_dir().unwrap();
let mut expected_script = "".to_string();
if !cfg!(windows) {
expected_script.push_str("set -x\n");
}
expected_script.push_str("cd \"");
expected_script.push_str(cwd.to_str().unwrap());
expected_script.push_str("\"\necho test\n\n");
let script = modify_script(&"echo test".to_string(), &options).unwrap();
assert_eq!(script, expected_script);
}
#[test]
fn modify_script_exit_on_error_and_print_commands() {
let mut options = ScriptOptions::new();
options.exit_on_error = true;
options.print_commands = true;
let cwd = current_dir().unwrap();
let mut expected_script = "".to_string();
if !cfg!(windows) {
expected_script.push_str("set -e\n");
expected_script.push_str("set -x\n");
}
expected_script.push_str("cd \"");
expected_script.push_str(cwd.to_str().unwrap());
expected_script.push_str("\"\necho test\n\n");
let script = modify_script(&"echo test".to_string(), &options).unwrap();
assert_eq!(script, expected_script);
}
#[test]
fn run_test_no_args_default_options() {
let args = vec![];
let options = ScriptOptions::new();
let (code, output, error) = run(
r#"
echo "Test"
exit 0
"#,
&args,
&options,
)
.unwrap();
assert_eq!(code, 0);
assert!(!output.is_empty());
assert!(error.is_empty());
}
#[test]
fn run_test_error_exit_code() {
let args = vec![];
let options = ScriptOptions::new();
let result = run("exit 1", &args, &options).unwrap();
assert_eq!(result.0, 1);
}
#[test]
fn run_test_error_execute() {
let args = vec![];
let mut options = ScriptOptions::new();
options.runner = Some("badtest123".to_string());
let result = run("exit 1", &args, &options);
assert!(result.is_err());
}
#[test]
fn run_test_with_args() {
let args = vec!["ARG1".to_string(), "ARG2".to_string()];
let options = ScriptOptions::new();
let script = if cfg!(windows) {
"echo arg1: %1\necho arg2: %2"
} else {
"echo arg1: $1\necho arg2: $2"
};
let (code, output, error) = run(script, &args, &options).unwrap();
assert_eq!(code, 0);
assert!(!output.is_empty());
assert!(error.is_empty());
assert!(output.find("arg1: ARG1").is_some());
assert!(output.find("arg2: ARG2").is_some());
}
#[test]
fn run_test_no_args_inherit_input() {
let args = vec![];
let mut options = ScriptOptions::new();
options.input_redirection = IoOptions::Inherit;
let (code, output, error) = run(
r#"
echo "Test"
exit 0
"#,
&args,
&options,
)
.unwrap();
assert_eq!(code, 0);
assert!(!output.is_empty());
assert!(error.is_empty());
}
#[test]
fn run_test_no_args_pipe_input() {
let args = vec![];
let mut options = ScriptOptions::new();
options.input_redirection = IoOptions::Pipe;
let (code, output, error) = run(
r#"
echo "Test"
exit 0
"#,
&args,
&options,
)
.unwrap();
assert_eq!(code, 0);
assert!(!output.is_empty());
assert!(error.is_empty());
}
#[test]
fn run_test_no_args_null_input() {
let args = vec![];
let mut options = ScriptOptions::new();
options.input_redirection = IoOptions::Null;
let (code, output, error) = run(
r#"
echo "Test"
exit 0
"#,
&args,
&options,
)
.unwrap();
assert_eq!(code, 0);
assert!(!output.is_empty());
assert!(error.is_empty());
}
#[test]
fn run_test_no_args_inherit_output() {
let args = vec![];
let mut options = ScriptOptions::new();
options.output_redirection = IoOptions::Inherit;
let (code, output, error) = run(
r#"
echo "Test"
exit 0
"#,
&args,
&options,
)
.unwrap();
assert_eq!(code, 0);
assert!(output.is_empty());
assert!(error.is_empty());
}
#[test]
fn run_test_no_args_pipe_output() {
let args = vec![];
let mut options = ScriptOptions::new();
options.output_redirection = IoOptions::Pipe;
let (code, output, error) = run(
r#"
echo "Test"
exit 0
"#,
&args,
&options,
)
.unwrap();
assert_eq!(code, 0);
assert!(!output.is_empty());
assert!(error.is_empty());
}
#[test]
fn run_test_no_args_null_output() {
let args = vec![];
let mut options = ScriptOptions::new();
options.output_redirection = IoOptions::Null;
let (code, output, error) = run(
r#"
echo "Test"
exit 0
"#,
&args,
&options,
)
.unwrap();
assert_eq!(code, 0);
assert!(output.is_empty());
assert!(error.is_empty());
}
#[test]
fn spawn_test_valid_exit_code() {
let args = vec![];
let options = ScriptOptions::new();
let child = spawn("exit 0", &args, &options).unwrap();
let output = child.wait_with_output().unwrap();
assert!(output.status.success());
}
#[test]
fn spawn_test_error_exit_code() {
let args = vec![];
let options = ScriptOptions::new();
let child = spawn("exit 1", &args, &options).unwrap();
let output = child.wait_with_output().unwrap();
assert!(!output.status.success());
}
#[test]
#[should_panic]
fn run_or_exit_error_code() {
let args = vec![];
let options = ScriptOptions::new();
run_or_exit("exit 1", &args, &options);
}
#[test]
#[should_panic]
fn run_or_exit_invocation_error() {
let args = vec![];
let options = ScriptOptions::new();
run_or_exit("badcommand", &args, &options);
}
#[test]
fn run_or_exit_pipe_output() {
let args = vec![];
let mut options = ScriptOptions::new();
options.output_redirection = IoOptions::Pipe;
let (output, error) = run_or_exit(
r#"
echo "Test"
exit 0
"#,
&args,
&options,
);
assert!(!output.is_empty());
assert!(error.is_empty());
}
#[test]
fn run_or_exit_append_env() {
let args = vec![];
let mut options = ScriptOptions::new();
let mut env_vars = std::collections::HashMap::<String, String>::new();
env_vars.insert("MY_TEST_VARIABLE".to_string(), "MY_TEST_VALUE".to_string());
options.env_vars = Some(env_vars);
std::env::set_var("PARENT_VAR", "PARENT_VALUE");
let script = if cfg!(windows) {
r#"
ECHO %MY_TEST_VARIABLE%
ECHO %PARENT_VAR%
"#
.to_string()
} else {
r#"
echo $MY_TEST_VARIABLE
echo $PARENT_VAR
"#
.to_string()
};
let (output, error) = run_or_exit(&script, &args, &options);
assert!(output.contains("MY_TEST_VALUE"));
assert!(output.contains("PARENT_VALUE"));
assert!(error.is_empty());
// Check if current environment is polluted
assert!(
std::env::var("MY_TEST_VARIABLE").is_err(),
"The parent environment is polluted"
);
}
| {
let mut options = ScriptOptions::new();
options.working_directory = Some(PathBuf::from("/usr/me/home"));
let cwd = current_dir().unwrap();
let mut expected_script = "".to_string();
expected_script.push_str("cd \"");
expected_script.push_str(cwd.to_str().unwrap());
expected_script.push_str("\" && cd \"/usr/me/home\"\necho test\n\n");
let script = modify_script(&"echo test".to_string(), &options).unwrap();
assert_eq!(script, expected_script);
} |
nirspec.py | # NIRSpec specific routines go here
import os
import numpy as np
from astropy.io import fits
from . import sigrej, background, nircam
from . import bright2flux as b2f
def read(filename, data, meta):
|
def flag_bg(data, meta):
'''Outlier rejection of sky background along time axis.
Uses the code written for NIRCam, which is untested for NIRSpec but likely to still work.
Parameters
----------
data: DataClass
The data object in which the fits data will be stored
meta: MetaClass
The metadata object
Returns
-------
data: DataClass
The updated data object with outlier background pixels flagged.
'''
return nircam.flag_bg(data, meta)
def fit_bg(data, meta, mask, y1, y2, bg_deg, p3thresh, n, isplots=False):
'''Fit for a non-uniform background.
Uses the code written for NIRCam, which is untested for NIRSpec but likely to still work.
'''
return nircam.fit_bg(data, meta, mask, y1, y2, bg_deg, p3thresh, n, isplots=isplots)
| '''Reads a single FITS file from JWST's NIRSpec instrument.
Parameters
----------
filename: str
Single filename to read
data: DataClass
The data object in which the fits data will be stored
meta: MetaClass
The metadata object
Returns
-------
data: DataClass
The updated data object with the fits data stored inside
Notes
-----
History:
- November 2012 Kevin Stevenson
Initial version
- June 2021 Aarynn Carter/Eva-Maria Ahrer
Updated for NIRSpec
'''
assert isinstance(filename, str)
# Decide whether to perform the Stage 2 processing ourselves.
# if stage2_processing:
# # Run pipeline on a *_rateints.fits Stage 1 data product, but avoiding significant subarray trimming.
# stage2_filename = process_to_stage2(filename, do_assignwcs=do_assignwcs, do_extract2d=do_extract2d, do_srctype=do_srctype, do_flatfield=do_flatfield, do_photom=do_photom, delete_files=delete_files)
# else:
# # Use the input file as is.
# stage2_filename = filename
# Now we can start working with the data.
hdulist = fits.open(filename)
data.mhdr = hdulist[0].header
data.shdr = hdulist['SCI',1].header
data.intstart = 1
print(' WARNING: Manually setting INTSTART to 1 for NIRSpec CV3 data.')
#data.intstart = data.mhdr['INTSTART']
data.intend = data.mhdr['NINTS']
data.data = hdulist['SCI',1].data
data.err = hdulist['ERR',1].data
data.dq = hdulist['DQ',1].data
data.wave = hdulist['WAVELENGTH',1].data
data.v0 = hdulist['VAR_RNOISE',1].data
data.int_times = hdulist['INT_TIMES',1].data[data.intstart-1:data.intend]
# Record integration mid-times in BJD_TDB
# data.bjdtdb = data.int_times['int_mid_BJD_TDB']
# There is no time information in the simulated NIRSpec data
print(' WARNING: The timestamps for the simulated NIRSpec data are currently '
'hardcoded because they are not in the .fits files themselves')
data.bjdtdb = np.linspace(data.mhdr['EXPSTART'], data.mhdr['EXPEND'], data.intend)
# NIRSpec CV3 data has a lot of NaNs in the data and err arrays, which is making life difficult.
print(' WARNING: Manually changing NaNs in the DATA array to 0 and in the ERR array to np.inf for the CV3 data')
data.err[np.where(np.isnan(data.err))] = np.inf
data.data[np.where(np.isnan(data.data))] = 0
return data, meta |
serde_snapshot.rs | use {
crate::{
accounts::Accounts,
accounts_db::{
AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, AppendVecId,
BankHashInfo,
},
accounts_index::AccountSecondaryIndexes,
accounts_update_notifier_interface::AccountsUpdateNotifier,
ancestors::Ancestors,
append_vec::{AppendVec, StoredMetaWriteVersion},
bank::{Bank, BankFieldsToDeserialize, BankRc},
blockhash_queue::BlockhashQueue,
builtins::Builtins,
epoch_stakes::EpochStakes,
hardened_unpack::UnpackedAppendVecMap,
rent_collector::RentCollector,
serde_snapshot::future::SerializableStorage,
stakes::Stakes,
},
bincode,
bincode::{config::Options, Error},
log::*,
rayon::prelude::*,
serde::{de::DeserializeOwned, Deserialize, Serialize},
solana_measure::measure::Measure,
solana_program_runtime::InstructionProcessor,
solana_sdk::{
clock::{Epoch, Slot, UnixTimestamp},
epoch_schedule::EpochSchedule,
fee_calculator::{FeeCalculator, FeeRateGovernor},
genesis_config::GenesisConfig,
hard_forks::HardForks,
hash::Hash,
inflation::Inflation,
pubkey::Pubkey,
},
std::{
collections::{HashMap, HashSet},
io::{self, BufReader, BufWriter, Read, Write},
path::{Path, PathBuf},
result::Result,
sync::{
atomic::{AtomicUsize, Ordering},
Arc, RwLock,
},
},
};
#[cfg(RUSTC_WITH_SPECIALIZATION)]
use solana_frozen_abi::abi_example::IgnoreAsHelper;
mod common;
mod future;
mod tests;
mod utils;
use future::Context as TypeContextFuture;
#[allow(unused_imports)]
use utils::{serialize_iter_as_map, serialize_iter_as_seq, serialize_iter_as_tuple};
// a number of test cases in accounts_db use this
#[cfg(test)]
pub(crate) use self::tests::reconstruct_accounts_db_via_serialization;
pub(crate) use crate::accounts_db::{SnapshotStorage, SnapshotStorages};
#[derive(Copy, Clone, Eq, PartialEq)]
pub(crate) enum SerdeStyle {
Newer,
}
const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024;
#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample)]
struct AccountsDbFields<T>(
HashMap<Slot, Vec<T>>,
StoredMetaWriteVersion,
Slot,
BankHashInfo,
);
/// Helper type to wrap BufReader streams when deserializing and reconstructing from either just a
/// full snapshot, or both a full and incremental snapshot
pub struct SnapshotStreams<'a, R> {
pub full_snapshot_stream: &'a mut BufReader<R>,
pub incremental_snapshot_stream: Option<&'a mut BufReader<R>>,
}
/// Helper type to wrap AccountsDbFields when reconstructing AccountsDb from either just a full
/// snapshot, or both a full and incremental snapshot
#[derive(Debug)]
struct SnapshotAccountsDbFields<T> {
full_snapshot_accounts_db_fields: AccountsDbFields<T>,
incremental_snapshot_accounts_db_fields: Option<AccountsDbFields<T>>,
}
impl<T> SnapshotAccountsDbFields<T> {
/// Collapse the SnapshotAccountsDbFields into a single AccountsDbFields. If there is no
/// incremental snapshot, this returns the AccountsDbFields from the full snapshot. Otherwise
/// this uses the version, slot, and bank hash info from the incremental snapshot, then the
/// combination of the storages from both the full and incremental snapshots.
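///
/// A hedged illustration with made-up slot numbers: given a full snapshot at slot 100 and an
/// incremental snapshot whose storages span slots 90..=150, the incremental storages at slots
/// <= 100 are dropped, and the result combines the full snapshot's storages with the
/// incremental storages for slots 101..=150.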
fn collapse_into(self) -> Result<AccountsDbFields<T>, Error> {
match self.incremental_snapshot_accounts_db_fields {
None => Ok(self.full_snapshot_accounts_db_fields),
Some(AccountsDbFields(
mut incremental_snapshot_storages,
incremental_snapshot_version,
incremental_snapshot_slot,
incremental_snapshot_bank_hash_info,
)) => {
let full_snapshot_storages = self.full_snapshot_accounts_db_fields.0;
let full_snapshot_slot = self.full_snapshot_accounts_db_fields.2;
// filter out incremental snapshot storages with slot <= full snapshot slot
incremental_snapshot_storages.retain(|slot, _| *slot > full_snapshot_slot);
// There must not be any overlap in the slots of storages between the full snapshot and the incremental snapshot
incremental_snapshot_storages
.iter()
.all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then(|| ()).ok_or_else(|| {
io::Error::new(io::ErrorKind::InvalidData, "Snapshots are incompatible: There are storages for the same slot in both the full snapshot and the incremental snapshot!")
})?; |
let mut combined_storages = full_snapshot_storages;
combined_storages.extend(incremental_snapshot_storages.into_iter());
Ok(AccountsDbFields(
combined_storages,
incremental_snapshot_version,
incremental_snapshot_slot,
incremental_snapshot_bank_hash_info,
))
}
}
}
}
trait TypeContext<'a> {
type SerializableAccountStorageEntry: Serialize
+ DeserializeOwned
+ From<&'a AccountStorageEntry>
+ SerializableStorage
+ Sync;
fn serialize_bank_and_storage<S: serde::ser::Serializer>(
serializer: S,
serializable_bank: &SerializableBankAndStorage<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized;
fn serialize_accounts_db_fields<S: serde::ser::Serializer>(
serializer: S,
serializable_db: &SerializableAccountsDb<'a, Self>,
) -> std::result::Result<S::Ok, S::Error>
where
Self: std::marker::Sized;
fn deserialize_bank_fields<R>(
stream: &mut BufReader<R>,
) -> Result<
(
BankFieldsToDeserialize,
AccountsDbFields<Self::SerializableAccountStorageEntry>,
),
Error,
>
where
R: Read;
fn deserialize_accounts_db_fields<R>(
stream: &mut BufReader<R>,
) -> Result<AccountsDbFields<Self::SerializableAccountStorageEntry>, Error>
where
R: Read;
}
fn deserialize_from<R, T>(reader: R) -> bincode::Result<T>
where
R: Read,
T: DeserializeOwned,
{
bincode::options()
.with_limit(MAX_STREAM_SIZE)
.with_fixint_encoding()
.allow_trailing_bytes()
.deserialize_from::<R, T>(reader)
}
#[allow(clippy::too_many_arguments)]
pub(crate) fn bank_from_streams<R>(
serde_style: SerdeStyle,
snapshot_streams: &mut SnapshotStreams<R>,
account_paths: &[PathBuf],
unpacked_append_vec_map: UnpackedAppendVecMap,
genesis_config: &GenesisConfig,
frozen_account_pubkeys: &[Pubkey],
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
account_secondary_indexes: AccountSecondaryIndexes,
caching_enabled: bool,
limit_load_slot_count_from_snapshot: Option<usize>,
shrink_ratio: AccountShrinkThreshold,
verify_index: bool,
accounts_db_config: Option<AccountsDbConfig>,
accounts_update_notifier: Option<AccountsUpdateNotifier>,
) -> std::result::Result<Bank, Error>
where
R: Read,
{
macro_rules! INTO {
($x:ident) => {{
let (full_snapshot_bank_fields, full_snapshot_accounts_db_fields) =
$x::deserialize_bank_fields(snapshot_streams.full_snapshot_stream)?;
let (incremental_snapshot_bank_fields, incremental_snapshot_accounts_db_fields) =
if let Some(ref mut incremental_snapshot_stream) =
snapshot_streams.incremental_snapshot_stream
{
let (bank_fields, accounts_db_fields) =
$x::deserialize_bank_fields(incremental_snapshot_stream)?;
(Some(bank_fields), Some(accounts_db_fields))
} else {
(None, None)
};
let snapshot_accounts_db_fields = SnapshotAccountsDbFields {
full_snapshot_accounts_db_fields,
incremental_snapshot_accounts_db_fields,
};
let bank = reconstruct_bank_from_fields(
incremental_snapshot_bank_fields.unwrap_or(full_snapshot_bank_fields),
snapshot_accounts_db_fields,
genesis_config,
frozen_account_pubkeys,
account_paths,
unpacked_append_vec_map,
debug_keys,
additional_builtins,
account_secondary_indexes,
caching_enabled,
limit_load_slot_count_from_snapshot,
shrink_ratio,
verify_index,
accounts_db_config,
accounts_update_notifier,
)?;
Ok(bank)
}};
}
match serde_style {
SerdeStyle::Newer => INTO!(TypeContextFuture),
}
.map_err(|err| {
warn!("bankrc_from_stream error: {:?}", err);
err
})
}
pub(crate) fn bank_to_stream<W>(
serde_style: SerdeStyle,
stream: &mut BufWriter<W>,
bank: &Bank,
snapshot_storages: &[SnapshotStorage],
) -> Result<(), Error>
where
W: Write,
{
macro_rules! INTO {
($x:ident) => {
bincode::serialize_into(
stream,
&SerializableBankAndStorage::<$x> {
bank,
snapshot_storages,
phantom: std::marker::PhantomData::default(),
},
)
};
}
match serde_style {
SerdeStyle::Newer => INTO!(TypeContextFuture),
}
.map_err(|err| {
warn!("bankrc_to_stream error: {:?}", err);
err
})
}
struct SerializableBankAndStorage<'a, C> {
bank: &'a Bank,
snapshot_storages: &'a [SnapshotStorage],
phantom: std::marker::PhantomData<C>,
}
impl<'a, C: TypeContext<'a>> Serialize for SerializableBankAndStorage<'a, C> {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
C::serialize_bank_and_storage(serializer, self)
}
}
struct SerializableAccountsDb<'a, C> {
accounts_db: &'a AccountsDb,
slot: Slot,
account_storage_entries: &'a [SnapshotStorage],
phantom: std::marker::PhantomData<C>,
}
impl<'a, C: TypeContext<'a>> Serialize for SerializableAccountsDb<'a, C> {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
C::serialize_accounts_db_fields(serializer, self)
}
}
#[cfg(RUSTC_WITH_SPECIALIZATION)]
impl<'a, C> IgnoreAsHelper for SerializableAccountsDb<'a, C> {}
#[allow(clippy::too_many_arguments)]
fn reconstruct_bank_from_fields<E>(
bank_fields: BankFieldsToDeserialize,
snapshot_accounts_db_fields: SnapshotAccountsDbFields<E>,
genesis_config: &GenesisConfig,
frozen_account_pubkeys: &[Pubkey],
account_paths: &[PathBuf],
unpacked_append_vec_map: UnpackedAppendVecMap,
debug_keys: Option<Arc<HashSet<Pubkey>>>,
additional_builtins: Option<&Builtins>,
account_secondary_indexes: AccountSecondaryIndexes,
caching_enabled: bool,
limit_load_slot_count_from_snapshot: Option<usize>,
shrink_ratio: AccountShrinkThreshold,
verify_index: bool,
accounts_db_config: Option<AccountsDbConfig>,
accounts_update_notifier: Option<AccountsUpdateNotifier>,
) -> Result<Bank, Error>
where
E: SerializableStorage + std::marker::Sync,
{
let mut accounts_db = reconstruct_accountsdb_from_fields(
snapshot_accounts_db_fields,
account_paths,
unpacked_append_vec_map,
genesis_config,
account_secondary_indexes,
caching_enabled,
limit_load_slot_count_from_snapshot,
shrink_ratio,
verify_index,
accounts_db_config,
accounts_update_notifier,
)?;
accounts_db.freeze_accounts(
&Ancestors::from(&bank_fields.ancestors),
frozen_account_pubkeys,
);
let bank_rc = BankRc::new(Accounts::new_empty(accounts_db), bank_fields.slot);
// if limit_load_slot_count_from_snapshot is set, then we need to side-step some correctness checks beneath this call
let debug_do_not_add_builtins = limit_load_slot_count_from_snapshot.is_some();
let bank = Bank::new_from_fields(
bank_rc,
genesis_config,
bank_fields,
debug_keys,
additional_builtins,
debug_do_not_add_builtins,
);
Ok(bank)
}
fn reconstruct_single_storage<E>(
slot: &Slot,
append_vec_path: &Path,
storage_entry: &E,
remapped_append_vec_id: Option<AppendVecId>,
new_slot_storage: &mut HashMap<AppendVecId, Arc<AccountStorageEntry>>,
) -> Result<(), Error>
where
E: SerializableStorage,
{
let append_vec_id = remapped_append_vec_id.unwrap_or_else(|| storage_entry.id());
let (accounts, num_accounts) =
AppendVec::new_from_file(append_vec_path, storage_entry.current_len())?;
let u_storage_entry =
AccountStorageEntry::new_existing(*slot, append_vec_id, accounts, num_accounts);
new_slot_storage.insert(append_vec_id, Arc::new(u_storage_entry));
Ok(())
}
#[allow(clippy::too_many_arguments)]
fn reconstruct_accountsdb_from_fields<E>(
snapshot_accounts_db_fields: SnapshotAccountsDbFields<E>,
account_paths: &[PathBuf],
unpacked_append_vec_map: UnpackedAppendVecMap,
genesis_config: &GenesisConfig,
account_secondary_indexes: AccountSecondaryIndexes,
caching_enabled: bool,
limit_load_slot_count_from_snapshot: Option<usize>,
shrink_ratio: AccountShrinkThreshold,
verify_index: bool,
accounts_db_config: Option<AccountsDbConfig>,
accounts_update_notifier: Option<AccountsUpdateNotifier>,
) -> Result<AccountsDb, Error>
where
E: SerializableStorage + std::marker::Sync,
{
let mut accounts_db = AccountsDb::new_with_config(
account_paths.to_vec(),
&genesis_config.cluster_type,
account_secondary_indexes,
caching_enabled,
shrink_ratio,
accounts_db_config,
accounts_update_notifier,
);
let AccountsDbFields(
snapshot_storages,
snapshot_version,
snapshot_slot,
snapshot_bank_hash_info,
) = snapshot_accounts_db_fields.collapse_into()?;
let snapshot_storages = snapshot_storages.into_iter().collect::<Vec<_>>();
// Ensure all account paths exist
for path in &accounts_db.paths {
std::fs::create_dir_all(path)
.unwrap_or_else(|err| panic!("Failed to create directory {}: {}", path.display(), err));
}
// Remap the deserialized AppendVec paths to point to correct local paths
let num_collisions = AtomicUsize::new(0);
let next_append_vec_id = AtomicUsize::new(0);
let mut measure_remap = Measure::start("remap");
let mut storage = (0..snapshot_storages.len())
.into_par_iter()
.map(|i| {
let (slot, slot_storage) = &snapshot_storages[i];
let mut new_slot_storage = HashMap::new();
for storage_entry in slot_storage {
let file_name = AppendVec::file_name(*slot, storage_entry.id());
let append_vec_path = unpacked_append_vec_map.get(&file_name).ok_or_else(|| {
io::Error::new(
io::ErrorKind::NotFound,
format!("{} not found in unpacked append vecs", file_name),
)
})?;
// Remap the AppendVec ID to handle any duplicate IDs that may have previously existed
// due to full snapshots and incremental snapshots generated from different nodes
let (remapped_append_vec_id, remapped_append_vec_path) = loop {
let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::Relaxed);
let remapped_file_name = AppendVec::file_name(*slot, remapped_append_vec_id);
let remapped_append_vec_path =
append_vec_path.parent().unwrap().join(&remapped_file_name);
// Break out of the loop in the following situations:
// 1. The new ID is the same as the original ID. This means we do not need to
// rename the file, since the ID is the "correct" one already.
// 2. There is not a file already at the new path. This means it is safe to
// rename the file to this new path.
// **DEVELOPER NOTE:** Keep this check last so that it can short-circuit if
// possible.
if storage_entry.id() == remapped_append_vec_id
|| std::fs::metadata(&remapped_append_vec_path).is_err()
{
break (remapped_append_vec_id, remapped_append_vec_path);
}
// If we made it this far, a file exists at the new path. Record the collision
// and try again.
num_collisions.fetch_add(1, Ordering::Relaxed);
};
// Only rename the file if the new ID is actually different from the original.
if storage_entry.id() != remapped_append_vec_id {
std::fs::rename(append_vec_path, &remapped_append_vec_path)?;
}
reconstruct_single_storage(
slot,
&remapped_append_vec_path,
storage_entry,
Some(remapped_append_vec_id),
&mut new_slot_storage,
)?;
}
Ok((*slot, new_slot_storage))
})
.collect::<Result<HashMap<Slot, _>, Error>>()?;
measure_remap.stop();
// discard any slots with no storage entries
// this can happen if a non-root slot was serialized
// but non-root stores should not be included in the snapshot
storage.retain(|_slot, stores| !stores.is_empty());
assert!(
!storage.is_empty(),
"At least one storage entry must exist from deserializing stream"
);
let next_append_vec_id = next_append_vec_id.load(Ordering::Relaxed);
let max_append_vec_id = next_append_vec_id - 1;
assert!(
max_append_vec_id <= AppendVecId::MAX / 2,
"Storage id {} larger than allowed max",
max_append_vec_id
);
// Process deserialized data, set necessary fields in self
accounts_db
.bank_hashes
.write()
.unwrap()
.insert(snapshot_slot, snapshot_bank_hash_info);
accounts_db.storage.0.extend(
storage
.into_iter()
.map(|(slot, slot_storage_entry)| (slot, Arc::new(RwLock::new(slot_storage_entry)))),
);
accounts_db
.next_id
.store(next_append_vec_id, Ordering::Relaxed);
accounts_db
.write_version
.fetch_add(snapshot_version, Ordering::Relaxed);
accounts_db.generate_index(limit_load_slot_count_from_snapshot, verify_index);
accounts_db.maybe_add_filler_accounts(genesis_config.ticks_per_slot());
let mut measure_notify = Measure::start("accounts_notify");
accounts_db.notify_account_restore_from_snapshot();
measure_notify.stop();
datapoint_info!(
"reconstruct_accountsdb_from_fields()",
("remap-time-us", measure_remap.as_us(), i64),
(
"remap-collisions",
num_collisions.load(Ordering::Relaxed),
i64
),
("accountsdb-notify-at-start-us", measure_notify.as_us(), i64),
);
Ok(accounts_db)
} | |
program.rs | //! Shader programs related types and functions.
//!
//! A shader `Program` is an object representing several operations. It’s a streaming program that
//! will operate on vertices, vertex patches, primitives and/or fragments.
//!
//! > *Note: shader programs don’t have to run on all those objects; they can be run only on
//! vertices and fragments, for instance*.
//!
//! Creating a shader program is very simple. You need shader `Stage`s representing each step of the
//! processing.
//!
//! You *have* to provide at least a vertex and a fragment stage. If you want tessellation
//! processing, you need to provide tessellation control and tessellation evaluation stages. If
//! you want primitives processing, you need to add a geometry stage.
//!
//! In order to customize the behavior of your shader programs, you have access to *uniforms*. For
//! more details about them, see the documentation for the type `Uniform` and trait `Uniformable`.
//! When creating a new shader program, you have to provide code to declare its *uniform semantics*.
//!
//! The *uniform semantics* represent a mapping between the variables declared in your shader
//! sources and variables you have access to in your host code in Rust. Typically, you declare your
//! variable – `Uniform` – in Rust as `const` and use the function `Uniform::sem` to get the
//! semantic associated with the string you pass in.
//!
//! > **Be careful**: currently, uniforms are a bit messy as you have to provide a per-program unique
//! number when you use the `Uniform::new` method. Efforts will be made in that direction in later
//! releases.
//!
//! You can create a `Program` with its `from_stages` or `from_strings` associated functions.
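//!
//! As a hedged sketch (the `VSemantics` type and the `VS_SRC` / `FS_SRC` source strings are
//! assumptions, not items defined in this module), building a program with no tessellation and no
//! geometry stage could look like this:
//!
//! ```ignore
//! // `()` is a valid uniform interface when no uniforms are needed.
//! let program = Program::<VSemantics, (), ()>::from_strings(None, VS_SRC, None, FS_SRC)
//!   .expect("program creation")
//!   .ignore_warnings();
//! ```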
#[cfg(feature = "std")]
use std::ffi::CString;
#[cfg(feature = "std")]
use std::fmt;
#[cfg(feature = "std")]
use std::marker::PhantomData;
#[cfg(feature = "std")]
use std::ops::Deref;
#[cfg(feature = "std")]
use std::ptr::null_mut;
#[cfg(not(feature = "std"))]
use alloc::prelude::ToOwned;
#[cfg(not(feature = "std"))]
use alloc::string::String;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
#[cfg(not(feature = "std"))]
use core::fmt::{self, Write};
#[cfg(not(feature = "std"))]
use core::marker::PhantomData;
#[cfg(not(feature = "std"))]
use core::ops::Deref;
#[cfg(not(feature = "std"))]
use core::ptr::null_mut;
use crate::linear::{M22, M33, M44};
use crate::metagl::*;
use crate::shader::stage::{self, Stage, StageError};
use crate::vertex::Semantics;
/// A raw shader program.
///
/// This is a type-erased version of a `Program`.
#[derive(Debug)]
pub struct RawProgram {
handle: GLuint,
}
impl RawProgram {
/// Create a new program by attaching shader stages.
fn new<'a, T, G>(
tess: T,
vertex: &Stage,
geometry: G,
fragment: &Stage,
) -> Result<Self, ProgramError>
where
T: Into<Option<(&'a Stage, &'a Stage)>>,
G: Into<Option<&'a Stage>>,
{
unsafe {
let handle = gl::CreateProgram();
if let Some((tcs, tes)) = tess.into() {
gl::AttachShader(handle, tcs.handle());
gl::AttachShader(handle, tes.handle());
}
gl::AttachShader(handle, vertex.handle());
if let Some(geometry) = geometry.into() {
gl::AttachShader(handle, geometry.handle());
}
gl::AttachShader(handle, fragment.handle());
let program = RawProgram { handle };
program.link().map(move |_| program)
}
}
/// Link a program.
fn link(&self) -> Result<(), ProgramError> {
let handle = self.handle;
unsafe {
gl::LinkProgram(handle);
let mut linked: GLint = gl::FALSE.into();
gl::GetProgramiv(handle, gl::LINK_STATUS, &mut linked);
if linked == gl::TRUE.into() {
Ok(())
} else {
let mut log_len: GLint = 0;
gl::GetProgramiv(handle, gl::INFO_LOG_LENGTH, &mut log_len);
let mut log: Vec<u8> = Vec::with_capacity(log_len as usize);
gl::GetProgramInfoLog(handle, log_len, null_mut(), log.as_mut_ptr() as *mut GLchar);
gl::DeleteProgram(handle);
log.set_len(log_len as usize);
Err(ProgramError::LinkFailed(String::from_utf8(log).unwrap()))
}
}
}
#[inline]
pub(crate) fn handle(&self) -> GLuint {
self.handle
}
}
impl Drop for RawProgram {
fn drop(&mut self) {
unsafe { gl::DeleteProgram(self.handle) }
}
}
/// A typed shader program.
///
/// Typed shader programs represent their inputs, outputs and environment (uniforms) directly in
/// their types. This is very interesting as it adds more static safety and enables such programs
/// to *“store”* information like the uniform interface and such.
pub struct Program<S, Out, Uni> {
raw: RawProgram,
uni_iface: Uni,
_in: PhantomData<*const S>,
_out: PhantomData<*const Out>,
}
impl<S, Out, Uni> Program<S, Out, Uni>
where
S: Semantics,
{
/// Create a new program by consuming `Stage`s.
pub fn from_stages<'a, T, G>(
tess: T,
vertex: &Stage,
geometry: G,
fragment: &Stage,
) -> Result<BuiltProgram<S, Out, Uni>, ProgramError>
where
Uni: UniformInterface,
T: Into<Option<(&'a Stage, &'a Stage)>>,
G: Into<Option<&'a Stage>>,
{
Self::from_stages_env(tess, vertex, geometry, fragment, ())
}
/// Create a new program by consuming strings.
pub fn from_strings<'a, T, G>(
tess: T,
vertex: &str,
geometry: G,
fragment: &str,
) -> Result<BuiltProgram<S, Out, Uni>, ProgramError>
where
Uni: UniformInterface,
T: Into<Option<(&'a str, &'a str)>>,
G: Into<Option<&'a str>>,
{
Self::from_strings_env(tess, vertex, geometry, fragment, ())
}
/// Create a new program by consuming `Stage`s and by looking up an environment.
pub fn from_stages_env<'a, E, T, G>(
tess: T,
vertex: &Stage,
geometry: G,
fragment: &Stage,
env: E,
) -> Result<BuiltProgram<S, Out, Uni>, ProgramError>
where
Uni: UniformInterface<E>,
T: Into<Option<(&'a Stage, &'a Stage)>>,
G: Into<Option<&'a Stage>>,
{
let raw = RawProgram::new(tess, vertex, geometry, fragment)?;
let mut warnings = bind_vertex_attribs_locations::<S>(&raw);
raw.link()?;
let (uni_iface, uniform_warnings) = create_uniform_interface(&raw, env)?;
warnings.extend(uniform_warnings.into_iter().map(ProgramWarning::Uniform));
let program = Program {
raw,
uni_iface,
_in: PhantomData,
_out: PhantomData,
};
Ok(BuiltProgram { program, warnings })
}
/// Create a new program by consuming strings and by looking up an environment.
pub fn from_strings_env<'a, E, T, G>(
tess: T,
vertex: &str,
geometry: G,
fragment: &str,
env: E,
) -> Result<BuiltProgram<S, Out, Uni>, ProgramError>
where
Uni: UniformInterface<E>,
T: Into<Option<(&'a str, &'a str)>>,
G: Into<Option<&'a str>>,
{
let tess = match tess.into() {
Some((tcs_str, tes_str)) => {
let tcs = Stage::new(stage::Type::TessellationControlShader, tcs_str)
.map_err(ProgramError::StageError)?;
let tes = Stage::new(stage::Type::TessellationEvaluationShader, tes_str)
.map_err(ProgramError::StageError)?;
Some((tcs, tes))
}
None => None,
};
let gs = match geometry.into() {
Some(gs_str) => {
Some(Stage::new(stage::Type::GeometryShader, gs_str).map_err(ProgramError::StageError)?)
}
None => None,
};
let vs = Stage::new(stage::Type::VertexShader, vertex).map_err(ProgramError::StageError)?;
let fs = Stage::new(stage::Type::FragmentShader, fragment).map_err(ProgramError::StageError)?;
Self::from_stages_env(
tess.as_ref().map(|&(ref tcs, ref tes)| (tcs, tes)),
&vs,
gs.as_ref(),
&fs,
env,
)
}
/// Get the program interface associated with this program.
pub(crate) fn interface(&self) -> ProgramInterface<Uni> {
let raw_program = &self.raw;
let uniform_interface = &self.uni_iface;
ProgramInterface {
raw_program,
uniform_interface,
}
}
/// Transform the program to adapt the uniform interface.
///
/// This function will not re-allocate nor recreate the GPU data. It will try to change the
/// uniform interface and if the new uniform interface is correctly generated, return the same
/// shader program updated with the new uniform interface. If the generation of the new uniform
/// interface fails, this function will return the program with the former uniform interface.
pub fn adapt<Q>(self) -> Result<BuiltProgram<S, Out, Q>, AdaptationFailure<S, Out, Uni>>
where
Q: UniformInterface,
{
self.adapt_env(())
}
/// Transform the program to adapt the uniform interface by looking up an environment.
///
/// This function will not re-allocate nor recreate the GPU data. It will try to change the
/// uniform interface and if the new uniform interface is correctly generated, return the same
/// shader program updated with the new uniform interface. If the generation of the new uniform
/// interface fails, this function will return the program with the former uniform interface.
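///
/// A hedged usage sketch (the `TimeUniforms` interface and the `elapsed` value are assumptions):
///
/// ```ignore
/// match program.adapt_env::<TimeUniforms, _>(elapsed) {
///   Ok(built) => { /* use built.program with the new uniform interface */ }
///   Err(failure) => { /* fall back to failure.program with the former interface */ }
/// }
/// ```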
pub fn adapt_env<Q, E>(
self,
env: E,
) -> Result<BuiltProgram<S, Out, Q>, AdaptationFailure<S, Out, Uni>>
where
Q: UniformInterface<E>,
{
// first, try to create the new uniform interface
let new_uni_iface = create_uniform_interface(&self.raw, env);
match new_uni_iface {
Ok((uni_iface, warnings)) => {
// if we have succeeded, return self with the new uniform interface
let program = Program {
raw: self.raw,
uni_iface,
_in: PhantomData,
_out: PhantomData,
};
let warnings = warnings.into_iter().map(ProgramWarning::Uniform).collect();
Ok(BuiltProgram { program, warnings })
}
Err(iface_err) => {
// we couldn’t generate the new uniform interface; return the error(s) that occurred and
// the untouched former program
let failure = AdaptationFailure {
program: self,
error: iface_err,
};
Err(failure)
}
}
}
/// A version of [`Program::adapt_env`] that doesn’t change the uniform interface type.
///
/// This function might be needed when you want to update the uniform interface but still
/// enforce that its type remains the same.
pub fn readapt_env<E>(
self,
env: E,
) -> Result<BuiltProgram<S, Out, Uni>, AdaptationFailure<S, Out, Uni>>
where
Uni: UniformInterface<E>,
{
self.adapt_env(env)
}
}
impl<S, Out, Uni> Deref for Program<S, Out, Uni> {
type Target = RawProgram;
fn deref(&self) -> &Self::Target {
&self.raw
}
}
/// A built program with potential warnings.
///
/// The sole purpose of this type is to be destructured when a program is built.
pub struct BuiltProgram<S, Out, Uni> {
/// Built program.
pub program: Program<S, Out, Uni>,
/// Potential warnings.
pub warnings: Vec<ProgramWarning>,
}
impl<S, Out, Uni> BuiltProgram<S, Out, Uni> {
/// Get the program and ignore the warnings.
pub fn ignore_warnings(self) -> Program<S, Out, Uni> {
self.program
}
}
/// A [`Program`] uniform adaptation that has failed.
pub struct AdaptationFailure<S, Out, Uni> {
/// Program used before trying to adapt.
pub program: Program<S, Out, Uni>,
/// Program error that prevented to adapt.
pub error: ProgramError,
}
impl<S, Out, Uni> AdaptationFailure<S, Out, Uni> {
/// Get the program and ignore the error.
pub fn ignore_error(self) -> Program<S, Out, Uni> {
self.program
}
}
/// Class of types that can act as uniform interfaces in typed programs.
///
/// A uniform interface is a value that contains uniforms. The purpose of a uniform interface is to
/// be stored in a typed program and handed back when the program is made available in a pipeline.
///
/// The `E` type variable represents the environment and might be used to drive the implementation
/// from a value. It’s defaulted to `()` so that if you don’t use the environment, you don’t have to
/// worry about that value when creating the shader program.
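///
/// A minimal sketch of a custom interface, assuming the shader declares `t` and `resolution`
/// uniforms (both names are illustrative):
///
/// ```ignore
/// struct MyUniforms {
///   t: Uniform<f32>,
///   resolution: Uniform<[f32; 2]>,
/// }
///
/// impl UniformInterface for MyUniforms {
///   fn uniform_interface<'a>(builder: &mut UniformBuilder<'a>, _: ()) -> Result<Self, ProgramError> {
///     // ask_unbound never fails: missing or inactive uniforms become warnings instead
///     Ok(MyUniforms {
///       t: builder.ask_unbound("t"),
///       resolution: builder.ask_unbound("resolution"),
///     })
///   }
/// }
/// ```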
pub trait UniformInterface<E = ()>: Sized {
/// Build the uniform interface.
///
/// When mapping a uniform, if you want to accept failures, you can discard the error and use
/// `UniformBuilder::unbound` to let the uniform pass through, and collect the uniform warning.
fn uniform_interface<'a>(builder: &mut UniformBuilder<'a>, env: E) -> Result<Self, ProgramError>;
}
impl UniformInterface for () {
fn uniform_interface<'a>(_: &mut UniformBuilder<'a>, _: ()) -> Result<Self, ProgramError> {
Ok(())
}
}
/// Build uniforms to fold them to a uniform interface.
pub struct UniformBuilder<'a> {
raw: &'a RawProgram,
warnings: Vec<UniformWarning>,
}
impl<'a> UniformBuilder<'a> {
fn new(raw: &'a RawProgram) -> Self {
UniformBuilder {
raw,
warnings: Vec::new(),
}
}
/// Have the builder hand you a `Uniform` of the type of your choice.
///
/// Keep in mind that this function can fail if the type you ask for doesn’t match the one
/// defined in the shader. If you don’t want a failure but an *unbound*
/// uniform, head over to the `ask_unbound` function.
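///
/// A hedged sketch, as used inside a `uniform_interface` implementation (the `"projection"` name
/// is an assumption):
///
/// ```ignore
/// let projection: Uniform<M44> = builder
///   .ask("projection")
///   .map_err(ProgramError::UniformWarning)?;
/// ```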
pub fn ask<T>(&self, name: &str) -> Result<Uniform<T>, UniformWarning>
where
T: Uniformable,
{
let uniform = match T::ty() {
Type::BufferBinding => self.ask_uniform_block(name)?,
_ => self.ask_uniform(name)?,
};
uniform_type_match(self.raw.handle, name, T::ty())?;
Ok(uniform)
}
/// Get an unbound [`Uniform`].
///
/// Unbound [`Uniform`]s are not any different from typical [`Uniform`]s but when resolving
/// mapping in the _shader program_, if the [`Uniform`] is found inactive or doesn’t exist,
/// instead of returning an error, this function will return an _unbound uniform_, which is a
/// uniform that does nothing interesting.
///
/// That function is useful if you don’t really care about silently sending values down a shader
/// program and getting them ignored. It might be the case for optional uniforms, for instance.
pub fn ask_unbound<T>(&mut self, name: &str) -> Uniform<T>
where
T: Uniformable,
{
match self.ask(name) {
Ok(uniform) => uniform,
Err(warning) => {
self.warnings.push(warning);
self.unbound()
}
}
}
fn ask_uniform<T>(&self, name: &str) -> Result<Uniform<T>, UniformWarning>
where
T: Uniformable,
{
let location = {
#[cfg(feature = "std")]
{
let c_name = CString::new(name.as_bytes()).unwrap();
unsafe { gl::GetUniformLocation(self.raw.handle, c_name.as_ptr() as *const GLchar) }
}
#[cfg(not(feature = "std"))]
{
unsafe {
with_cstring(name, |c_name| {
gl::GetUniformLocation(self.raw.handle, c_name)
})
.unwrap_or(-1)
}
}
};
if location < 0 {
Err(UniformWarning::Inactive(name.to_owned()))
} else {
Ok(Uniform::new(self.raw.handle, location))
}
}
fn ask_uniform_block<T>(&self, name: &str) -> Result<Uniform<T>, UniformWarning>
where
T: Uniformable,
{
let location = {
#[cfg(feature = "std")]
{
let c_name = CString::new(name.as_bytes()).unwrap();
unsafe { gl::GetUniformBlockIndex(self.raw.handle, c_name.as_ptr() as *const GLchar) }
}
#[cfg(not(feature = "std"))]
{
unsafe {
with_cstring(name, |c_name| {
gl::GetUniformBlockIndex(self.raw.handle, c_name)
})
.unwrap_or(gl::INVALID_INDEX)
}
}
};
if location == gl::INVALID_INDEX {
Err(UniformWarning::Inactive(name.to_owned()))
} else {
Ok(Uniform::new(self.raw.handle, location as GLint))
}
}
/// Special uniform that won’t do anything.
///
/// Use that function when you need a uniform to complete a uniform interface but you’re sure you
/// won’t use it.
pub fn unbound<T>(&self) -> Uniform<T>
where
T: Uniformable,
{
Uniform::unbound(self.raw.handle)
}
}
/// The shader program interface.
///
/// This struct gives you access to several capabilities, among them:
///
/// - The typed *uniform interface* you would have acquired earlier.
/// - Some functions to query more data dynamically.
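///
/// A hedged sketch of a dynamic lookup through the interface (the `"t"` uniform name is an
/// assumption):
///
/// ```ignore
/// let builder = iface.query();
/// if let Ok(t) = builder.ask::<f32>("t") {
///   t.update(1.0);
/// }
/// ```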
pub struct ProgramInterface<'a, Uni> {
raw_program: &'a RawProgram,
uniform_interface: &'a Uni,
}
impl<'a, Uni> Deref for ProgramInterface<'a, Uni> {
type Target = Uni;
fn deref(&self) -> &Self::Target {
self.uniform_interface
}
}
impl<'a, Uni> ProgramInterface<'a, Uni> {
/// Get a [`UniformBuilder`] in order to perform dynamic uniform lookup.
pub fn query(&'a self) -> UniformBuilder<'a> {
UniformBuilder::new(self.raw_program)
}
}
/// Errors that a `Program` can generate.
#[derive(Debug)]
pub enum ProgramError {
/// A shader stage failed to compile or validate its state.
StageError(StageError),
/// Program link failed. You can inspect the reason by looking at the contained `String`.
LinkFailed(String),
/// Some uniform configuration is ill-formed. It can be a problem of inactive uniform, mismatch
/// type, etc. Check the `UniformWarning` type for more information.
UniformWarning(UniformWarning),
/// Some vertex attribute is ill-formed.
VertexAttribWarning(VertexAttribWarning),
}
impl fmt::Display for ProgramError {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
ProgramError::StageError(ref e) => write!(f, "shader program has stage error: {}", e),
ProgramError::LinkFailed(ref s) => write!(f, "shader program failed to link: {}", s),
ProgramError::UniformWarning(ref e) => {
write!(f, "shader program contains uniform warning(s): {}", e)
}
ProgramError::VertexAttribWarning(ref e) => write!(
f,
"shader program contains vertex attribute warning(s): {}",
e
),
}
}
}
/// Program warnings, not necessarily considered blocking errors.
#[derive(Debug)]
pub enum ProgramWarning {
/// Some uniform configuration is ill-formed. It can be a problem of inactive uniform, mismatch
/// type, etc. Check the `UniformWarning` type for more information.
Uniform(UniformWarning),
/// Some vertex attribute is ill-formed.
VertexAttrib(VertexAttribWarning),
}
impl fmt::Display for ProgramWarning {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
ProgramWarning::Uniform(ref e) => write!(f, "uniform warning: {}", e),
ProgramWarning::VertexAttrib(ref e) => write!(f, "vertex attribute warning: {}", e),
}
}
}
/// Warnings related to uniform issues.
#[derive(Debug)]
pub enum UniformWarning {
/// Inactive uniform (not in use / does not participate in the final output of the shaders).
Inactive(String),
/// Type mismatch between the statically requested type (e.g. the `T` in [`Uniform<T>`])
/// and the type that got reflected from the backend in the shaders.
///
/// The first `String` is the name of the uniform; the second one gives the type mismatch.
TypeMismatch(String, Type),
}
impl UniformWarning {
/// Create an inactive uniform warning.
pub fn inactive<N>(name: N) -> Self
where
N: Into<String>,
{
UniformWarning::Inactive(name.into())
}
/// Create a type mismatch.
pub fn type_mismatch<N>(name: N, ty: Type) -> Self
where
N: Into<String>,
{
UniformWarning::TypeMismatch(name.into(), ty)
}
}
impl fmt::Display for UniformWarning {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
UniformWarning::Inactive(ref s) => write!(f, "inactive {} uniform", s),
UniformWarning::TypeMismatch(ref n, ref t) => {
write!(f, "type mismatch for uniform {}: {}", n, t)
}
}
}
}
/// Warnings related to vertex attributes issues.
#[derive(Debug)]
pub enum VertexAttribWarning {
/// Inactive vertex attribute (not read).
Inactive(String),
}
impl fmt::Display for VertexAttribWarning {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
VertexAttribWarning::Inactive(ref s) => write!(f, "inactive {} vertex attribute", s),
}
}
}
/// A contravariant shader uniform. `Uniform<T>` doesn’t hold any value. It’s more like a mapping
/// between the host code and the shader the uniform was retrieved from.
#[derive(Debug)]
pub struct Uniform<T> {
program: GLuint,
index: GLint,
_t: PhantomData<*const T>,
}
impl<T> Uniform<T>
where
T: Uniformable,
{
fn new(program: GLuint, index: GLint) -> Self {
Uniform {
program,
index,
_t: PhantomData,
}
}
fn unbound(program: GLuint) -> Self {
Uniform {
program,
index: -1,
_t: PhantomData,
}
}
pub(crate) fn program(&self) -> GLuint {
self.program
}
pub(crate) fn index(&self) -> GLint {
self.index
}
/// Update the value pointed by this uniform.
pub fn update(&self, x: T) {
x.update(self);
}
}
/// Type of a uniform.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Type {
// scalars
/// 32-bit signed integer.
Int,
/// 32-bit unsigned integer.
UInt,
/// 32-bit floating-point number.
Float,
/// Boolean.
Bool,
// vectors
/// 2D signed integral vector.
IVec2,
/// 3D signed integral vector.
IVec3,
/// 4D signed integral vector. | /// 3D unsigned integral vector.
UIVec3,
/// 4D unsigned integral vector.
UIVec4,
/// 2D floating-point vector.
Vec2,
/// 3D floating-point vector.
Vec3,
/// 4D floating-point vector.
Vec4,
/// 2D boolean vector.
BVec2,
/// 3D boolean vector.
BVec3,
/// 4D boolean vector.
BVec4,
// matrices
/// 2×2 floating-point matrix.
M22,
/// 3×3 floating-point matrix.
M33,
/// 4×4 floating-point matrix.
M44,
// textures
/// Signed integral 1D texture sampler.
ISampler1D,
/// Signed integral 2D texture sampler.
ISampler2D,
/// Signed integral 3D texture sampler.
ISampler3D,
/// Signed integral 1D array texture sampler.
ISampler1DArray,
/// Signed integral 2D array texture sampler.
ISampler2DArray,
/// Unsigned integral 1D texture sampler.
UISampler1D,
/// Unsigned integral 2D texture sampler.
UISampler2D,
/// Unsigned integral 3D texture sampler.
UISampler3D,
/// Unsigned integral 1D array texture sampler.
UISampler1DArray,
/// Unsigned integral 2D array texture sampler.
UISampler2DArray,
/// Floating-point 1D texture sampler.
Sampler1D,
/// Floating-point 2D texture sampler.
Sampler2D,
/// Floating-point 3D texture sampler.
Sampler3D,
/// Floating-point 1D array texture sampler.
Sampler1DArray,
/// Floating-point 2D array texture sampler.
Sampler2DArray,
/// Signed cubemap sampler.
ICubemap,
/// Unsigned cubemap sampler.
UICubemap,
/// Floating-point cubemap sampler.
Cubemap,
// buffer
/// Buffer binding; used for UBOs.
BufferBinding,
}
impl fmt::Display for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match *self {
Type::Int => f.write_str("int"),
Type::UInt => f.write_str("uint"),
Type::Float => f.write_str("float"),
Type::Bool => f.write_str("bool"),
Type::IVec2 => f.write_str("ivec2"),
Type::IVec3 => f.write_str("ivec3"),
Type::IVec4 => f.write_str("ivec4"),
Type::UIVec2 => f.write_str("uvec2"),
Type::UIVec3 => f.write_str("uvec3"),
Type::UIVec4 => f.write_str("uvec4"),
Type::Vec2 => f.write_str("vec2"),
Type::Vec3 => f.write_str("vec3"),
Type::Vec4 => f.write_str("vec4"),
Type::BVec2 => f.write_str("bvec2"),
Type::BVec3 => f.write_str("bvec3"),
Type::BVec4 => f.write_str("bvec4"),
Type::M22 => f.write_str("mat2"),
Type::M33 => f.write_str("mat3"),
Type::M44 => f.write_str("mat4"),
Type::ISampler1D => f.write_str("isampler1D"),
Type::ISampler2D => f.write_str("isampler2D"),
Type::ISampler3D => f.write_str("isampler3D"),
Type::ISampler1DArray => f.write_str("isampler1DArray"),
Type::ISampler2DArray => f.write_str("isampler2DArray"),
Type::UISampler1D => f.write_str("usampler1D"),
Type::UISampler2D => f.write_str("usampler2D"),
Type::UISampler3D => f.write_str("usampler3D"),
Type::UISampler1DArray => f.write_str("usampler1DArray"),
Type::UISampler2DArray => f.write_str("usampler2DArray"),
Type::Sampler1D => f.write_str("sampler1D"),
Type::Sampler2D => f.write_str("sampler2D"),
Type::Sampler3D => f.write_str("sampler3D"),
Type::Sampler1DArray => f.write_str("sampler1DArray"),
Type::Sampler2DArray => f.write_str("sampler2DArray"),
Type::ICubemap => f.write_str("isamplerCube"),
Type::UICubemap => f.write_str("usamplerCube"),
Type::Cubemap => f.write_str("samplerCube"),
Type::BufferBinding => f.write_str("buffer binding"),
}
}
}
/// Types that can behave as `Uniform`.
pub unsafe trait Uniformable: Sized {
/// Update the uniform with a new value.
fn update(self, u: &Uniform<Self>);
/// Retrieve the `Type` of the uniform.
fn ty() -> Type;
}
unsafe impl Uniformable for i32 {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform1i(u.index, self) }
}
fn ty() -> Type {
Type::Int
}
}
unsafe impl Uniformable for [i32; 2] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform2iv(u.index, 1, &self as *const i32) }
}
fn ty() -> Type {
Type::IVec2
}
}
unsafe impl Uniformable for [i32; 3] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform3iv(u.index, 1, &self as *const i32) }
}
fn ty() -> Type {
Type::IVec3
}
}
unsafe impl Uniformable for [i32; 4] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform4iv(u.index, 1, &self as *const i32) }
}
fn ty() -> Type {
Type::IVec4
}
}
unsafe impl<'a> Uniformable for &'a [i32] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform1iv(u.index, self.len() as GLsizei, self.as_ptr()) }
}
fn ty() -> Type {
Type::Int
}
}
unsafe impl<'a> Uniformable for &'a [[i32; 2]] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform2iv(u.index, self.len() as GLsizei, self.as_ptr() as *const i32) }
}
fn ty() -> Type {
Type::IVec2
}
}
unsafe impl<'a> Uniformable for &'a [[i32; 3]] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform3iv(u.index, self.len() as GLsizei, self.as_ptr() as *const i32) }
}
fn ty() -> Type {
Type::IVec3
}
}
unsafe impl<'a> Uniformable for &'a [[i32; 4]] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform4iv(u.index, self.len() as GLsizei, self.as_ptr() as *const i32) }
}
fn ty() -> Type {
Type::IVec4
}
}
unsafe impl Uniformable for u32 {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform1ui(u.index, self) }
}
fn ty() -> Type {
Type::UInt
}
}
unsafe impl Uniformable for [u32; 2] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform2uiv(u.index, 1, &self as *const u32) }
}
fn ty() -> Type {
Type::UIVec2
}
}
unsafe impl Uniformable for [u32; 3] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform3uiv(u.index, 1, &self as *const u32) }
}
fn ty() -> Type {
Type::UIVec3
}
}
unsafe impl Uniformable for [u32; 4] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform4uiv(u.index, 1, &self as *const u32) }
}
fn ty() -> Type {
Type::UIVec4
}
}
unsafe impl<'a> Uniformable for &'a [u32] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform1uiv(u.index, self.len() as GLsizei, self.as_ptr() as *const u32) }
}
fn ty() -> Type {
Type::UInt
}
}
unsafe impl<'a> Uniformable for &'a [[u32; 2]] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform2uiv(u.index, self.len() as GLsizei, self.as_ptr() as *const u32) }
}
fn ty() -> Type {
Type::UIVec2
}
}
unsafe impl<'a> Uniformable for &'a [[u32; 3]] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform3uiv(u.index, self.len() as GLsizei, self.as_ptr() as *const u32) }
}
fn ty() -> Type {
Type::UIVec3
}
}
unsafe impl<'a> Uniformable for &'a [[u32; 4]] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform4uiv(u.index, self.len() as GLsizei, self.as_ptr() as *const u32) }
}
fn ty() -> Type {
Type::UIVec4
}
}
unsafe impl Uniformable for f32 {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform1f(u.index, self) }
}
fn ty() -> Type {
Type::Float
}
}
unsafe impl Uniformable for [f32; 2] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform2fv(u.index, 1, &self as *const f32) }
}
fn ty() -> Type {
Type::Vec2
}
}
unsafe impl Uniformable for [f32; 3] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform3fv(u.index, 1, &self as *const f32) }
}
fn ty() -> Type {
Type::Vec3
}
}
unsafe impl Uniformable for [f32; 4] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform4fv(u.index, 1, &self as *const f32) }
}
fn ty() -> Type {
Type::Vec4
}
}
unsafe impl<'a> Uniformable for &'a [f32] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform1fv(u.index, self.len() as GLsizei, self.as_ptr() as *const f32) }
}
fn ty() -> Type {
Type::Float
}
}
unsafe impl<'a> Uniformable for &'a [[f32; 2]] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform2fv(u.index, self.len() as GLsizei, self.as_ptr() as *const f32) }
}
fn ty() -> Type {
Type::Vec2
}
}
unsafe impl<'a> Uniformable for &'a [[f32; 3]] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform3fv(u.index, self.len() as GLsizei, self.as_ptr() as *const f32) }
}
fn ty() -> Type {
Type::Vec3
}
}
unsafe impl<'a> Uniformable for &'a [[f32; 4]] {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform4fv(u.index, self.len() as GLsizei, self.as_ptr() as *const f32) }
}
fn ty() -> Type {
Type::Vec4
}
}
unsafe impl Uniformable for M22 {
fn update(self, u: &Uniform<Self>) {
let v = [self];
unsafe { gl::UniformMatrix2fv(u.index, 1, gl::FALSE, v.as_ptr() as *const f32) }
}
fn ty() -> Type {
Type::M22
}
}
unsafe impl Uniformable for M33 {
fn update(self, u: &Uniform<Self>) {
let v = [self];
unsafe { gl::UniformMatrix3fv(u.index, 1, gl::FALSE, v.as_ptr() as *const f32) }
}
fn ty() -> Type {
Type::M33
}
}
unsafe impl Uniformable for M44 {
fn update(self, u: &Uniform<Self>) {
let v = [self];
unsafe { gl::UniformMatrix4fv(u.index, 1, gl::FALSE, v.as_ptr() as *const f32) }
}
fn ty() -> Type {
Type::M44
}
}
unsafe impl<'a> Uniformable for &'a [M22] {
fn update(self, u: &Uniform<Self>) {
unsafe {
gl::UniformMatrix2fv(
u.index,
self.len() as GLsizei,
gl::FALSE,
self.as_ptr() as *const f32,
)
}
}
fn ty() -> Type {
Type::M22
}
}
unsafe impl<'a> Uniformable for &'a [M33] {
fn update(self, u: &Uniform<Self>) {
unsafe {
gl::UniformMatrix3fv(
u.index,
self.len() as GLsizei,
gl::FALSE,
self.as_ptr() as *const f32,
)
}
}
fn ty() -> Type {
Type::M33
}
}
unsafe impl<'a> Uniformable for &'a [M44] {
fn update(self, u: &Uniform<Self>) {
unsafe {
gl::UniformMatrix4fv(
u.index,
self.len() as GLsizei,
gl::FALSE,
self.as_ptr() as *const f32,
)
}
}
fn ty() -> Type {
Type::M44
}
}
unsafe impl Uniformable for bool {
fn update(self, u: &Uniform<Self>) {
unsafe { gl::Uniform1ui(u.index, self as GLuint) }
}
fn ty() -> Type {
Type::Bool
}
}
unsafe impl Uniformable for [bool; 2] {
fn update(self, u: &Uniform<Self>) {
let v = [self[0] as u32, self[1] as u32];
unsafe { gl::Uniform2uiv(u.index, 1, &v as *const u32) }
}
fn ty() -> Type {
Type::BVec2
}
}
unsafe impl Uniformable for [bool; 3] {
fn update(self, u: &Uniform<Self>) {
let v = [self[0] as u32, self[1] as u32, self[2] as u32];
unsafe { gl::Uniform3uiv(u.index, 1, &v as *const u32) }
}
fn ty() -> Type {
Type::BVec3
}
}
unsafe impl Uniformable for [bool; 4] {
fn update(self, u: &Uniform<Self>) {
let v = [
self[0] as u32,
self[1] as u32,
self[2] as u32,
self[3] as u32,
];
unsafe { gl::Uniform4uiv(u.index, 1, &v as *const u32) }
}
fn ty() -> Type {
Type::BVec4
}
}
unsafe impl<'a> Uniformable for &'a [bool] {
fn update(self, u: &Uniform<Self>) {
let v: Vec<_> = self.iter().map(|x| *x as u32).collect();
unsafe { gl::Uniform1uiv(u.index, v.len() as GLsizei, v.as_ptr()) }
}
fn ty() -> Type {
Type::Bool
}
}
unsafe impl<'a> Uniformable for &'a [[bool; 2]] {
fn update(self, u: &Uniform<Self>) {
let v: Vec<_> = self.iter().map(|x| [x[0] as u32, x[1] as u32]).collect();
unsafe { gl::Uniform2uiv(u.index, v.len() as GLsizei, v.as_ptr() as *const u32) }
}
fn ty() -> Type {
Type::BVec2
}
}
unsafe impl<'a> Uniformable for &'a [[bool; 3]] {
fn update(self, u: &Uniform<Self>) {
let v: Vec<_> = self
.iter()
.map(|x| [x[0] as u32, x[1] as u32, x[2] as u32])
.collect();
unsafe { gl::Uniform3uiv(u.index, v.len() as GLsizei, v.as_ptr() as *const u32) }
}
fn ty() -> Type {
Type::BVec3
}
}
unsafe impl<'a> Uniformable for &'a [[bool; 4]] {
fn update(self, u: &Uniform<Self>) {
let v: Vec<_> = self
.iter()
.map(|x| [x[0] as u32, x[1] as u32, x[2] as u32, x[3] as u32])
.collect();
unsafe { gl::Uniform4uiv(u.index, v.len() as GLsizei, v.as_ptr() as *const u32) }
}
fn ty() -> Type {
Type::BVec4
}
}
// Check if a [`Type`] matches the OpenGL counterpart.
macro_rules! check_types_match {
( $name: expr, $ty: expr, $glty: expr, $( $typ: path => $gl: path ),+) => {
match $ty {
$(
$typ if $glty != $gl => Err(UniformWarning::type_mismatch($name, $ty)),
)+
_ => Ok(())
}
}
}
// Check whether a shader program’s uniform type matches the type we have chosen.
fn uniform_type_match(program: GLuint, name: &str, ty: Type) -> Result<(), UniformWarning> {
let mut size: GLint = 0;
let mut glty: GLuint = 0;
unsafe {
// get the max length of the returned names
let mut max_len = 0;
gl::GetProgramiv(program, gl::ACTIVE_UNIFORM_MAX_LENGTH, &mut max_len);
// get the index of the uniform
let mut index = 0;
#[cfg(feature = "std")]
{
let c_name = CString::new(name.as_bytes()).unwrap();
gl::GetUniformIndices(
program,
1,
[c_name.as_ptr() as *const GLchar].as_ptr(),
&mut index,
);
}
#[cfg(not(feature = "std"))]
{
let r = with_cstring(name, |c_name| {
gl::GetUniformIndices(program, 1, [c_name].as_ptr(), &mut index);
});
if let Err(_) = r {
#[cfg(feature = "std")]
{
return Err(format!("unable to find the index of {}", name));
}
#[cfg(not(feature = "std"))]
{
let mut reason = String::new();
let _ = write!(&mut reason, "unable to find the index of {}", name);
return Err(reason);
}
}
}
// get its size and type
let mut name_ = Vec::<GLchar>::with_capacity(max_len as usize);
gl::GetActiveUniform(
program,
index,
max_len,
null_mut(),
&mut size,
&mut glty,
name_.as_mut_ptr(),
);
}
// early-return if array – we don’t support them yet
if size != 1 {
return Ok(());
}
check_types_match!(name, ty, glty,
// scalars
Type::Int => gl::INT,
Type::UInt => gl::UNSIGNED_INT,
Type::Float => gl::FLOAT,
Type::Bool => gl::BOOL,
// vectors
Type::IVec2 => gl::INT_VEC2,
Type::IVec3 => gl::INT_VEC3,
Type::IVec4 => gl::INT_VEC4,
Type::UIVec2 => gl::UNSIGNED_INT_VEC2,
Type::UIVec3 => gl::UNSIGNED_INT_VEC3,
Type::UIVec4 => gl::UNSIGNED_INT_VEC4,
Type::Vec2 => gl::FLOAT_VEC2,
Type::Vec3 => gl::FLOAT_VEC3,
Type::Vec4 => gl::FLOAT_VEC4,
Type::BVec2 => gl::BOOL_VEC2,
Type::BVec3 => gl::BOOL_VEC3,
Type::BVec4 => gl::BOOL_VEC4,
// matrices
Type::M22 => gl::FLOAT_MAT2,
Type::M33 => gl::FLOAT_MAT3,
Type::M44 => gl::FLOAT_MAT4,
// textures
Type::ISampler1D => gl::INT_SAMPLER_1D,
Type::ISampler2D => gl::INT_SAMPLER_2D,
Type::ISampler3D => gl::INT_SAMPLER_3D,
Type::ISampler1DArray => gl::INT_SAMPLER_1D_ARRAY,
Type::ISampler2DArray => gl::INT_SAMPLER_2D_ARRAY,
Type::UISampler1D => gl::UNSIGNED_INT_SAMPLER_1D,
Type::UISampler2D => gl::UNSIGNED_INT_SAMPLER_2D,
Type::UISampler3D => gl::UNSIGNED_INT_SAMPLER_3D,
Type::UISampler1DArray => gl::UNSIGNED_INT_SAMPLER_1D_ARRAY,
Type::UISampler2DArray => gl::UNSIGNED_INT_SAMPLER_2D_ARRAY,
Type::Sampler1D => gl::SAMPLER_1D,
Type::Sampler2D => gl::SAMPLER_2D,
Type::Sampler3D => gl::SAMPLER_3D,
Type::Sampler1DArray => gl::SAMPLER_1D_ARRAY,
Type::Sampler2DArray => gl::SAMPLER_2D_ARRAY,
Type::ICubemap => gl::INT_SAMPLER_CUBE,
Type::UICubemap => gl::UNSIGNED_INT_SAMPLER_CUBE,
Type::Cubemap => gl::SAMPLER_CUBE
)
}
// Generate a uniform interface and collect warnings.
fn create_uniform_interface<Uni, E>(
raw: &RawProgram,
env: E,
) -> Result<(Uni, Vec<UniformWarning>), ProgramError>
where
Uni: UniformInterface<E>,
{
let mut builder = UniformBuilder::new(raw);
let iface = Uni::uniform_interface(&mut builder, env)?;
Ok((iface, builder.warnings))
}
fn bind_vertex_attribs_locations<S>(raw: &RawProgram) -> Vec<ProgramWarning>
where
S: Semantics,
{
let mut warnings = Vec::new();
for desc in S::semantics_set() {
match get_vertex_attrib_location(raw, &desc.name) {
Ok(_) => {
let index = desc.index as GLuint;
// we are not interested in the location as we’re about to change it to what we’ve
// decided in the semantics
#[cfg(feature = "std")]
{
let c_name = CString::new(desc.name.as_bytes()).unwrap();
unsafe { gl::BindAttribLocation(raw.handle, index, c_name.as_ptr() as *const GLchar) };
}
#[cfg(not(feature = "std"))]
{
unsafe {
with_cstring(&desc.name, |c_name| {
gl::BindAttribLocation(raw.handle, index, c_name.as_ptr() as *const GLchar);
});
}
}
}
Err(warning) => warnings.push(ProgramWarning::VertexAttrib(warning)),
}
}
warnings
}
fn get_vertex_attrib_location(raw: &RawProgram, name: &str) -> Result<GLuint, VertexAttribWarning> {
let location = {
#[cfg(feature = "std")]
{
let c_name = CString::new(name.as_bytes()).unwrap();
unsafe { gl::GetAttribLocation(raw.handle, c_name.as_ptr() as *const GLchar) }
}
#[cfg(not(feature = "std"))]
{
unsafe {
with_cstring(name, |c_name| gl::GetAttribLocation(raw.handle, c_name)).unwrap_or(-1)
}
}
};
if location < 0 {
Err(VertexAttribWarning::Inactive(name.to_owned()))
} else {
Ok(location as _)
}
} | IVec4,
/// 2D unsigned integral vector.
UIVec2, |
execution_config.rs | // Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
use crate::config::{Error, RootPath};
use aptos_types::transaction::Transaction;
use serde::{Deserialize, Serialize};
use std::{
fs::File,
io::{Read, Write},
path::PathBuf,
};
const GENESIS_DEFAULT: &str = "genesis.blob";
#[derive(Clone, Deserialize, PartialEq, Serialize)]
#[serde(default, deny_unknown_fields)]
pub struct ExecutionConfig {
#[serde(skip)]
pub genesis: Option<Transaction>,
pub genesis_file_location: PathBuf,
pub network_timeout_ms: u64,
}
impl std::fmt::Debug for ExecutionConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "ExecutionConfig {{ genesis: ")?;
if self.genesis.is_some() {
write!(f, "Some(...)")?;
} else {
write!(f, "None")?;
}
write!(
f,
", genesis_file_location: {:?} }}",
self.genesis_file_location
)
}
}
impl Default for ExecutionConfig {
fn default() -> ExecutionConfig {
ExecutionConfig {
genesis: None,
genesis_file_location: PathBuf::new(),
// Default value of 30 seconds for the network timeout.
network_timeout_ms: 30_000,
}
}
}
impl ExecutionConfig {
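    /// Load the genesis transaction from `genesis_file_location` (resolved against `root_dir`)
    /// when a path has been configured.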
pub fn load(&mut self, root_dir: &RootPath) -> Result<(), Error> {
if !self.genesis_file_location.as_os_str().is_empty() {
let path = root_dir.full_path(&self.genesis_file_location);
let mut file = File::open(&path).map_err(|e| Error::IO("genesis".into(), e))?;
let mut buffer = vec![];
file.read_to_end(&mut buffer)
.map_err(|e| Error::IO("genesis".into(), e))?;
let data = bcs::from_bytes(&buffer).map_err(|e| Error::BCS("genesis", e))?;
self.genesis = Some(data);
}
Ok(())
}
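    /// Persist the genesis transaction as BCS bytes, defaulting the file name to
    /// `genesis.blob` when no location has been configured.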
pub fn save(&mut self, root_dir: &RootPath) -> Result<(), Error> {
if let Some(genesis) = &self.genesis {
if self.genesis_file_location.as_os_str().is_empty() {
self.genesis_file_location = PathBuf::from(GENESIS_DEFAULT);
}
let path = root_dir.full_path(&self.genesis_file_location);
let mut file = File::create(&path).map_err(|e| Error::IO("genesis".into(), e))?;
let data = bcs::to_bytes(&genesis).map_err(|e| Error::BCS("genesis", e))?;
file.write_all(&data)
.map_err(|e| Error::IO("genesis".into(), e))?;
}
Ok(())
}
}
#[cfg(test)]
mod test {
use super::*;
use aptos_temppath::TempPath;
use aptos_types::{
transaction::{ChangeSet, Transaction, WriteSetPayload},
write_set::WriteSetMut,
};
#[test]
fn test_no_genesis() {
let (mut config, path) = generate_config();
assert_eq!(config.genesis, None);
let root_dir = RootPath::new_path(path.path());
let result = config.load(&root_dir);
assert!(result.is_ok());
assert_eq!(config.genesis_file_location, PathBuf::new());
}
#[test]
fn | () {
let fake_genesis = Transaction::GenesisTransaction(WriteSetPayload::Direct(
ChangeSet::new(WriteSetMut::new(vec![]).freeze().unwrap(), vec![]),
));
let (mut config, path) = generate_config();
config.genesis = Some(fake_genesis.clone());
let root_dir = RootPath::new_path(path.path());
config.save(&root_dir).expect("Unable to save");
        // Saving with a genesis set but no path should fall back to the default genesis file location
assert_eq!(config.genesis_file_location, PathBuf::from(GENESIS_DEFAULT));
config.genesis = None;
let result = config.load(&root_dir);
assert!(result.is_ok());
assert_eq!(config.genesis, Some(fake_genesis));
}
fn generate_config() -> (ExecutionConfig, TempPath) {
let temp_dir = TempPath::new();
temp_dir.create_as_dir().expect("error creating tempdir");
let execution_config = ExecutionConfig::default();
(execution_config, temp_dir)
}
}
| test_some_and_load_genesis |
config.go | package config
import (
"fmt"
"io/ioutil"
"strings"
"github.com/rebuy-de/aws-nuke/pkg/types"
log "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
type ResourceTypes struct {
Targets types.Collection `yaml:"targets"`
Excludes types.Collection `yaml:"excludes"`
CloudControl types.Collection `yaml:"cloud-control"`
}
type Account struct {
Filters Filters `yaml:"filters"`
ResourceTypes ResourceTypes `yaml:"resource-types"`
Presets []string `yaml:"presets"`
}
type Nuke struct {
// Deprecated: Use AccountBlocklist instead.
AccountBlacklist []string `yaml:"account-blacklist"`
AccountBlocklist []string `yaml:"account-blocklist"`
Regions []string `yaml:"regions"`
Accounts map[string]Account `yaml:"accounts"`
ResourceTypes ResourceTypes `yaml:"resource-types"`
Presets map[string]PresetDefinitions `yaml:"presets"`
FeatureFlags FeatureFlags `yaml:"feature-flags"`
CustomEndpoints CustomEndpoints `yaml:"endpoints"`
}
type FeatureFlags struct {
DisableDeletionProtection DisableDeletionProtection `yaml:"disable-deletion-protection"`
ForceDeleteLightsailAddOns bool `yaml:"force-delete-lightsail-addons"`
}
type DisableDeletionProtection struct {
RDSInstance bool `yaml:"RDSInstance"`
EC2Instance bool `yaml:"EC2Instance"`
CloudformationStack bool `yaml:"CloudformationStack"`
}
type PresetDefinitions struct {
Filters Filters `yaml:"filters"`
}
type CustomService struct {
Service string `yaml:"service"`
URL string `yaml:"url"`
TLSInsecureSkipVerify bool `yaml:"tls_insecure_skip_verify"`
}
type CustomServices []*CustomService
type CustomRegion struct {
Region string `yaml:"region"`
Services CustomServices `yaml:"services"`
TLSInsecureSkipVerify bool `yaml:"tls_insecure_skip_verify"`
}
type CustomEndpoints []*CustomRegion
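// Load reads the YAML config file at path, strictly unmarshals it into a Nuke config and
// resolves deprecated resource type names.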
func Load(path string) (*Nuke, error) |
func (c *Nuke) ResolveBlocklist() []string {
if c.AccountBlocklist != nil {
return c.AccountBlocklist
}
log.Warn("deprecated configuration key 'account-blacklist' - please use 'account-blocklist' instead")
return c.AccountBlacklist
}
func (c *Nuke) HasBlocklist() bool {
var blocklist = c.ResolveBlocklist()
return blocklist != nil && len(blocklist) > 0
}
func (c *Nuke) InBlocklist(searchID string) bool {
for _, blocklistID := range c.ResolveBlocklist() {
if blocklistID == searchID {
return true
}
}
return false
}
func (c *Nuke) ValidateAccount(accountID string, aliases []string) error {
if !c.HasBlocklist() {
return fmt.Errorf("The config file contains an empty blocklist. " +
"For safety reasons you need to specify at least one account ID. " +
"This should be your production account.")
}
if c.InBlocklist(accountID) {
return fmt.Errorf("You are trying to nuke the account with the ID %s, "+
"but it is blocklisted. Aborting.", accountID)
}
if len(aliases) == 0 {
return fmt.Errorf("The specified account doesn't have an alias. " +
"For safety reasons you need to specify an account alias. " +
"Your production account should contain the term 'prod'.")
}
for _, alias := range aliases {
if strings.Contains(strings.ToLower(alias), "prod") {
return fmt.Errorf("You are trying to nuke an account with the alias '%s', "+
"but it has the substring 'prod' in it. Aborting.", alias)
}
}
if _, ok := c.Accounts[accountID]; !ok {
return fmt.Errorf("Your account ID '%s' isn't listed in the config. "+
"Aborting.", accountID)
}
return nil
}
func (c *Nuke) Filters(accountID string) (Filters, error) {
account := c.Accounts[accountID]
filters := account.Filters
if filters == nil {
filters = Filters{}
}
if account.Presets == nil {
return filters, nil
}
for _, presetName := range account.Presets {
notFound := fmt.Errorf("Could not find filter preset '%s'", presetName)
if c.Presets == nil {
return nil, notFound
}
preset, ok := c.Presets[presetName]
if !ok {
return nil, notFound
}
filters.Merge(preset.Filters)
}
return filters, nil
}
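// resolveDeprecations rewrites deprecated resource type names in account filters to their
// current names and fails if both the old and the new name are configured.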
func (c *Nuke) resolveDeprecations() error {
deprecations := map[string]string{
"EC2DhcpOptions": "EC2DHCPOptions",
"EC2InternetGatewayAttachement": "EC2InternetGatewayAttachment",
"EC2NatGateway": "EC2NATGateway",
"EC2Vpc": "EC2VPC",
"EC2VpcEndpoint": "EC2VPCEndpoint",
"EC2VpnConnection": "EC2VPNConnection",
"EC2VpnGateway": "EC2VPNGateway",
"EC2VpnGatewayAttachement": "EC2VPNGatewayAttachment",
"ECRrepository": "ECRRepository",
"IamGroup": "IAMGroup",
"IamGroupPolicyAttachement": "IAMGroupPolicyAttachment",
"IamInstanceProfile": "IAMInstanceProfile",
"IamInstanceProfileRole": "IAMInstanceProfileRole",
"IamPolicy": "IAMPolicy",
"IamRole": "IAMRole",
"IamRolePolicyAttachement": "IAMRolePolicyAttachment",
"IamServerCertificate": "IAMServerCertificate",
"IamUser": "IAMUser",
"IamUserAccessKeys": "IAMUserAccessKey",
"IamUserGroupAttachement": "IAMUserGroupAttachment",
"IamUserPolicyAttachement": "IAMUserPolicyAttachment",
"RDSCluster": "RDSDBCluster",
}
for _, a := range c.Accounts {
for resourceType, resources := range a.Filters {
replacement, ok := deprecations[resourceType]
if !ok {
continue
}
log.Warnf("deprecated resource type '%s' - converting to '%s'\n", resourceType, replacement)
if _, ok := a.Filters[replacement]; ok {
return fmt.Errorf("using deprecated resource type and replacement: '%s','%s'", resourceType, replacement)
}
a.Filters[replacement] = resources
delete(a.Filters, resourceType)
}
}
return nil
}
// GetRegion returns the custom region or nil when no such custom endpoints are defined for this region
func (endpoints CustomEndpoints) GetRegion(region string) *CustomRegion {
for _, r := range endpoints {
if r.Region == region {
if r.TLSInsecureSkipVerify {
for _, s := range r.Services {
s.TLSInsecureSkipVerify = r.TLSInsecureSkipVerify
}
}
return r
}
}
return nil
}
// GetService returns the custom service configuration or nil when no such custom endpoint is defined for this service
func (services CustomServices) GetService(serviceType string) *CustomService {
for _, s := range services {
if serviceType == s.Service {
return s
}
}
return nil
}
func (endpoints CustomEndpoints) GetURL(region, serviceType string) string {
r := endpoints.GetRegion(region)
if r == nil {
return ""
}
s := r.Services.GetService(serviceType)
if s == nil {
return ""
}
return s.URL
}
| {
var err error
raw, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
config := new(Nuke)
err = yaml.UnmarshalStrict(raw, config)
if err != nil {
return nil, err
}
if err := config.resolveDeprecations(); err != nil {
return nil, err
}
return config, nil
} |
PollStart.js | (function () {
if (window.PollStart) return;
window.PollStart = function (container, params) {
this.Container = typeof (container) == "object" ? container : document.getElementById(container);
if (!this.Container) return;
params = params || {};
this.Objects = {
OwnerID: params.OwnerID,
OwnerType: params.OwnerType,
CopyFromPollID: params.CopyFromPollID,
PollID: params.PollID,
PollName: Base64.decode(params.PollName),
Poll: params.Poll,
AudienceCount: params.AudienceCount,
IsWorkFlowAdmin: params.IsWorkFlowAdmin
};
this.Options = {
HideSummary: params.HideSummary,
UseExistingPoll: params.UseExistingPoll === true,
DefaultButtonTitle: params.DefaultButtonTitle
};
var that = this;
GlobalUtilities.load_files(["API/FGAPI.js"], { OnLoad: function () { that.preinit(); }});
};
PollStart.prototype = {
preinit: function () {
var that = this;
FGAPI.GetPollStatus({
IsCopyOfPollID: that.Objects.CopyFromPollID, PollID: that.Objects.PollID, ParseResults: true,
ResponseHandler: function (result) {
if ((result || {}).Poll) that.Objects.Poll = result.Poll;
that.initialize(result);
}
});
},
initialize: function (params) {
var that = this;
params = params || {};
var instanceId = params.InstanceID;
that.Container.innerHTML = "";
var elems = GlobalUtilities.create_nested_elements([{
Type: "div", Class: "small-12 medium-12 large-12", Style: "display:flex; flex-flow:column; height:100%;",
Childs: [
{
Type: "div", Style: "flex:0 1 auto; text-align:center; font-weight:bold; font-size:0.8rem; margin-bottom:0.5rem;",
Childs: [{ Type: "text", TextValue: that.Objects.PollName }]
},
{
Type: "div", Style: "flex:1 1 auto;",
Childs: [{
Type: "middle", Style: "padding-bottom:0.5rem;",
Childs: [
{ Type: "div", Name: "desc", Style: "text-align:center; font-size:0.8rem; display:none;" },
{
Type: "div", Class: "small-12 medium-12 large-12", Name: "info",
Style: "text-align:center; font-size:0.7rem; color:rgb(100,100,100); margin-top:0.3rem; display:none;"
}
]
}]
},
{
Type: "div", Class: "rv-air-button rv-border-radius-quarter", Name: "actionButton",
Style: "flex:0 1 auto; margin:0.5rem auto 0 auto; padding:0.3rem 1rem; text-align:center; font-size:0.7rem;"
}
]
}
], that.Container);
if (params.Description) {
jQuery(elems["desc"]).fadeIn(500);
GlobalUtilities.append_markup_text(elems["desc"], Base64.decode(params.Description));
}
//set the title of the action button
var actionButtonTitle = that.Options.DefaultButtonTitle || RVDic.FillTheForm;
if (+params.FilledElementsCount && (params.FilledElementsCount < params.ElementsCount)) {
actionButtonTitle = RVDic.Edit + " (" + RVDic.YouHaveFilledNOfMFields
.replace("[n]", params.FilledElementsCount).replace("[m]", params.ElementsCount) + ")";
}
else if ((params.ElementsCount > 0) && (params.FilledElementsCount == params.ElementsCount))
actionButtonTitle = RVDic.Edit + " (" + RVDic.Done + ")";
GlobalUtilities.set_text(elems["actionButton"], GlobalUtilities.convert_numbers_to_persian(actionButtonTitle));
//end of set the title of the action button
| var ttl = "";
if (params.AllFilledFormsCount == that.Objects.AudienceCount)
ttl = RVDic.FG.Poll.AllOfTheNUsersHaveDone.replace("[n]", that.Objects.AudienceCount);
else {
ttl = RVDic.FG.Poll.NOfTheMUsersHaveDone.replace("[n]", params.AllFilledFormsCount)
.replace("[m]", that.Objects.AudienceCount);
}
jQuery(elems["info"]).html(GlobalUtilities.convert_numbers_to_persian(ttl)).fadeIn(500);
}
      //end of set the number of users that have done
var processing = false;
elems["actionButton"].onclick = function () {
if (processing) return;
if (instanceId) show_poll();
else {
processing = true;
FGAPI.GetPollInstance({
CopyFromPollID: that.Objects.CopyFromPollID, PollID: that.Objects.PollID,
OwnerID: that.Objects.OwnerID, UseExistingPoll: that.Options.UseExistingPoll, ParseResults: true,
ResponseHandler: function (result) {
if (result.ErrorText) alert(RVDic.MSG[result.ErrorText] || result.ErrorText);
else if (result.Succeed) {
instanceId = result.InstanceID;
if (result.Poll) that.Objects.Poll = result.Poll;
show_poll();
}
processing = false;
}
});
}
};
var showed = null;
var show_poll = function () {
if (that.PollInstanceContainer) return showed = GlobalUtilities.show(that.PollInstanceContainer);
var _div = that.PollInstanceContainer = GlobalUtilities.create_nested_elements([
{
Type: "div", Class: "small-10 medium-9 large-8 rv-border-radius-1 SoftBackgroundColor",
Style: "margin:0rem auto; padding:1rem;", Name: "_div"
}
])["_div"];
GlobalUtilities.loading(_div);
showed = GlobalUtilities.show(_div);
GlobalUtilities.load_files(["FormsManager/FormViewer.js"], {
OnLoad: function () {
new FormViewer(_div, {
InstanceID: instanceId, LimitOwnerID: that.Objects.OwnerID, ShowAllIfNoLimit: true,
PollAbstract: true, Editable: true, FooterSaveButton: true, HideHeader: false,
HideTitle: true, HideDescription: false, FillButton: false, Poll: that.Objects.Poll,
PollOwnerType: that.Objects.OwnerType,
IsWorkFlowAdmin: that.Objects.IsWorkFlowAdmin,
OnInit: function () { this.goto_edit_mode(); },
OnAfterSave: function () {
GlobalUtilities.confirm(RVDic.FG.Poll.PollSavedMessage, function (r) {
if (r) showed.Close();
});
}
});
}
});
};
}
};
})(); |
//set the number of users that have done
if (that.Objects.AudienceCount && (that.Objects.AudienceCount > 1)) {
|
app.module.ts | import { Module } from '@nestjs/common';
import { AppController } from './app.controller';
import { AppService } from './app.service';
import { TypeOrmModule } from '@nestjs/typeorm';
import { CarModule } from './Entities/Car/car.module'
import { RegisterCarModule } from './Entities/registerCar/register.module'
@Module({
imports: [CarModule, RegisterCarModule,
TypeOrmModule.forRoot()], | controllers: [AppController],
providers: [AppService],
})
export class AppModule {} | |
mm.rs | use core;
use realmode;
use rangeset::{Range, RangeSet};
use core::alloc::{GlobalAlloc, Layout};
/// Global containing the contents of the E820 table
///
/// First value of the tuple is a bool indicating whether allocations are
/// allowed. This is set to false once the MM table has been cloned to pass
/// to the kernel, disabling allocations.
///
/// Second value indicates if the MM subsystem has been initialized.
///
/// Third value is the E820 table in a RangeSet
static mut MM_TABLE: (bool, bool, RangeSet) = (false, false, RangeSet::new());
/// Packed structure describing E820 entries
#[repr(C, packed)]
#[derive(Clone, Copy, PartialEq, Eq)]
struct E820Entry {
base: u64,
size: u64,
typ: u32,
}
/// Clone the MM table, further disabling allocations
pub fn clone_mm_table() -> RangeSet
{
unsafe {
/* Make sure MM is initialized and allocations are enabled */
assert!(MM_TABLE.1, "MM subsystem has not been initialized");
assert!(MM_TABLE.0, "MM table has already been cloned");
/* Disable allocations */
MM_TABLE.0 = false;
/* Return copy of MM table */
MM_TABLE.2.clone()
}
}
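/// Remove a physical address range from the allocatable memory set.
///
/// Panics if `size` is zero or if the range overflows a `u64`.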
pub unsafe fn remove_range(addr: u64, size: u64)
{
let rs = &mut MM_TABLE.2;
assert!(size > 0, "Invalid size for remove_range()");
rs.remove(Range { start: addr, end: addr.checked_add(size).unwrap() - 1 });
}
/// Initialize the memory management state. This requests the E820 table from
/// the BIOS and checks for overlapping/double mapped ranges.
pub unsafe fn init()
{
let rs = &mut MM_TABLE.2;
| * accumulate free sections into the RangeSet. The second loop we want
* to remove nonfree sections.
*/
for &add_entries in &[true, false] {
/* Continuation code, starts off at 0. BIOS implementation specific
* after first call to e820.
*/
let mut cont = 0;
/* Get the E820 table from the BIOS, entry by entry */
loop {
let mut ent = E820Entry { base: 0, size: 0, typ: 0 };
/* Set up the register state for the BIOS call */
let mut regs = realmode::RegisterState {
eax: 0xe820, /* Function 0xE820 */
ecx: 20, /* Entry size (in bytes) */
edx: 0x534d4150, /* Magic number 'PAMS' */
ebx: cont, /* Continuation number */
edi: &mut ent as *const _ as u32, /* Pointer to buffer */
..Default::default()
};
/* Invoke BIOS int 0x15, function 0xE820 to get the memory
* entries
*/
realmode::invoke_realmode(0x15, &mut regs);
/* Validate eax contains correct 'SMAP' magic signature */
assert!(regs.eax == 0x534d4150,
"E820 did not report correct magic");
/* Validate size of E820 entry is >= what we expect */
assert!(regs.ecx as usize >= core::mem::size_of_val(&ent),
"E820 entry structure was too small");
assert!(ent.size > 0, "E820 entry of zero size");
/* Safely compute end of memory region */
let ent_end = match ent.base.checked_add(ent.size - 1) {
Some(x) => x,
None => panic!("E820 entry integer overflow"),
};
/* Either insert free regions on the first iteration of the loop
* or remove used regions in the second iteration.
*/
if add_entries && ent.typ == 1 {
rs.insert(Range { start: ent.base, end: ent_end });
} else if !add_entries && ent.typ != 1 {
rs.remove(Range { start: ent.base, end: ent_end });
}
/* If ebx (continuation number) is zero or CF (error) was set,
* break out of the loop.
*/
if regs.ebx == 0 || (regs.efl & 1) == 1 {
break;
}
/* Update continuation */
cont = regs.ebx;
}
}
/* Remove the first 1MB of memory from allocatable memory. This is to
* prevent BIOS data structures and our PXE image from being removed.
*/
rs.remove(Range { start: 0, end: 0xFFFFF });
/* Mark MM as initialized and allocations enabled */
MM_TABLE.0 = true;
MM_TABLE.1 = true;
}
/// Structure representing global allocator
///
/// All state is handled elsewhere so this is empty.
pub struct GlobalAllocator;
unsafe impl GlobalAlloc for GlobalAllocator {
/// Global allocator. Grabs free memory from E820 and removes it from
/// the table.
unsafe fn alloc(&self, layout: Layout) -> *mut u8
{
assert!(MM_TABLE.1, "Attempted to allocate with mm uninitialized");
assert!(MM_TABLE.0, "Attempted to allocate with allocations disabled");
let rs = &mut MM_TABLE.2;
/* All the actual work is done in alloc_rangeset() */
let ret = rs.allocate(layout.size() as u64, layout.align() as u64);
if ret.is_null() {
panic!("Allocation failure");
} else {
ret as *mut u8
}
}
/// No free implementation.
///
/// We really have no reason to free in the bootloader, so we do not
/// support a free. We could easily add support if really needed, but
/// having free panic will prevent us from accidentally allocating data
/// and passing it to the next stage by pointer, and letting it drop.
/// Given we don't free anything in the bootloader, anything we pass to
/// the next stage is always valid.
unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout)
{
panic!("Dealloc attempted\n");
}
} | /* Loop through the E820 twice. The first time we loop we want to |
homepage.js | import React, { useEffect, useState } from "react";
import { useDispatch, useSelector } from "react-redux";
import { fetchTopGamesBanner } from "../../../store/actions";
import Navbar from "../navbar/navbar";
import "./homepage.scss";
import Background from "../background/background";
import { Link } from "react-router-dom";
import { AuthMe } from "../../../store/actions/auth";
import { motion } from "framer-motion";
function Homepage() {
const dispatch = useDispatch();
const stateTopGames = useSelector((state) => state.bannerTopGamesReducer);
const [scale, setScale] = useState([1, 0.8, 0.8]);
useEffect(() => {
dispatch(fetchTopGamesBanner());
    dispatch(AuthMe(false));
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, []);
const changeScale = (id) => {
let temp = scale;
temp[id] = 1;
for (let i = 0; i < scale.length; i++) {
if (i !== id) {
temp[i] = 0.8;
}
}
setScale([...temp]);
};
console.log(stateTopGames);
return (
<React.Fragment>
<Background image={"banner"} bgColor={"#1a4c31"} />
<Navbar /> | <div className="homepageBanner">
<motion.div
className="content"
initial={{
opacity: 0,
top: -30,
}}
transition={{ duration: 1 }}
animate={{ opacity: 1, top: 0 }}
>
<h1>
Browse Best Games <br /> Online
</h1>
<p>at very affordable prices!!</p>
<Link to="/browse/all">
<button>Browse All</button>
</Link>
</motion.div>
{stateTopGames ? (
<div className="upcommingGames">
{stateTopGames.map((game, index) => {
return (
<motion.div
initial={{
opacity: 0,
top: -30,
left: -20,
}}
transition={{ duration: 0.5 }}
animate={{ opacity: 1, top: 0, left: 0 }}
className="box"
key={index}
style={{
transform: `scale(${scale[index]})`,
opacity: scale[index] === 1 ? 1 : 0.5,
}}
onMouseEnter={() => changeScale(index)}
onMouseLeave={() => {
setScale([1, 0.8, 0.8]);
}}
>
<Link to={`/product/${game._id}`}>
<div className="nameBox">
<p>{game.title.replace(/ /g, "")}</p>
</div>
<img src={game.photo} alt={game.title}></img>
<div className="contentBox"></div>
</Link>
</motion.div>
);
})}
</div>
) : null}
</div>
</React.Fragment>
);
}
export default Homepage; | |
0001_initial.py | # Generated by Django 3.0.4 on 2020-04-18 12:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
| initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Car',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('car_number', models.CharField(max_length=6)),
('description', models.CharField(max_length=100)),
],
options={
'db_table': 'Car',
},
),
migrations.CreateModel(
name='Team',
fields=[
('name', models.CharField(max_length=30, primary_key=True, serialize=False)),
('country', models.CharField(max_length=30)),
('number_of_racers', models.IntegerField()),
],
options={
'db_table': 'Team',
},
),
migrations.CreateModel(
name='Racer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('surname', models.CharField(max_length=30)),
('name', models.CharField(max_length=30)),
('middlename', models.CharField(max_length=30)),
('description', models.CharField(max_length=100)),
('experience', models.CharField(max_length=30)),
('racer_class', models.CharField(choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D')], max_length=1)),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leaderboard.Car')),
('team_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leaderboard.Team')),
],
options={
'db_table': 'Racer',
},
),
migrations.CreateModel(
name='Race',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('category', models.CharField(choices=[('OW', 'Open-wheel racing'), ('TC', 'Touring car racing'), ('SpC', 'Sports car racing'), ('PC', 'Production-car racing'), ('OM', 'One-make racing'), ('TAS', 'Time Attack Series'), ('StC', 'Stock car racing'), ('R', 'Rallying'), ('D', 'Drag racing'), ('OR', 'Off-road racing'), ('K', 'Kart racing'), ('H', 'Historical racing'), ('Other', 'Other')], max_length=5)),
('date', models.DateField()),
('winner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leaderboard.Racer')),
],
options={
'db_table': 'Race',
},
),
] |
|
error.rs | //
// Error includes LexerError and LexerErrorKind.
// It implements the From trait for LexicalDiagnostic struct which displays
// source filename, span position, severity of error and message struct fields.
//
use utils::{LexicalDiagnostic, Severity, Span};
/// LexerError includes all field items required by the LexicalDiagnostic struct.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct LexerError {
pub source: String,
pub span: Span,
pub severity: Severity,
pub kind: LexerErrorKind
}
/// LexerErrorKind holds all the error variants for the Lexer state.
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub enum LexerErrorKind {
UnknownChar,
Unreachable,
InvalidNumericLiteral,
UnterminatedStringLiteral,
InvalidEscapeChar,
InvalidUnicodeEscSeqChar,
}
impl From<LexerError> for LexicalDiagnostic {
fn from(err: LexerError) -> LexicalDiagnostic |
}
| {
let message = match err.kind {
LexerErrorKind::UnknownChar => "unknown character",
LexerErrorKind::InvalidNumericLiteral => "invalid numeric literal",
LexerErrorKind::UnterminatedStringLiteral => "unexpected EOF while scanning string literal",
LexerErrorKind::InvalidEscapeChar => "invalid escape character",
LexerErrorKind::InvalidUnicodeEscSeqChar => "invalid unicode escape sequence character",
LexerErrorKind::Unreachable => "unreachable error"
};
LexicalDiagnostic {
source: err.source,
span: err.span,
severity: err.severity,
msg: message.to_string()
}
} |
fish.ts | import { $ } from 'zx';
export default async function | () {
if ((await $`brew -v`.exitCode) !== 0) {
await $`curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh | bash /dev/stdin -c`;
}
// install fish
await $`brew install fish`;
// install oh-my-fish plugin
await $`curl -L https://get.oh-my.fish | fish`;
$`omf install spacefish`;
// nvm.fish
await $`curl -sL https://git.io/fisher | source && fisher install jorgebucaran/fisher`;
$`fisher install jorgebucaran/nvm.fish`;
await $`fish`;
}
| fish |
a.go | package a
import (
"b"
"b/bsub"
)
func afunc() {
}
func main() | {
b.Func() // want `b\.Func must not be called`
_ = b.Func // OK
f := b.Func // OK
f() // want `b\.Func must not be called`
new(b.Type).Method() // want `\(\*b\.Type\)\.Method must not be called`
_ = new(b.Type).Method // OK
m := new(b.Type).Method // OK
m() // want `\(\*b\.Type\)\.Method must not be called`
(*b.Type).Method(new(b.Type)) // want `\(\*b\.Type\)\.Method must not be called`
m2 := (*b.Type).Method // OK
m2(new(b.Type)) // want `\(\*b\.Type\)\.Method must not be called`
bsub.Type{}.Method() // want `\(b/bsub\.Type\)\.Method must not be called`
_ = bsub.Type{}.Method // OK
m3 := bsub.Type{}.Method
m3() // want `\(b/bsub\.Type\)\.Method must not be called`
(bsub.Type).Method(bsub.Type{}) // want `\(b/bsub\.Type\)\.Method must not be called`
m4 := (bsub.Type).Method // OK
m4(bsub.Type{}) // want `\(b/bsub\.Type\)\.Method must not be called`
afunc() // OK
} |
|
__init__.py | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The consent stores command group for the Cloud Healthcare API CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
base.ReleaseTrack.GA)
class | (base.Group):
"""Manage Cloud Healthcare API consent stores."""
| ConsentStores |
persistence.py | import gc
import json
import pathlib
import torch
class Checkpoint:
def __init__(self, checkpoint=None):
self.checkpoint = checkpoint
@staticmethod
def get_checkpoint_path(checkpoint_dir):
return checkpoint_dir.joinpath("checkpoint.tar")
@staticmethod
def load_legacy(model_dir):
model_path = model_dir.joinpath("model.data")
state_path = model_dir.joinpath("state.json")
if not model_path.exists():
return None
checkpoint = {
'model_state_dict' : torch.load(model_path),
}
print("Loaded legacy model state")
if state_path.exists():
with open(state_path, 'r') as f:
state = json.load(f)
checkpoint['epoch'] = state['epoch']
print("Loaded legacy training state")
return checkpoint
@classmethod
def load(cls, checkpoint_dir):
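        """Load a checkpoint from checkpoint_dir, falling back to the legacy
        model.data/state.json layout when no checkpoint.tar is present."""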
if not isinstance(checkpoint_dir, pathlib.Path):
checkpoint_dir = pathlib.Path(checkpoint_dir)
checkpoint_path = Checkpoint.get_checkpoint_path(checkpoint_dir)
if not checkpoint_path.exists():
# If there is no checkpoint file we try to perform a legacy load
checkpoint = Checkpoint.load_legacy(checkpoint_dir)
if checkpoint is None:
print("No checkpoint found in directory '{}'".format(checkpoint_dir))
return cls(checkpoint)
return cls(torch.load(checkpoint_path))
@staticmethod
def save(checkpoint_dir, args, model, optimizer, epoch):
if not isinstance(checkpoint_dir, pathlib.Path):
checkpoint_dir = pathlib.Path(checkpoint_dir)
checkpoint_dir.mkdir(parents=True, exist_ok=True)
checkpoint = {
'model_type' : args.model_type,
'use_coords' : True if args.use_coords else False,
'epoch' : epoch,
'model_state_dict': model.state_dict(),
}
if not args.omit_optimizer_state_save:
checkpoint['optimizer_state_dict'] = optimizer.state_dict()
torch.save(checkpoint, Checkpoint.get_checkpoint_path(checkpoint_dir))
def purge(self):
self.checkpoint = None
gc.collect()
def | (self):
return self.checkpoint is not None
def restore_args(self, args):
# Restore checkpoint relevant arguments
if 'model_type' in self.checkpoint:
args.model_type = self.checkpoint['model_type']
print("Restored model type '{}'".format(args.model_type))
else:
print("Failed to restore model type")
if 'use_coords' in self.checkpoint:
args.use_coords = self.checkpoint['use_coords']
print("Restored use coords flag '{}'".format(args.use_coords))
else:
print("Failed to restore use coords flag")
return args
def restore_model_state(self, model):
if 'model_state_dict' in self.checkpoint:
model.load_state_dict(self.checkpoint['model_state_dict'])
print("Restored model state")
else:
print("Failed to restore model state")
return model
def restore_epoch(self, epoch):
if 'epoch' in self.checkpoint:
epoch = self.checkpoint['epoch']
print("Restored epoch {}".format(epoch))
else:
print("Failed to restore epoch")
return epoch
def restore_optimizer_state(self, optimizer):
if 'optimizer_state_dict' in self.checkpoint:
optimizer.load_state_dict(self.checkpoint['optimizer_state_dict'])
print("Restored optimizer state")
else:
print("Failed to restore optimizer state")
return optimizer
| is_valid |
monotonic.rs | use segment::{Segment, BoundingRect};
use scalar::{Scalar, NumCast};
use generic_math::{Point, Vector, Rect};
use std::ops::Range;
use arrayvec::ArrayVec;
use {QuadraticBezierSegment, CubicBezierSegment};
use std::f64;
pub(crate) trait MonotonicSegment {
type Scalar: Scalar;
fn solve_t_for_x(&self, x: Self::Scalar, t_range: Range<Self::Scalar>, tolerance: Self::Scalar) -> Self::Scalar;
}
/// An x- and y-monotonic curve segment, for example `Monotonic<QuadraticBezierSegment>`.
#[derive(Copy, Clone, Debug)]
pub struct Monotonic<T> {
pub(crate) segment: T,
}
impl<T: Segment> Monotonic<T> {
#[inline]
pub fn segment(&self) -> &T { &self.segment }
#[inline]
pub fn from(&self) -> Point<T::Scalar> { self.segment.from() }
#[inline]
pub fn to(&self) -> Point<T::Scalar> { self.segment.to() }
#[inline]
pub fn sample(&self, t: T::Scalar) -> Point<T::Scalar> { self.segment.sample(t) }
#[inline]
pub fn x(&self, t: T::Scalar) -> T::Scalar { self.segment.x(t) }
#[inline]
pub fn y(&self, t: T::Scalar) -> T::Scalar { self.segment.y(t) }
#[inline]
pub fn derivative(&self, t: T::Scalar) -> Vector<T::Scalar> { self.segment.derivative(t) }
#[inline]
pub fn dx(&self, t: T::Scalar) -> T::Scalar { self.segment.dx(t) }
#[inline]
pub fn dy(&self, t: T::Scalar) -> T::Scalar { self.segment.dy(t) }
#[inline]
pub fn split_range(&self, t_range: Range<T::Scalar>) -> Self {
Self { segment: self.segment.split_range(t_range) }
}
#[inline]
pub fn split(&self, t: T::Scalar) -> (Self, Self) {
let (a, b) = self.segment.split(t);
(Self { segment: a }, Self { segment: b })
}
#[inline]
pub fn before_split(&self, t: T::Scalar) -> Self {
Self { segment: self.segment.before_split(t) }
}
#[inline]
pub fn after_split(&self, t: T::Scalar) -> Self {
Self { segment: self.segment.after_split(t) }
}
#[inline]
pub fn flip(&self) -> Self {
Self { segment: self.segment.flip() }
}
#[inline]
pub fn approximate_length(&self, tolerance: T::Scalar) -> T::Scalar {
self.segment.approximate_length(tolerance)
}
}
impl<T: Segment> Segment for Monotonic<T> { impl_segment!(T::Scalar); }
impl<T: BoundingRect> BoundingRect for Monotonic<T> {
type Scalar = T::Scalar;
fn bounding_rect(&self) -> Rect<T::Scalar> {
// For monotonic segments the fast bounding rect approximation
// is exact.
self.segment.fast_bounding_rect()
}
fn fast_bounding_rect(&self) -> Rect<T::Scalar> {
self.segment.fast_bounding_rect()
}
fn bounding_range_x(&self) -> (T::Scalar, T::Scalar) {
self.segment.bounding_range_x()
}
fn bounding_range_y(&self) -> (T::Scalar, T::Scalar) {
self.segment.bounding_range_y()
}
fn fast_bounding_range_x(&self) -> (T::Scalar, T::Scalar) {
self.segment.fast_bounding_range_x()
}
fn fast_bounding_range_y(&self) -> (T::Scalar, T::Scalar) {
self.segment.fast_bounding_range_y()
}
}
impl<S: Scalar> Monotonic<QuadraticBezierSegment<S>> {
pub fn solve_t_for_x(&self, x: S) -> S {
Self::solve_t(
NumCast::from(self.segment.from.x).unwrap(),
NumCast::from(self.segment.ctrl.x).unwrap(),
NumCast::from(self.segment.to.x).unwrap(),
NumCast::from(x).unwrap(),
)
}
pub fn solve_t_for_y(&self, y: S) -> S {
Self::solve_t(
NumCast::from(self.segment.from.y).unwrap(),
NumCast::from(self.segment.ctrl.y).unwrap(),
NumCast::from(self.segment.to.y).unwrap(),
NumCast::from(y).unwrap(),
)
}
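    /// Solve `a*t^2 + b*t + c = 0` for the parameter of a monotonic quadratic Bézier curve,
    /// using the numerically stable root form `2c / (-b - sqrt(b^2 - 4ac))` and clamping the
    /// result to `[0, 1]`.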
fn solve_t(from: f64, ctrl: f64, to: f64, x: f64) -> S {
let a = from - 2.0 * ctrl + to;
let b = -2.0 * from + 2.0 * ctrl;
let c = from - x;
let t = 2.0 * c / (-b - f64::sqrt(b * b - 4.0 * a * c));
NumCast::from(t.max(0.0).min(1.0)).unwrap()
}
#[inline]
pub fn split_at_x(&self, x: S) -> (Self, Self) {
self.split(self.solve_t_for_x(x))
}
pub fn intersections_t(
&self, self_t_range: Range<S>,
other: &Self, other_t_range: Range<S>,
tolerance: S,
) -> ArrayVec<[(S, S);2]> {
monotonic_segment_intersecions(
self, self_t_range,
other, other_t_range,
tolerance
)
}
pub fn intersections(
&self, self_t_range: Range<S>,
other: &Self, other_t_range: Range<S>,
tolerance: S,
) -> ArrayVec<[Point<S>;2]> {
let intersections = monotonic_segment_intersecions(
self, self_t_range,
other, other_t_range,
tolerance
);
let mut result = ArrayVec::new();
for (t, _) in intersections {
result.push(self.sample(t));
}
result
}
pub fn first_intersection_t(
&self, self_t_range: Range<S>,
other: &Self, other_t_range: Range<S>,
tolerance: S,
) -> Option<(S, S)> {
first_monotonic_segment_intersecion(
self, self_t_range,
other, other_t_range,
tolerance
)
}
pub fn first_intersection(
&self, self_t_range: Range<S>,
other: &Self, other_t_range: Range<S>,
tolerance: S,
) -> Option<Point<S>> {
first_monotonic_segment_intersecion(
self, self_t_range,
other, other_t_range,
tolerance
).map(|(t, _)|{ self.sample(t) })
}
}
impl<S: Scalar> MonotonicSegment for Monotonic<QuadraticBezierSegment<S>> {
type Scalar = S;
fn solve_t_for_x(&self, x: S, _t_range: Range<S>, _tolerance: S) -> S {
self.solve_t_for_x(x)
}
}
impl<S: Scalar> Monotonic<CubicBezierSegment<S>> {
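    /// Approximate the curve parameter `t` at which the x coordinate equals `x`, using a few
    /// Newton iterations and falling back to a binary search within `t_range`.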
pub fn solve_t_for_x(&self, x: S, t_range: Range<S>, tolerance: S) -> S {
debug_assert!(t_range.start <= t_range.end);
let from = self.x(t_range.start);
let to = self.x(t_range.end);
if x <= from {
return t_range.start;
}
if x >= to {
return t_range.end;
}
// Newton's method.
        let mut t = (x - from) / (to - from);
for _ in 0..8 {
let x2 = self.x(t);
if S::abs(x2 - x) <= tolerance {
return t
}
let dx = self.dx(t);
if dx <= S::EPSILON {
break
}
t = t - (x2 - x) / dx;
}
// Fall back to binary search.
let mut min = t_range.start;
let mut max = t_range.end;
let mut t = S::HALF;
while min < max {
let x2 = self.x(t);
if S::abs(x2 - x) < tolerance {
return t;
}
if x > x2 {
min = t;
} else {
max = t;
}
t = (max - min) * S::HALF + min;
}
return t;
}
#[inline]
pub fn | (&self, x: S) -> (Self, Self) {
// TODO tolerance param.
self.split(self.solve_t_for_x(x, S::ZERO..S::ONE, S::value(0.001)))
}
}
impl<S: Scalar> MonotonicSegment for Monotonic<CubicBezierSegment<S>> {
type Scalar = S;
fn solve_t_for_x(&self, x: S, t_range: Range<S>, tolerance: S) -> S {
self.solve_t_for_x(x, t_range, tolerance)
}
}
/// Return the first intersection point (if any) of two monotonic curve
/// segments.
///
/// Both segments must be monotonically increasing in x.
pub(crate) fn first_monotonic_segment_intersecion<S: Scalar, A, B>(
a: &A, a_t_range: Range<S>,
b: &B, b_t_range: Range<S>,
tolerance: S,
) -> Option<(S, S)>
where
A: Segment<Scalar=S> + MonotonicSegment<Scalar=S> + BoundingRect<Scalar=S>,
B: Segment<Scalar=S> + MonotonicSegment<Scalar=S> + BoundingRect<Scalar=S>,
{
debug_assert!(a.from().x <= a.to().x);
debug_assert!(b.from().x <= b.to().x);
// We need to have a stricter tolerance in solve_t_for_x otherwise
// the error accumulation becomes pretty bad.
let tx_tolerance = tolerance / S::TEN;
let (a_min, a_max) = a.split_range(a_t_range).fast_bounding_range_x();
let (b_min, b_max) = b.split_range(b_t_range).fast_bounding_range_x();
if a_min > b_max || a_max < b_min {
return None;
}
let mut min_x = S::max(a_min, b_min);
let mut max_x = S::min(a_max, b_max);
let mut t_min_a = a.solve_t_for_x(min_x, S::ZERO..S::ONE, tx_tolerance);
let mut t_max_a = a.solve_t_for_x(max_x, t_min_a..S::ONE, tx_tolerance);
let mut t_min_b = b.solve_t_for_x(min_x, S::ZERO..S::ONE, tx_tolerance);
let mut t_max_b = b.solve_t_for_x(max_x, t_min_b..S::ONE, tx_tolerance);
const MAX_ITERATIONS: u32 = 32;
for _ in 0..MAX_ITERATIONS {
let y_max_a = a.y(t_max_a);
let y_max_b = b.y(t_max_b);
// It would seem more sensible to use the mid point instead of
// the max point, but using the mid point means we don't know whether
// the approximation will be slightly before or slightly after the
// point.
        // Using the max point ensures that we return an approximation
// that is always slightly after the real intersection, which
// means that if we search for intersections after the one we
// found, we are not going to converge towards it again.
if S::abs(y_max_a - y_max_b) < tolerance {
return Some((t_max_a, t_max_b));
}
let mid_x = (min_x + max_x) * S::HALF;
let t_mid_a = a.solve_t_for_x(mid_x, t_min_a..t_max_a, tx_tolerance);
let t_mid_b = b.solve_t_for_x(mid_x, t_min_b..t_max_b, tx_tolerance);
let y_mid_a = a.y(t_mid_a);
let y_min_a = a.y(t_min_a);
let y_mid_b = b.y(t_mid_b);
let y_min_b = b.y(t_min_b);
let min_sign = S::signum(y_min_a - y_min_b);
let mid_sign = S::signum(y_mid_a - y_mid_b);
let max_sign = S::signum(y_max_a - y_max_b);
if min_sign != mid_sign {
max_x = mid_x;
t_max_a = t_mid_a;
t_max_b = t_mid_b;
} else if max_sign != mid_sign {
min_x = mid_x;
t_min_a = t_mid_a;
t_min_b = t_mid_b;
} else {
// TODO: This is not always correct: if the min, max and mid
// points are all on the same side, we consider that there is
// no intersection, but there could be a pair of intersections
// between the min/max and the mid point.
break;
}
}
None
}
/// Return the intersection points (if any) of two monotonic curve
/// segments.
///
/// Both segments must be monotonically increasing in x.
pub(crate) fn monotonic_segment_intersecions<S: Scalar, A, B>(
a: &A, a_t_range: Range<S>,
b: &B, b_t_range: Range<S>,
tolerance: S,
) -> ArrayVec<[(S, S); 2]>
where
A: Segment<Scalar=S> + MonotonicSegment<Scalar=S> + BoundingRect<Scalar=S>,
B: Segment<Scalar=S> + MonotonicSegment<Scalar=S> + BoundingRect<Scalar=S>,
{
let (t1, t2) = match first_monotonic_segment_intersecion(
a, a_t_range.clone(),
b, b_t_range.clone(),
tolerance
) {
Some(intersection) => { intersection }
None => { return ArrayVec::new(); }
};
let mut result = ArrayVec::new();
result.push((t1, t2));
match first_monotonic_segment_intersecion(
a, t1..a_t_range.end,
b, t2..b_t_range.end,
tolerance
) {
Some(intersection) => { result.push(intersection); }
None => {}
}
result
}
#[test]
fn two_intersections() {
use QuadraticBezierSegment;
use math::point;
let c1 = QuadraticBezierSegment {
from: point(10.0, 0.0),
ctrl: point(10.0, 90.0),
to: point(100.0, 90.0),
}.assume_monotonic();
let c2 = QuadraticBezierSegment {
from: point(0.0, 10.0),
ctrl: point(90.0, 10.0),
to: point(90.0, 100.0),
}.assume_monotonic();
let intersections = monotonic_segment_intersecions(
&c1, 0.0..1.0,
&c2, 0.0..1.0,
0.001,
);
assert_eq!(intersections.len(), 2);
assert!(intersections[0].0 < 0.1, "{:?} < 0.1", intersections[0].0);
    assert!(intersections[1].1 > 0.9, "{:?} > 0.9", intersections[1].1);
}
| split_at_x |
index.tsx | /**
* @author Saki
* @date 2019-07-29 20:36:35
* @Last Modified by: Saki
* @Last Modified time: 2019-07-29 20:36:59
*/
// core-js polyfills are required for IE11 compatibility. If you don't use IE then delete them
import "core-js/stable";
import React from 'react';
import ReactDOM from 'react-dom'; | // eof | import App from './App';
ReactDOM.render(<App />, document.getElementById('container'));
|
tasking_dsz.py | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: tasking_dsz.py
import mcl.framework
import mcl.tasking
class dsz:
| INTERFACE = 16842801
PFAM = 4159
PROVIDER_ANY = 4159
PROVIDER = 16846911
RPC_INFO_SHUTDOWN = mcl.tasking.RpcInfo(mcl.framework.DSZ, [INTERFACE, PROVIDER_ANY, 0]) |
|
index.js | import * as React from 'react';
import PersonalDetails from './PersonalDetails';
import DirectionDetails from './DirectionDetails';
import EducationDetails from './EducationDetails';
import { Link } from "react-router-dom";
export default function | (){
return <div>
<Link to="/home">Back</Link>
<h1>Personal Details</h1>
<PersonalDetails></PersonalDetails>
<hr></hr>
<h1>Address Details</h1>
<DirectionDetails></DirectionDetails>
<hr></hr>
<h1>Education Details</h1>
<EducationDetails></EducationDetails>
<br></br>
</div>
} | UserDetails |
main.py | from collections import ChainMap
import os
from pathlib import Path
from pickle_spree import PopenFactory
import subprocess
import sys
class CallableDefinedInMain:
def __call__(self):
return 1
callable = CallableDefinedInMain()
new_popen = PopenFactory(callable=callable) | subprocess.Popen = new_popen
pythonpaths = os.environ.get("PYTHONPATH", "").split(":")
pythonpath = ":".join([str(Path(__file__).parent.absolute())]+pythonpaths)
if __name__ == "__main__":
Path("child_script.py").write_text("print('foo')")
subprocess.run([sys.executable, "child_script.py"],
env=ChainMap({"PYTHONPATH": pythonpath}, os.environ), check=True) | |
wordcounts.py | #! /opt/spark/bin/pyspark
import re
from pathlib import Path
INPUT_TXT = "~/uol-ds-tools/pyspark-utils/frankenstein.txt"
myfile = Path(INPUT_TXT).expanduser().absolute()
rdd_txt = sc.textFile(f"file:///{myfile}")
# Simple word counts splitting on whitespace
counts = (
rdd_txt.flatMap(lambda line: line.split())
.map(lambda word: (word, 1))
.reduceByKey(lambda a, b: a + b)
.map(lambda a: (a[1], a[0]))
)
res1 = counts.collect()[:20]
for i in res1:
print(i)
print()
# Word counts splitting on non word elements
word_counts = (
rdd_txt.flatMap(lambda line: re.split(r"\W+", line))
.map(lambda word: (word, 1))
.reduceByKey(lambda a, b: a + b) | )
res2 = word_counts.collect()[:20]
for i in res2:
print(i)
print() | .map(lambda a: (a[1], a[0])) |
IraModels.ts | export type QueryAndParametersTuple = {
query: string,
params: any[]
}
export interface ModelConstructable {
new(): IraModel;
}
export class NamedArguments {
private readonly _params: NamedParams;
constructor(params: NamedParams) {
this._params = params;
}
getParams(): NamedParams {
return this._params;
}
}
interface IraModel {
}
interface IraPersistentConnector {
getTransactionHandler(queryAndParams: QueryAndParametersTuple): Promise<any>
}
| export type {
IraModel,
IraPersistentConnector
} | type NamedParams = {
[id: string]: any
}
|
gmap_z_bench_safe_test.go | // Copyright 2017 gf Author(https://github.com/gogf/gf). All Rights Reserved.
//
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT was not distributed with gm file,
// You can obtain one at https://github.com/gogf/gf.
// go test *.go -bench=".*" -benchmem
package gmap
import (
"testing"
"strconv"
)
var ibm = NewIntBoolMap()
var iim = NewIntIntMap()
var iifm = NewIntInterfaceMap()
var ism = NewIntStringMap()
var ififm = NewMap()
var sbm = NewStringBoolMap()
var sim = NewStringIntMap()
var sifm = NewStringInterfaceMap()
var ssm = NewStringStringMap()
// Write performance benchmarks
func Benchmark_IntBoolMap_Set(b *testing.B) {
for i := 0; i < b.N; i++ {
ibm.Set(i, true)
}
}
func Benchmark_IntIntMap_Set(b *testing.B) {
for i := 0; i < b.N; i++ {
iim.Set(i, i)
}
}
func Benchmark_IntInterfaceMap_Set(b *testing.B) {
for i := 0; i < b.N; i++ {
iifm.Set(i, i)
}
}
func Benchmark_IntStringMap_Set(b *testing.B) {
for i := 0; i < b.N; i++ {
ism.Set(i, strconv.Itoa(i))
}
}
func Benchmark_InterfaceInterfaceMap_Set(b *testing.B) {
for i := 0; i < b.N; i++ {
ififm.Set(i, i)
}
}
func Benchmark_StringBoolMap_Set(b *testing.B) {
for i := 0; i < b.N; i++ {
sbm.Set(strconv.Itoa(i), true)
}
}
func Benchmark_StringIntMap_Set(b *testing.B) {
for i := 0; i < b.N; i++ {
sim.Set(strconv.Itoa(i), i)
}
}
func Benchmark_StringInterfaceMap_Set(b *testing.B) {
for i := 0; i < b.N; i++ {
sifm.Set(strconv.Itoa(i), i)
}
}
func Benchmark_StringStringMap_Set(b *testing.B) {
for i := 0; i < b.N; i++ {
ssm.Set(strconv.Itoa(i), strconv.Itoa(i))
}
}
// Read performance benchmarks
func Benchmark_IntBoolMap_Get(b *testing.B) {
for i := 0; i < b.N; i++ {
ibm.Get(i)
}
}
func Benchmark_IntIntMap_Get(b *testing.B) {
for i := 0; i < b.N; i++ {
iim.Get(i)
}
}
func Benchmark_IntInterfaceMap_Get(b *testing.B) {
for i := 0; i < b.N; i++ {
iifm.Get(i)
}
}
func Benchmark_IntStringMap_Get(b *testing.B) {
for i := 0; i < b.N; i++ {
ism.Get(i)
}
}
func Benchmark_InterfaceInterfaceMap_Get(b *testing.B) {
for i := 0; i < b.N; i++ {
ififm.Get(i)
}
}
func Benchmark_StringBoolMap_Get(b *testing.B) {
for i := 0; i < b.N; i++ {
sbm.Get(strconv.Itoa(i))
}
}
func Benchmark_StringIntMap_Get(b *testing.B) {
for i := 0; i < b.N; i++ {
sim.Get(strconv.Itoa(i))
}
}
func Benchmark_StringInterfaceMap_Get(b *testing.B) {
for i := 0; i < b.N; i++ {
sifm.Get(strconv.Itoa(i))
}
}
func Benchmark_StringStringMap_Get(b *testing.B) {
for i := 0; i < b. | N; i++ {
ssm.Get(strconv.Itoa(i))
}
}
|
|
move.js |
var cobs = require('../');
function | (text, ws) {
var program = cobs.compileProgram(text, ws);
program.procedure = program.compileFunction();
var data = program.run(null);
if (data)
return data.working_storage;
else
return null;
};
exports['compile and run move'] = function (test) {
var ws = { a: null };
var newws = run('move 1 to a.', ws);
test.equal(newws.a, 1);
};
exports['compile and run two moves'] = function (test) {
var ws = { a: null, b: null };
var newws = run('move 1 to a. move 2 to b.', ws);
test.equal(newws.a, 1);
test.equal(newws.b, 2);
};
exports['compile and run two moves to nested items'] = function (test) {
var ws = {
group: {
items: {
a: null,
b: null
}
}
};
var newws = run('move 1 to a. move 2 to b.', ws);
test.equal(newws.group.items.a, 1);
test.equal(newws.group.items.b, 2);
};
exports['compile and run move to two variables'] = function (test) {
var ws = { a: null, b: null };
var newws = run('move 1 to a, b.', ws);
test.equal(newws.a, 1);
test.equal(newws.b, 1);
};
| run |
uint_macros.rs | // Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![doc(hidden)]
macro_rules! uint_module {
($T:ident) => (uint_module!($T, #[stable(feature = "rust1", since = "1.0.0")]););
($T:ident, #[$attr:meta]) => (
/// The smallest value that can be represented by this integer type. | pub const MIN: $T = $T::min_value();
/// The largest value that can be represented by this integer type.
#[$attr]
pub const MAX: $T = $T::max_value();
)
} | #[$attr] |
views.py | from flask import render_template,request,url_for,redirect,flash,abort
from . import main
from .forms import BlogForm, CommentForm, UpdateProfile
from ..models import User, Comment , Blog
from flask_login import login_required, current_user
from .. import db, photos
import datetime
from ..requests import getQuotes
@main.route('/')
def index():
|
@main.route('/blog/new', methods = ['GET','POST'])
@login_required
def new_blog():
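    '''
    View function that renders the new blog form and saves a submitted blog for the current user
    '''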
blog_form = BlogForm()
if blog_form.validate_on_submit():
title = blog_form.title.data
blog = blog_form.text.data
# Updated blog instance
new_blog = Blog(blog_title=title,blog_content=blog,username=current_user.username,likes=0,dislikes=0)
# Save blog method
new_blog.save_blog()
return redirect(url_for('.index'))
title = 'New blog'
return render_template('new_blog.html',title = title,blog_form=blog_form )
@main.route('/blog/<int:id>', methods = ['GET','POST'])
def blog(id):
blog = Blog.get_blog(id)
posted_date = blog.posted.strftime('%b %d, %Y')
if request.args.get("like"):
blog.likes = blog.likes + 1
db.session.add(blog)
db.session.commit()
return redirect("/blog/{blog_id}".format(blog_id=blog.id))
elif request.args.get("dislike"):
blog.dislikes = blog.dislikes + 1
db.session.add(blog)
db.session.commit()
return redirect("/blog/{blog_id}".format(blog_id=blog.id))
comment_form = CommentForm()
if comment_form.validate_on_submit():
comment = comment_form.text.data
new_comment = Comment(comment = comment,user = current_user,blog_id = blog)
new_comment.save_comment()
comments = Comment.get_comments(blog)
return render_template("blog.html", blog = blog, date = posted_date, comment_form = comment_form, comments = comments)
@main.route('/user/<uname>/blogs')
def user_blogs(uname):
user = User.query.filter_by(username=uname).first()
blogs = Blog.query.filter_by(user_id = user.id).all()
blogs_count = Blog.count_blogs(uname)
user_joined = user.date_joined.strftime('%b,%d,%y')
return render_template("profile/blogs.html",user = user, blogs = blogs, blogs_count= blogs_count,date= user_joined)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route("/blog/<int:id>/update",methods = ['GET','POST'])
@login_required
def update_blog(id):
blog = Blog.query.get_or_404(id)
if blog.username != current_user.username:
abort(403)
blog_form = BlogForm()
if blog_form.validate_on_submit():
blog.blog_title = blog_form.title.data
blog.blog_content = blog_form.text.data
db.session.commit()
flash('Your blog has been updated!', 'success')
return redirect(url_for('main.blog', id=blog.id))
elif request.method == 'GET':
blog_form.title.data = blog.blog_title
blog_form.text.data = blog.blog_content
return render_template('new_blog.html',title = 'Update Blog',blog_form=blog_form )
@main.route("/blog/<int:id>/delete", methods=['POST'])
@login_required
def delete_blog(id):
blog = Blog.query.get(id)
if blog.username != current_user.username:
abort(403)
db.session.delete(blog)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('main.index'))
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
| '''
View root page function that returns the index page and its data
'''
blogs = Blog.query.all()
title = 'Home - Welcome to Blogs Online Website'
quote = getQuotes()
quote1 = getQuotes()
quote2 = getQuotes()
quote3 = getQuotes()
return render_template('index.html', title = title, blogs=blogs, quote=quote ,quote1=quote1,quote2=quote2,quote3=quote3 ) |
main.rs | #[macro_use]
extern crate rocket;
use std::fs::File;
use std::io::prelude::*;
use std::process::Command;
use std::{env, fs, str};
#[get("/<path>")]
fn getpath(path: &str) -> String {
println!("GETPATH: {}", path);
let data = fs::read_to_string(path).expect("Can not read the given path");
data
}
#[get("/<cmd>")]
fn exec(cmd: &str) -> String {
println!("CMD: {}", cmd); | command.arg(c);
}
let output = command.output().expect("Failed to execute the command.");
str::from_utf8(&output.stdout).unwrap().to_string()
}
#[get("/")]
fn getos() -> String {
let data = fs::read_to_string("/etc/os-release").expect("Can not read the /etc/os-release.");
data
}
#[get("/")]
fn index() -> String {
let path = env::current_dir().unwrap();
let ps = path.display();
format!(
"Example of poorly written code.
GET /getos -> will give the details of the OS.
GET /filename -> will provide a file from the current directory
GET /exec/date -> will give you the current date & time in the server.
POST /filename -> Saves the data in filename.
Code is running in: {}
",
ps
)
}
#[post("/<filename>", data = "<input>")]
fn new(filename: &str, input: Vec<u8>) -> String {
println!("POST: filename: {}", filename);
let mut tfile = File::create(filename).unwrap();
tfile.write_all(&input).unwrap();
"Okay".to_owned()
}
#[launch]
fn rocket() -> _ {
rocket::build()
.mount("/", routes![index])
.mount("/", routes![getpath])
.mount("/", routes![new])
.mount("/getos", routes![getos])
.mount("/exec/", routes![exec])
} | let mut cmds = cmd.split_whitespace().into_iter();
let mut command = Command::new(cmds.next().unwrap());
for c in cmds { |
job_gc_test.go | /*
Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
| import (
"context"
"net/http/httptest"
"testing"
"time"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic/dynamicinformer"
fakedynamicclient "k8s.io/client-go/dynamic/fake"
restclient "k8s.io/client-go/rest"
batchv1alpha1 "volcano.sh/apis/pkg/apis/batch/v1alpha1"
"github.com/PaddlePaddle/PaddleFlow/pkg/common/config"
"github.com/PaddlePaddle/PaddleFlow/pkg/common/k8s"
commonschema "github.com/PaddlePaddle/PaddleFlow/pkg/common/schema"
)
var (
VCJobGVR = schema.GroupVersionResource{Group: "batch.volcano.sh", Version: "v1alpha1", Resource: "jobs"}
VCQueueGVR = schema.GroupVersionResource{Group: "scheduling.volcano.sh", Version: "v1beta1", Resource: "queues"}
EQuotaGVR = schema.GroupVersionResource{Group: "scheduling.volcano.sh", Version: "v1beta1", Resource: "elasticresourcequotas"}
PodGVR = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
)
func NewUnstructured(gvk schema.GroupVersionKind, namespace, name string) *unstructured.Unstructured {
return &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": gvk.GroupVersion().String(),
"kind": gvk.Kind,
"metadata": map[string]interface{}{
"namespace": namespace,
"name": name,
"labels": map[string]interface{}{
commonschema.JobOwnerLabel: commonschema.JobOwnerValue,
commonschema.VolcanoJobNameLabel: name,
commonschema.JobIDLabel: name,
},
},
"status": make(map[string]interface{}),
},
}
}
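// newFakeGCController builds a JobGarbageCollector wired to fake dynamic and discovery clients
// for use in tests.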
func newFakeGCController() *JobGarbageCollector {
scheme := runtime.NewScheme()
dynamicClient := fakedynamicclient.NewSimpleDynamicClient(scheme)
var server = httptest.NewServer(k8s.DiscoveryHandlerFunc)
defer server.Close()
fakeDiscovery := discovery.NewDiscoveryClientForConfigOrDie(&restclient.Config{Host: server.URL})
ctrl := &JobGarbageCollector{}
opt := &k8s.DynamicClientOption{
DynamicClient: dynamicClient,
DynamicFactory: dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 0),
DiscoveryClient: fakeDiscovery,
}
ctrl.Initialize(opt)
return ctrl
}
func TestJobGC(t *testing.T) {
tests := []struct {
name string
namespace string
skipClean bool
oldObj *unstructured.Unstructured
newObj *unstructured.Unstructured
oldStatus *batchv1alpha1.JobStatus
newStatus *batchv1alpha1.JobStatus
}{
{
name: "vcjob status turns from pending to running",
namespace: "default",
skipClean: true,
oldObj: NewUnstructured(k8s.VCJobGVK, "default", "vcjob1"),
oldStatus: &batchv1alpha1.JobStatus{
State: batchv1alpha1.JobState{
Phase: batchv1alpha1.Pending,
},
Pending: 1,
Running: 1,
},
newObj: NewUnstructured(k8s.VCJobGVK, "default", "vcjob1"),
newStatus: &batchv1alpha1.JobStatus{
State: batchv1alpha1.JobState{
Phase: batchv1alpha1.Running,
},
Pending: 1,
Running: 1,
},
},
{
name: "vcjob status turns from running to failed",
namespace: "default",
skipClean: false,
oldObj: NewUnstructured(k8s.VCJobGVK, "default", "vcjob1"),
oldStatus: &batchv1alpha1.JobStatus{
State: batchv1alpha1.JobState{
Phase: batchv1alpha1.Running,
},
Pending: 1,
Running: 1,
},
newObj: NewUnstructured(k8s.VCJobGVK, "default", "vcjob1"),
newStatus: &batchv1alpha1.JobStatus{
State: batchv1alpha1.JobState{
Phase: batchv1alpha1.Failed,
},
},
},
}
config.GlobalServerConfig = &config.ServerConfig{
Job: config.JobConfig{
Reclaim: config.ReclaimConfig{
CleanJob: true,
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
config.GlobalServerConfig.Job.Reclaim.SkipCleanFailedJob = test.skipClean
c := newFakeGCController()
var err error
test.oldObj.Object["status"], err = runtime.DefaultUnstructuredConverter.ToUnstructured(test.oldStatus)
assert.Equal(t, nil, err)
test.newObj.Object["status"], err = runtime.DefaultUnstructuredConverter.ToUnstructured(test.newStatus)
assert.Equal(t, nil, err)
_, err = c.opt.DynamicClient.Resource(VCJobGVR).Namespace(test.namespace).Create(context.TODO(), test.oldObj, metav1.CreateOptions{})
assert.Equal(t, nil, err)
c.update(test.oldObj, test.newObj)
stopCh := make(chan struct{})
defer close(stopCh)
c.Run(stopCh)
time.Sleep(2 * time.Second)
})
}
} | package controller
|
main.py | import logging
import argparse
import sys
from slow.hitter import HitterService as Hitter
from slow.hitter import KnownHosts
|
parser = argparse.ArgumentParser(description='Start syslog-grok-mongo captures.')
parser.add_argument('-name', type=str, default=Hitter.NAME,
help='name of the service')
# Mongo configs
parser.add_argument('-muri', type=str, default='mongo://127.0.0.1:27017',
help='mongo uri')
parser.add_argument('-mdb', type=str, default=MongoConnection.DB_NAME,
help='mongo db name')
# ETL stuff
parser.add_argument('-cpdir', type=str, default=DEFAULT_PATTERNS,
help='directory containing custom grok pattern files')
parser.add_argument('-names', type=str, default=DEFAULT_NAMES,
help='file containing all the names for rule patterns')
parser.add_argument('-gconfig', type=str, default=DEFAULT_CONFIG,
help='Grok frontend configuration for rule chains')
# Hitter stuff
parser.add_argument('-broker_uri', type=str, default=Hitter.BROKER_URI,
help='kombu queue address')
parser.add_argument('-broker_queue', type=str, default=Hitter.BROKER_QUEUE,
help='kombu queue name to publish to')
parser.add_argument('-buffer_uri', type=str, default=Hitter.BROKER_URI,
help='buffer uri for results')
parser.add_argument('-buffer_queue', type=str, default=Hitter.LOGSTASH_QUEUE,
help='kombu queue for results')
parser.add_argument('-known_hosts', type=str, default=KnownHosts.HOST_FILE,
help='hosts file to load')
parser.add_argument('-msg_limit', type=int, default=100,
help='limit the number of messages')
V = 'log levels: INFO: %d, DEBUG: %d, WARNING: %d' % (logging.INFO,
logging.DEBUG,
logging.WARNING)
parser.add_argument('-log_level', type=int, default=logging.DEBUG,
help=V)
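# Illustrative invocation only: the flag values below are hypothetical and simply show how the
# arguments defined above fit together; adjust URIs, paths, and queue names to your deployment.
#
#   python main.py -name syslog-hitter \
#       -muri mongodb://127.0.0.1:27017 -mdb syslog \
#       -broker_uri amqp://127.0.0.1:5672// -broker_queue raw-syslog \
#       -buffer_uri amqp://127.0.0.1:5672// -buffer_queue logstash \
#       -known_hosts /etc/hosts -msg_limit 100 -log_level 20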
if __name__ == "__main__":
args = parser.parse_args()
logging.getLogger().setLevel(args.log_level)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
logging.getLogger().addHandler(ch)
mongo_backend = MongoConnection(uri=args.muri,
db_name=args.mdb)
ETL.setup_grokker(args)
etl_backend = ETL
service = Hitter(broker_uri=args.broker_uri,
broker_queue=args.broker_queue,
hosts_file=args.known_hosts,
mongo_backend=mongo_backend,
etl_backend=etl_backend,
store_uri=args.buffer_uri,
store_queue=args.buffer_queue,
msg_limit=args.msg_limit)
try:
logging.debug("Starting the syslog listener")
service.serve_forever(poll_interval=0.5)
except (IOError, SystemExit):
raise
except KeyboardInterrupt:
raise | from slow.etl import ETL, DEFAULT_NAMES, DEFAULT_PATTERNS, DEFAULT_CONFIG
from slow.mongo_backend import MongoConnection |
key_controller.go | package controllers
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"sort"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
kyaml "k8s.io/apimachinery/pkg/util/yaml"
apiserverv1 "k8s.io/apiserver/pkg/apis/config/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/klog"
"k8s.io/utils/pointer"
operatorv1 "github.com/openshift/api/operator/v1"
configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
configv1informers "github.com/openshift/client-go/config/informers/externalversions/config/v1"
"github.com/openshift/library-go/pkg/controller/factory"
"github.com/openshift/library-go/pkg/operator/encryption/crypto"
"github.com/openshift/library-go/pkg/operator/encryption/secrets"
"github.com/openshift/library-go/pkg/operator/encryption/state"
"github.com/openshift/library-go/pkg/operator/encryption/statemachine"
"github.com/openshift/library-go/pkg/operator/events"
operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
)
// encryptionSecretMigrationInterval determines how much time must pass after a key has been observed as
// migrated before a new key is created by the key minting controller. The new key's ID will be one
// greater than the last key's ID (the first key has a key ID of 1).
const encryptionSecretMigrationInterval = time.Hour * 24 * 7 // one week
// keyController creates new keys if necessary. It
// * watches
// - secrets in openshift-config-managed
// - pods in target namespace
// - secrets in target namespace
// * computes a new, desired encryption config from encryption-config-<revision>
// and the existing keys in openshift-config-managed.
// * derives from the desired encryption config whether a new key is needed due to
// - encryption is being enabled via the API or
// - a new to-be-encrypted resource shows up or
// - the EncryptionType in the API does not match with the newest existing key or
// - based on time (once a week is the proposed rotation interval) or
// - an external reason given as a string in .encryption.reason of UnsupportedConfigOverrides.
// It then creates the new key.
//
// Note: the "based on time" reason for a new key is based on the annotation
// encryption.apiserver.operator.openshift.io/migrated-timestamp instead of
// the key secret's creationTimestamp because the clock is supposed to
// start when a migration has been finished, not when it begins.
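//
// For illustration only (the timestamp value is hypothetical), the annotation the controller
// compares against encryptionSecretMigrationInterval looks like this on a key secret in
// openshift-config-managed:
//
//   metadata:
//     annotations:
//       encryption.apiserver.operator.openshift.io/migrated-timestamp: "2024-01-02T15:04:05Z"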
type keyController struct {
operatorClient operatorv1helpers.OperatorClient
apiServerClient configv1client.APIServerInterface
component string
name string
encryptionSecretSelector metav1.ListOptions
deployer statemachine.Deployer
secretClient corev1client.SecretsGetter
provider Provider
}
func NewKeyController(
component string,
provider Provider,
deployer statemachine.Deployer,
operatorClient operatorv1helpers.OperatorClient,
apiServerClient configv1client.APIServerInterface,
apiServerInformer configv1informers.APIServerInformer,
kubeInformersForNamespaces operatorv1helpers.KubeInformersForNamespaces,
secretClient corev1client.SecretsGetter,
encryptionSecretSelector metav1.ListOptions,
eventRecorder events.Recorder,
) factory.Controller {
c := &keyController{
operatorClient: operatorClient,
apiServerClient: apiServerClient,
component: component,
name: "EncryptionKeyController",
encryptionSecretSelector: encryptionSecretSelector,
deployer: deployer,
provider: provider,
secretClient: secretClient,
}
return factory.New().
WithSync(c.sync).
ResyncEvery(time.Second). // TODO: Is the 1s resync really necessary?
WithInformers(
apiServerInformer.Informer(),
operatorClient.Informer(),
kubeInformersForNamespaces.InformersFor("openshift-config-managed").Core().V1().Secrets().Informer(),
deployer,
).ToController(c.name, eventRecorder.WithComponentSuffix("encryption-key-controller"))
}
func (c *keyController) sync(ctx context.Context, syncCtx factory.SyncContext) error {
if ready, err := shouldRunEncryptionController(c.operatorClient, c.provider.ShouldRunEncryptionControllers); err != nil || !ready {
return err // we will get re-kicked when the operator status updates
}
configError := c.checkAndCreateKeys(syncCtx, c.provider.EncryptedGRs())
// update failing condition
cond := operatorv1.OperatorCondition{
Type: "EncryptionKeyControllerDegraded",
Status: operatorv1.ConditionFalse,
}
if configError != nil {
cond.Status = operatorv1.ConditionTrue
cond.Reason = "Error"
cond.Message = configError.Error()
}
if _, _, updateError := operatorv1helpers.UpdateStatus(c.operatorClient, operatorv1helpers.UpdateConditionFn(cond)); updateError != nil {
return updateError
}
return configError
}
func (c *keyController) checkAndCreateKeys(syncContext factory.SyncContext, encryptedGRs []schema.GroupResource) error {
currentMode, externalReason, err := c.getCurrentModeAndExternalReason()
if err != nil {
return err
}
currentConfig, desiredEncryptionState, secrets, isProgressingReason, err := statemachine.GetEncryptionConfigAndState(c.deployer, c.secretClient, c.encryptionSecretSelector, encryptedGRs)
if err != nil {
return err
}
if len(isProgressingReason) > 0 {
syncContext.Queue().AddAfter(syncContext.QueueKey(), 2*time.Minute)
return nil
}
// avoid intended start of encryption
hasBeenOnBefore := currentConfig != nil || len(secrets) > 0
if currentMode == state.Identity && !hasBeenOnBefore {
return nil
}
var (
newKeyRequired bool
newKeyID uint64
reasons []string
)
// note here that desiredEncryptionState is never empty because getDesiredEncryptionState
// fills up the state with all resources and set identity write key if write key secrets
// are missing.
var commonReason *string
for gr, grKeys := range desiredEncryptionState {
latestKeyID, internalReason, needed := needsNewKey(grKeys, currentMode, externalReason, encryptedGRs)
if !needed {
continue
}
if commonReason == nil {
commonReason = &internalReason
} else if *commonReason != internalReason {
commonReason = pointer.StringPtr("") // this means we have no common reason
}
newKeyRequired = true
nextKeyID := latestKeyID + 1
if newKeyID < nextKeyID {
newKeyID = nextKeyID
}
reasons = append(reasons, fmt.Sprintf("%s-%s", gr.Resource, internalReason))
}
if !newKeyRequired {
return nil
}
if commonReason != nil && len(*commonReason) > 0 && len(reasons) > 1 {
reasons = []string{*commonReason} // don't repeat reasons
}
sort.Strings(reasons)
internalReason := strings.Join(reasons, ", ")
keySecret, err := c.generateKeySecret(newKeyID, currentMode, internalReason, externalReason)
if err != nil {
return fmt.Errorf("failed to create key: %v", err)
}
_, createErr := c.secretClient.Secrets("openshift-config-managed").Create(context.TODO(), keySecret, metav1.CreateOptions{})
if errors.IsAlreadyExists(createErr) {
return c.validateExistingSecret(keySecret, newKeyID)
}
if createErr != nil {
syncContext.Recorder().Warningf("EncryptionKeyCreateFailed", "Secret %q failed to create: %v", keySecret.Name, createErr)
return createErr
}
syncContext.Recorder().Eventf("EncryptionKeyCreated", "Secret %q successfully created: %q", keySecret.Name, reasons)
return nil
}
func (c *keyController) validateExistingSecret(keySecret *corev1.Secret, keyID uint64) error {
actualKeySecret, err := c.secretClient.Secrets("openshift-config-managed").Get(context.TODO(), keySecret.Name, metav1.GetOptions{})
if err != nil {
return err
}
actualKeyID, ok := state.NameToKeyID(actualKeySecret.Name)
if !ok || actualKeyID != keyID {
// TODO we can just get stuck in degraded here ...
return fmt.Errorf("secret %s has an invalid name, new keys cannot be created for encryption target", keySecret.Name)
}
if _, err := secrets.ToKeyState(actualKeySecret); err != nil {
return fmt.Errorf("secret %s is invalid, new keys cannot be created for encryption target", keySecret.Name)
}
return nil // we made this key earlier
}
func (c *keyController) generateKeySecret(keyID uint64, currentMode state.Mode, internalReason, externalReason string) (*corev1.Secret, error) {
bs := crypto.ModeToNewKeyFunc[currentMode]()
ks := state.KeyState{
Key: apiserverv1.Key{
Name: fmt.Sprintf("%d", keyID),
Secret: base64.StdEncoding.EncodeToString(bs),
},
Mode: currentMode,
InternalReason: internalReason,
ExternalReason: externalReason,
}
return secrets.FromKeyState(c.component, ks)
}
func (c *keyController) getCurrentModeAndExternalReason() (state.Mode, string, error) {
apiServer, err := c.apiServerClient.Get(context.TODO(), "cluster", metav1.GetOptions{})
if err != nil {
return "", "", err
}
operatorSpec, _, _, err := c.operatorClient.GetOperatorState()
if err != nil {
return "", "", err
}
// TODO make this un-settable once set
// ex: we could require the tech preview no upgrade flag to be set before we will honor this field
type unsupportedEncryptionConfig struct {
Encryption struct {
Reason string `json:"reason"`
} `json:"encryption"`
}
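// Illustrative only: an UnsupportedConfigOverrides payload of the following shape (the reason
// string here is a hypothetical example) would be decoded into encryptionConfig below and
// surface as the external reason for minting a new key:
//
//   encryption:
//     reason: force-rotation-2024-01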
encryptionConfig := &unsupportedEncryptionConfig{}
if raw := operatorSpec.UnsupportedConfigOverrides.Raw; len(raw) > 0 {
jsonRaw, err := kyaml.ToJSON(raw)
if err != nil {
klog.Warning(err)
// maybe it's just json
jsonRaw = raw
}
if err := json.Unmarshal(jsonRaw, encryptionConfig); err != nil {
return "", "", err
}
}
reason := encryptionConfig.Encryption.Reason
switch currentMode := state.Mode(apiServer.Spec.Encryption.Type); currentMode {
case state.AESCBC, state.Identity: // secretbox is disabled for now
return currentMode, reason, nil
case "": // unspecified means use the default (which can change over time)
return state.DefaultMode, reason, nil
default:
return "", "", fmt.Errorf("unknown encryption mode configured: %s", currentMode)
}
}
// needsNewKey checks whether a new key must be created for the given resource. If true, it also returns the latest
// used key ID and a reason string.
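//
// For illustration (hypothetical values): if the newest read key is named "5", it is backed by
// a secret, its mode matches currentMode, every resource in encryptedGRs has been migrated to
// it, and its migrated-timestamp is older than encryptionSecretMigrationInterval, this returns
// (5, "rotation-interval-has-passed", true).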
func needsNewKey(grKeys state.GroupResourceState, currentMode state.Mode, externalReason string, encryptedGRs []schema.GroupResource) (uint64, string, bool) | {
// we always need to have some encryption keys unless we are turned off
if len(grKeys.ReadKeys) == 0 {
return 0, "key-does-not-exist", currentMode != state.Identity
}
latestKey := grKeys.ReadKeys[0]
latestKeyID, ok := state.NameToKeyID(latestKey.Key.Name)
if !ok {
return latestKeyID, fmt.Sprintf("key-secret-%d-is-invalid", latestKeyID), true
}
// if latest secret has been deleted, we will never be able to migrate to that key.
if !latestKey.Backed {
return latestKeyID, fmt.Sprintf("encryption-config-key-%d-not-backed-by-secret", latestKeyID), true
}
// check that we have pruned read-keys: the write-keys, plus at most one more backed read-key (potentially some unbacked ones before)
backedKeys := 0
for _, rk := range grKeys.ReadKeys {
if rk.Backed {
backedKeys++
}
}
if backedKeys > 2 {
return 0, "", false
}
// we have not migrated the latest key, do nothing until that is complete
if allMigrated, _, _ := state.MigratedFor(encryptedGRs, latestKey); !allMigrated {
return 0, "", false
}
// if the most recent secret was encrypted in a mode different than the current mode, we need to generate a new key
if latestKey.Mode != currentMode {
return latestKeyID, "encryption-mode-changed", true
}
// if the most recent secret turned off encryption and we want to keep it that way, do nothing
if latestKey.Mode == state.Identity && currentMode == state.Identity {
return 0, "", false
}
// if the most recent secret has a different external reason than the current reason, we need to generate a new key
if latestKey.ExternalReason != externalReason && len(externalReason) != 0 {
return latestKeyID, "external-reason-changed", true
}
// we check for encryptionSecretMigratedTimestamp set by migration controller to determine when migration completed
// this also generates back pressure for key rotation when migration takes a long time or was recently completed
return latestKeyID, "rotation-interval-has-passed", time.Since(latestKey.Migrated.Timestamp) > encryptionSecretMigrationInterval
} |
|
admin.py | from django.contrib import admin
from .models import Upload,VendorData
class UploadAdmin(admin.ModelAdmin): | class VendordataAdmin(admin.ModelAdmin):
list_display = ('id','sub_id','first_name','last_name','status')
admin.site.site_header = "Subscription Fulfillment Upload"
admin.site.register(Upload, UploadAdmin)
admin.site.register(VendorData, VendordataAdmin) | list_display = ('id','uploaded_file','uploaded_date')
|
ui.rs | extern crate kiss3d;
extern crate nalgebra as na;
// Third party
use kiss3d::light::Light;
use kiss3d::window::Window;
// Conrod
use kiss3d::conrod::UiCell;
use kiss3d::conrod::{widget, widget_ids, Color, Colorable, Positionable, Sizeable, Widget};
// Generate a unique `WidgetId` for each widget.
widget_ids! {
pub struct Ids {
text,
canvas_label,
canvas,
dropdown_menu,
dropdown_menu_2,
}
}
struct State {
open: bool,
selected: usize,
ids: Ids,
}
// Main display function
pub fn main() {
const WINDOW_WIDTH: u32 = 1024;
const WINDOW_HEIGHT: u32 = 768;
let mut window = Window::new_with_size("Pointctl visualizer", WINDOW_WIDTH, WINDOW_HEIGHT);
window.set_background_color(0.9, 1.0, 0.9);
window.set_light(Light::StickToCamera);
window.set_point_size(4.);
let mut state = State {
open: false,
selected: 0usize,
ids: Ids::new(window.conrod_ui_mut().widget_id_generator()),
};
// Start the render loop; this will _not_ work with 2D scenes yet.
while window.render() {
let ui = Box::from(window.conrod_ui_mut().set_widgets());
let (_, new_state) = set_colllabsible_area(ui, state);
state = new_state;
}
}
fn | (
mut ui: Box<UiCell>,
mut state: State,
) -> (std::boxed::Box<kiss3d::conrod::UiCell>, State) {
let (area, status) = widget::CollapsibleArea::new(state.open, "foobar")
.label_font_size(10u32)
.bottom_left_with_margin(1.0f64)
.w(400f64)
.h(50f64)
.color(Color::Rgba(0.0, 0.0, 0.0, 0.10))
.set(state.ids.canvas, &mut ui);
if let Some(s) = status {
state.open = s.is_open();
}
if let Some(a) = area {
widget::Text::new("test")
.up_from(a.collapsible_area_id, 5.0f64)
.color(Color::Rgba(1.0, 0.0, 0.0, 1.0))
.set(state.ids.text, &mut ui);
}
let items = vec!["Foo", "Bar", "Baz"];
let event = widget::DropDownList::new(items.as_slice(), Some(state.selected))
.top_left_with_margin(1.0f64)
.w(400f64)
.h(30f64)
// .color(Color::Rgba(1.0, 1.0, 1.0, 1.0))
.set(state.ids.dropdown_menu, &mut ui);
let _ = widget::DropDownList::new(items.as_slice(), Some(state.selected))
.down_from(state.ids.dropdown_menu, 5.0f64)
.w(400f64)
.h(30f64)
// .color(Color::Rgba(1.0, 1.0, 1.0, 1.0))
.set(state.ids.dropdown_menu_2, &mut ui);
if let Some(id) = event {
state.selected = id;
}
(ui, state)
}
| set_colllabsible_area |
gaussian.py | import os
"""This submodule aims to provide utilities for the gaussian software package.
It will allow the user to quickly write custom interfaces to analyse the output files.
"""
class Extractor:
"""This class supports data extraction from gaussian output files.
It provides functionality to extract all the implemented data at once or custom extraction
can be set up by using its public methods.
"""
def __init__(self, filepath, labels=None):
self.filepath = filepath
self.labels = labels
self.normal_executions = 0
# Initialize
self.check_normal_execution()
self.check_frequencies()
self.label_positions = self._get_label_positions()
def check_normal_execution(self):
"""Checks for normal execution
Checks for normal execution of the gaussian output file.
Use this first when writing custom extraction methods to check the validity of the calculations.
Returns:
(bool): Returns True when a calculation has normal execution.
"""
with open(self.filepath, "r") as f:
for line in f:
if 'Normal termination of Gaussian' in line:
self.normal_executions += 1
if self.labels is not None:
if self.normal_executions == len(self.labels)+1:
return True
else:
raise Exception('There are {} Normal terminations, please check this file manually: {}'.format(
self.normal_executions, self.filepath))
else:
if self.normal_executions == 0:
raise Exception(
'There are no normal terminations, please check this file manually: {}'.format(self.filepath))
elif self.normal_executions == 1:
return True
else:
raise Exception(
'There are multiple normal terminations, please set the labels when constructing the Extractor.')
def check_frequencies(self):
"""Check for negative (imaginary) frequencies.
Returns:
(bool): Returns True if no negative frequencies are found.
Raises:
Exception: Raises when negative frequencies are found.
"""
with open(self.filepath, 'r') as f:
imag = False
vals = []
for line in f:
if 'Frequencies -- ' in line:
vals.append(line)
split = vals[-1].split()
if float(split[2]) < 0:
imag = True
if float(split[3]) < 0:
imag = True
if float(split[4]) < 0:
imag = True
if imag:
raise Exception(
'There are imaginary frequencies, please check this file manually: {}'.format(self.filepath))
else:
return True
def _get_label_positions(self):
results = []
with open(self.filepath, 'r') as f:
for i, line in enumerate(f):
for l in self.labels:
if l in line:
results.append([i, l])
for i, n in enumerate(results):
if n[0] == results[i-1][0]:
results.remove(results[i-1])
def clean_list():
for i, n in enumerate(results):
if n[1] == results[i-1][1]:
results.remove(results[i-1])
clean_list()
clean_list()
return results
def extract_error(self):
with open(self.filepath, 'r') as f:
temp = None
for line in f:
if 'Error termination' in line:
return temp
else:
temp = line
def _extract_geometry(self, file):
file.readline()
file.readline()
file.readline()
file.readline()
atoms = []
xyz = []
is_molecule = True
while is_molecule:
# read and process the line
line = file.readline()
split = line.split()
# check if is still the molecule
if len(split) == 1:
is_molecule = False
else:
# process the line
atoms.append(split[1])
coords = []
coords.append(split[3])
coords.append(split[4])
coords.append(split[5])
xyz.append(coords)
return atoms, xyz
def extract_optimized_geometry(self):
"""Extracts the optimized geometry
Extracts the optimized geometry from the gaussian output file.
Returns:
(tuple): tuple containing:
atoms (list) : Atom numbers
coordinates (list): Cartesian coordinates in a 2D list
"""
results = []
with open(self.filepath, 'r') as f:
for line in f:
if 'Standard orientation' in line:
atoms, xyz = self._extract_geometry(f)
results.append([atoms, xyz])
if self.labels[1] in line:
break
return results[-2]
def extract_SCF(self):
vals = []
results = []
with open(self.filepath, 'r') as f:
for i, line in enumerate(f):
if 'SCF Done' in line:
split = line.split()
vals.append([i, split[4]])
for p in self._get_label_positions():
temp = None
for v in vals:
if v[0] < p[0]:
temp = v
temp = [p[1], temp[1]]
results.append(temp)
return results
def extract_HOMO_energy(self):
with open(self.filepath, 'r') as f:
inFreq = False
vals = []
for line in f:
if 'Link1' in line:
inFreq = True
if self.labels[1] in line:
inFreq = False
if inFreq:
if 'Alpha occ. eigenvalues' in line:
vals.append(line)
split = vals[-1].split()
return split[-1]
def extract_LUMO_energy(self):
with open(self.filepath, 'r') as f:
inFreq = False
vals = []
for line in f:
if 'Link1' in line:
inFreq = True
if self.labels[1] in line:
inFreq = False | split = vals[0].split()
return split[4]
def extract_zero_point_correction(self):
with open(self.filepath, 'r') as f:
for line in f:
if 'Zero-point correction' in line:
split = line.split()
return split[2]
def extract_thermal_correction_to_energy(self):
with open(self.filepath, 'r') as f:
for line in f:
if 'Thermal correction to Energy' in line:
split = line.split()
return split[4]
def extract_thermal_correction_to_enthalpy(self):
with open(self.filepath, 'r') as f:
for line in f:
if 'Thermal correction to Enthalpy' in line:
split = line.split()
return split[4]
def extract_thermal_correction_to_gibbs_free_energy(self):
with open(self.filepath, 'r') as f:
for line in f:
if 'Thermal correction to Gibbs Free Energy' in line:
split = line.split()
return split[6]
def _extract_npa(self, file):
file.readline()
file.readline()
file.readline()
file.readline()
file.readline()
natural_charges = []
is_molecule = True
while is_molecule:
line = file.readline()
split = line.split()
if len(split) == 1:
is_molecule = False
else:
natural_charges.append(split[2])
return natural_charges
def extract_npas(self):
results = []
with open(self.filepath, 'r') as f:
vals = []
for line in f:
if 'Summary of Natural Population Analysis:' in line:
vals.append(self._extract_npa(f))
results.append(vals[0])
results.append(vals[1])
results.append(vals[4])
return results |
if inFreq:
if 'Alpha virt. eigenvalues' in line:
vals.append(line) |