hexsha (string, length 40) | size (int64, 4–1.05M) | content (string, 4–1.05M) | avg_line_length (float64, 1.33–100) | max_line_length (int64, 1–1k) | alphanum_fraction (float64, 0.25–1)
---|---|---|---|---|---|
c101678b4fcbd2e6ee2996957ddb3e708ff5e9b4 | 1,164 | use super::Selection;
#[derive(Debug, Clone)]
pub enum Operation {
Read(Selection),
Write(Selection),
}
impl Operation {
pub fn is_find_unique(&self) -> bool {
match self {
Self::Read(selection) => selection.is_find_unique(),
_ => false,
}
}
pub fn into_read(self) -> Option<Selection> {
match self {
Self::Read(sel) => Some(sel),
_ => None,
}
}
pub fn into_write(self) -> Option<Selection> {
match self {
Self::Write(sel) => Some(sel),
_ => None,
}
}
}
impl Operation {
pub fn dedup_selections(self) -> Self {
match self {
Self::Read(s) => Self::Read(s.dedup()),
Self::Write(s) => Self::Write(s.dedup()),
}
}
pub fn name(&self) -> &str {
match self {
Self::Read(s) => s.name(),
Self::Write(s) => s.name(),
}
}
pub fn nested_selections(&self) -> &[Selection] {
match self {
Self::Read(s) => s.nested_selections(),
Self::Write(s) => s.nested_selections(),
}
}
}
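// Usage sketch (added for illustration, not part of the original file; `sel`
// stands for a hypothetical `Selection` value): `Operation::Read(sel).into_read()`
// returns `Some(sel)`, while `Operation::Read(sel).into_write()` returns `None`,
// and `dedup_selections` rebuilds the same variant around `Selection::dedup`.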
| 21.555556 | 64 | 0.476804 |
7a0222a9977742293e4a12d4c01b1bc3219f2e2b | 15,319 | //! This module contains the `PcapHeader` struct which represents a global pcap header.
use std::io::Read;
use byteorder::*;
use errors::*;
/// Struct that represents the global Pcap header of a Pcap file
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct PcapHeader {
/// Magic number
pub magic_number: u32,
/// Major version number
pub version_major: u16,
/// Minor version number
pub version_minor: u16,
/// GMT to local timezone correction, should always be 0
pub ts_correction: i32,
/// Timestamp accuracy, should always be 0
pub ts_accuracy: u32,
/// Max length of captured packet, typically 65535
pub snaplen: u32,
/// DataLink type (first layer in the packet (u32))
pub datalink: DataLink
}
impl Default for PcapHeader {
fn default() -> Self {
PcapHeader {
magic_number: 0xa1b2c3d4,
version_major: 2,
version_minor: 4,
ts_correction: 0,
ts_accuracy: 0,
snaplen: 65535,
datalink: DataLink::ETHERNET,
}
}
}
impl PcapHeader {
/// Creates a new `PcapHeader` with the following parameters:
///
/// ```rust,ignore
/// PcapHeader {
///
/// magic_number : 0xa1b2c3d4,
/// version_major : 2,
/// version_minor : 4,
/// ts_correction : 0,
/// ts_accuracy : 0,
/// snaplen : 65535,
/// datalink : datalink
/// };
/// ```
pub fn with_datalink(datalink: DataLink) -> PcapHeader {
PcapHeader {
datalink,
..Default::default()
}
}
/// Parses a `Reader` and creates a new `PcapHeader` from it if possible
pub fn from_reader<R: Read>(reader: &mut R) -> ResultChain<PcapHeader> {
let magic_number = reader.read_u32::<BigEndian>()?;
match magic_number {
0xa1b2c3d4 | 0xa1b23c4d => return init_pcap_header::<_, BigEndian>(reader, magic_number),
0xd4c3b2a1 | 0x4d3cb2a1 => return init_pcap_header::<_, LittleEndian>(reader, magic_number),
_ => bail!(ErrorKind::WrongField(format!("Wrong PcapHeader.magic_number: {}", magic_number)))
};
// Inner function used for the initialisation of the `PcapHeader`
fn init_pcap_header<R: Read, B: ByteOrder>(reader: &mut R, magic_number:u32) -> Result<PcapHeader, Error> {
Ok(
PcapHeader {
magic_number : magic_number,
version_major : reader.read_u16::<B>()?,
version_minor : reader.read_u16::<B>()?,
ts_correction : reader.read_i32::<B>()?,
ts_accuracy : reader.read_u32::<B>()?,
snaplen : reader.read_u32::<B>()?,
datalink : DataLink::from(reader.read_u32::<B>()?)
}
)
}
}
/// Convert a `PcapHeader` to a `Vec<u8>`.
pub fn to_array<B: ByteOrder>(&self) -> ResultChain<Vec<u8>> {
let mut out = Vec::with_capacity(24);
// The magic number is always read as `BigEndian`, so it is always written as `BigEndian` too
out.write_u32::<BigEndian>(self.magic_number)?;
out.write_u16::<B>(self.version_major)?;
out.write_u16::<B>(self.version_minor)?;
out.write_i32::<B>(self.ts_correction)?;
out.write_u32::<B>(self.ts_accuracy)?;
out.write_u32::<B>(self.snaplen)?;
out.write_u32::<B>(self.datalink.into())?;
Ok(out)
}
/// Return the endianness of the global header
///
/// # Panics
///
/// Panics if the magic number is invalid
pub fn endianness(&self) -> Endianness {
match self.magic_number {
0xa1b2c3d4 | 0xa1b23c4d => Endianness::Big,
0xd4c3b2a1 | 0x4d3cb2a1 => Endianness::Little,
_ => unreachable!("Wrong magic number, can't get the header's endianness")
}
}
/// Return the timestamp resolution of the global header
///
/// # Panics
///
/// Panics if the magic number is invalid
pub fn ts_resolution(&self) -> TsResolution {
match self.magic_number {
0xa1b2c3d4 | 0xd4c3b2a1 => TsResolution::MicroSecond,
0xa1b23c4d | 0x4d3cb2a1 => TsResolution::NanoSecond,
_ => unreachable!("Wrong magic number, can't get the header's timestamp resolution")
}
}
}
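// A minimal round-trip sketch (added for illustration, not part of the
// original file): serialize a default header with `BigEndian` and parse it
// back; the magic number also pins down endianness and timestamp resolution.
#[cfg(test)]
mod header_roundtrip_sketch {
    use super::*;

    #[test]
    fn roundtrip_default_header() {
        let header = PcapHeader::with_datalink(DataLink::ETHERNET);
        assert_eq!(header.endianness(), Endianness::Big);
        assert_eq!(header.ts_resolution(), TsResolution::MicroSecond);

        // `to_array` writes the fields with the chosen byte order, and
        // `from_reader` detects that order again from the magic number.
        let bytes = header.to_array::<BigEndian>().unwrap();
        let parsed = PcapHeader::from_reader(&mut &bytes[..]).unwrap();
        assert_eq!(parsed, header);
    }
}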
/// Represents the endianness of the global header
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Endianness {
Big,
Little
}
/// Represents each possible timestamp resolution of the global header
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum TsResolution {
MicroSecond,
NanoSecond
}
/// Represents each possible Pcap datalink
#[allow(non_camel_case_types)]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum DataLink {
NULL,
ETHERNET,
AX25,
IEEE802_5,
ARCNET_BSD,
SLIP,
PPP,
FDDI,
PPP_HDLC,
PPP_ETHER,
ATM_RFC1483,
RAW,
C_HDLC,
IEEE802_11,
FRELAY,
LOOP,
LINUX_SLL,
LTALK,
PFLOG,
IEEE802_11_PRISM,
IP_OVER_FC,
SUNATM,
IEEE802_11_RADIOTAP,
ARCNET_LINUX,
APPLE_IP_OVER_IEEE1394,
MTP2_WITH_PHDR,
MTP2,
MTP3,
SCCP,
DOCSIS,
LINUX_IRDA,
USER0,
USER1,
USER2,
USER3,
USER4,
USER5,
USER6,
USER7,
USER8,
USER9,
USER10,
USER11,
USER12,
USER13,
USER14,
USER15,
IEEE802_11_AVS,
BACNET_MS_TP,
PPP_PPPD,
GPRS_LLC,
GPF_T,
GPF_F,
LINUX_LAPD,
BLUETOOTH_HCI_H4,
USB_LINUX,
PPI,
IEEE802_15_4,
SITA,
ERF,
BLUETOOTH_HCI_H4_WITH_PHDR,
AX25_KISS,
LAPD,
PPP_WITH_DIR,
C_HDLC_WITH_DIR,
FRELAY_WITH_DIR,
IPMB_LINUX,
IEEE802_15_4_NONASK_PHY,
USB_LINUX_MMAPPED,
FC_2,
FC_2_WITH_FRAME_DELIMS,
IPNET,
CAN_SOCKETCAN,
IPV4,
IPV6,
IEEE802_15_4_NOFCS,
DBUS,
DVB_CI,
MUX27010,
STANAG_5066_D_PDU,
NFLOG,
NETANALYZER,
NETANALYZER_TRANSPARENT,
IPOIB,
MPEG_2_TS,
NG40,
NFC_LLCP,
INFINIBAND,
SCTP,
USBPCAP,
RTAC_SERIAL,
BLUETOOTH_LE_LL,
NETLINK,
BLUETOOTH_LINUX_MONITOR,
BLUETOOTH_BREDR_BB,
BLUETOOTH_LE_LL_WITH_PHDR,
PROFIBUS_DL,
PKTAP,
EPON,
IPMI_HPM_2,
ZWAVE_R1_R2,
ZWAVE_R3,
WATTSTOPPER_DLM,
ISO_14443,
RDS,
USB_DARWIN,
SDLC,
Unknown(u32)
}
impl From<u32> for DataLink {
fn from(n: u32) -> DataLink {
match n {
0 => DataLink::NULL,
1 => DataLink::ETHERNET,
3 => DataLink::AX25,
6 => DataLink::IEEE802_5,
7 => DataLink::ARCNET_BSD,
8 => DataLink::SLIP,
9 => DataLink::PPP,
10 => DataLink::FDDI,
50 => DataLink::PPP_HDLC,
51 => DataLink::PPP_ETHER,
100 => DataLink::ATM_RFC1483,
101 => DataLink::RAW,
104 => DataLink::C_HDLC,
105 => DataLink::IEEE802_11,
107 => DataLink::FRELAY,
108 => DataLink::LOOP,
113 => DataLink::LINUX_SLL,
114 => DataLink::LTALK,
117 => DataLink::PFLOG,
119 => DataLink::IEEE802_11_PRISM,
122 => DataLink::IP_OVER_FC,
123 => DataLink::SUNATM,
127 => DataLink::IEEE802_11_RADIOTAP,
129 => DataLink::ARCNET_LINUX,
138 => DataLink::APPLE_IP_OVER_IEEE1394,
139 => DataLink::MTP2_WITH_PHDR,
140 => DataLink::MTP2,
141 => DataLink::MTP3,
142 => DataLink::SCCP,
143 => DataLink::DOCSIS,
144 => DataLink::LINUX_IRDA,
147 => DataLink::USER0,
148 => DataLink::USER1,
149 => DataLink::USER2,
150 => DataLink::USER3,
151 => DataLink::USER4,
152 => DataLink::USER5,
153 => DataLink::USER6,
154 => DataLink::USER7,
155 => DataLink::USER8,
156 => DataLink::USER9,
157 => DataLink::USER10,
158 => DataLink::USER11,
159 => DataLink::USER12,
160 => DataLink::USER13,
161 => DataLink::USER14,
162 => DataLink::USER15,
163 => DataLink::IEEE802_11_AVS,
165 => DataLink::BACNET_MS_TP,
166 => DataLink::PPP_PPPD,
169 => DataLink::GPRS_LLC,
170 => DataLink::GPF_T,
171 => DataLink::GPF_F,
177 => DataLink::LINUX_LAPD,
187 => DataLink::BLUETOOTH_HCI_H4,
189 => DataLink::USB_LINUX,
192 => DataLink::PPI,
195 => DataLink::IEEE802_15_4,
196 => DataLink::SITA,
197 => DataLink::ERF,
201 => DataLink::BLUETOOTH_HCI_H4_WITH_PHDR,
202 => DataLink::AX25_KISS,
203 => DataLink::LAPD,
204 => DataLink::PPP_WITH_DIR,
205 => DataLink::C_HDLC_WITH_DIR,
206 => DataLink::FRELAY_WITH_DIR,
209 => DataLink::IPMB_LINUX,
215 => DataLink::IEEE802_15_4_NONASK_PHY,
220 => DataLink::USB_LINUX_MMAPPED,
224 => DataLink::FC_2,
225 => DataLink::FC_2_WITH_FRAME_DELIMS,
226 => DataLink::IPNET,
227 => DataLink::CAN_SOCKETCAN,
228 => DataLink::IPV4,
229 => DataLink::IPV6,
230 => DataLink::IEEE802_15_4_NOFCS,
231 => DataLink::DBUS,
235 => DataLink::DVB_CI,
236 => DataLink::MUX27010,
237 => DataLink::STANAG_5066_D_PDU,
239 => DataLink::NFLOG,
240 => DataLink::NETANALYZER,
241 => DataLink::NETANALYZER_TRANSPARENT,
242 => DataLink::IPOIB,
243 => DataLink::MPEG_2_TS,
244 => DataLink::NG40,
245 => DataLink::NFC_LLCP,
247 => DataLink::INFINIBAND,
248 => DataLink::SCTP,
249 => DataLink::USBPCAP,
250 => DataLink::RTAC_SERIAL,
251 => DataLink::BLUETOOTH_LE_LL,
253 => DataLink::NETLINK,
254 => DataLink::BLUETOOTH_LINUX_MONITOR,
255 => DataLink::BLUETOOTH_BREDR_BB,
256 => DataLink::BLUETOOTH_LE_LL_WITH_PHDR,
257 => DataLink::PROFIBUS_DL,
258 => DataLink::PKTAP,
259 => DataLink::EPON,
260 => DataLink::IPMI_HPM_2,
261 => DataLink::ZWAVE_R1_R2,
262 => DataLink::ZWAVE_R3,
263 => DataLink::WATTSTOPPER_DLM,
264 => DataLink::ISO_14443,
265 => DataLink::RDS,
266 => DataLink::USB_DARWIN,
268 => DataLink::SDLC,
_ => DataLink::Unknown(n)
}
}
}
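// A small sketch (added for illustration, not part of the original file): the
// paired `From` conversions are mutual inverses for known link types, and
// unknown codes survive a round trip through `DataLink::Unknown`.
#[cfg(test)]
mod datalink_roundtrip_sketch {
    use super::*;

    #[test]
    fn roundtrip_known_and_unknown_codes() {
        assert_eq!(DataLink::from(u32::from(DataLink::ETHERNET)), DataLink::ETHERNET);
        assert_eq!(u32::from(DataLink::from(12_345)), 12_345);
    }
}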
impl From<DataLink> for u32 {
fn from(link: DataLink) -> u32 {
match link {
DataLink::NULL => 0,
DataLink::ETHERNET => 1,
DataLink::AX25 => 3,
DataLink::IEEE802_5 => 6,
DataLink::ARCNET_BSD => 7,
DataLink::SLIP => 8,
DataLink::PPP => 9,
DataLink::FDDI => 10,
DataLink::PPP_HDLC => 50,
DataLink::PPP_ETHER => 51,
DataLink::ATM_RFC1483 => 100,
DataLink::RAW => 101,
DataLink::C_HDLC => 104,
DataLink::IEEE802_11 => 105,
DataLink::FRELAY => 107,
DataLink::LOOP => 108,
DataLink::LINUX_SLL => 113,
DataLink::LTALK => 114,
DataLink::PFLOG => 117,
DataLink::IEEE802_11_PRISM => 119,
DataLink::IP_OVER_FC => 122,
DataLink::SUNATM => 123,
DataLink::IEEE802_11_RADIOTAP => 127,
DataLink::ARCNET_LINUX => 129,
DataLink::APPLE_IP_OVER_IEEE1394 => 138,
DataLink::MTP2_WITH_PHDR => 139,
DataLink::MTP2 => 140,
DataLink::MTP3 => 141,
DataLink::SCCP => 142,
DataLink::DOCSIS => 143,
DataLink::LINUX_IRDA => 144,
DataLink::USER0 => 147,
DataLink::USER1 => 148,
DataLink::USER2 => 149,
DataLink::USER3 => 150,
DataLink::USER4 => 151,
DataLink::USER5 => 152,
DataLink::USER6 => 153,
DataLink::USER7 => 154,
DataLink::USER8 => 155,
DataLink::USER9 => 156,
DataLink::USER10 => 157,
DataLink::USER11 => 158,
DataLink::USER12 => 159,
DataLink::USER13 => 160,
DataLink::USER14 => 161,
DataLink::USER15 => 162,
DataLink::IEEE802_11_AVS => 163,
DataLink::BACNET_MS_TP => 165,
DataLink::PPP_PPPD => 166,
DataLink::GPRS_LLC => 169,
DataLink::GPF_T => 170,
DataLink::GPF_F => 171,
DataLink::LINUX_LAPD => 177,
DataLink::BLUETOOTH_HCI_H4 => 187,
DataLink::USB_LINUX => 189,
DataLink::PPI => 192,
DataLink::IEEE802_15_4 => 195,
DataLink::SITA => 196,
DataLink::ERF => 197,
DataLink::BLUETOOTH_HCI_H4_WITH_PHDR => 201,
DataLink::AX25_KISS => 202,
DataLink::LAPD => 203,
DataLink::PPP_WITH_DIR => 204,
DataLink::C_HDLC_WITH_DIR => 205,
DataLink::FRELAY_WITH_DIR => 206,
DataLink::IPMB_LINUX => 209,
DataLink::IEEE802_15_4_NONASK_PHY => 215,
DataLink::USB_LINUX_MMAPPED => 220,
DataLink::FC_2 => 224,
DataLink::FC_2_WITH_FRAME_DELIMS => 225,
DataLink::IPNET => 226,
DataLink::CAN_SOCKETCAN => 227,
DataLink::IPV4 => 228,
DataLink::IPV6 => 229,
DataLink::IEEE802_15_4_NOFCS => 230,
DataLink::DBUS => 231,
DataLink::DVB_CI => 235,
DataLink::MUX27010 => 236,
DataLink::STANAG_5066_D_PDU => 237,
DataLink::NFLOG => 239,
DataLink::NETANALYZER => 240,
DataLink::NETANALYZER_TRANSPARENT => 241,
DataLink::IPOIB => 242,
DataLink::MPEG_2_TS => 243,
DataLink::NG40 => 244,
DataLink::NFC_LLCP => 245,
DataLink::INFINIBAND => 247,
DataLink::SCTP => 248,
DataLink::USBPCAP => 249,
DataLink::RTAC_SERIAL => 250,
DataLink::BLUETOOTH_LE_LL => 251,
DataLink::NETLINK => 253,
DataLink::BLUETOOTH_LINUX_MONITOR => 254,
DataLink::BLUETOOTH_BREDR_BB => 255,
DataLink::BLUETOOTH_LE_LL_WITH_PHDR => 256,
DataLink::PROFIBUS_DL => 257,
DataLink::PKTAP => 258,
DataLink::EPON => 259,
DataLink::IPMI_HPM_2 => 260,
DataLink::ZWAVE_R1_R2 => 261,
DataLink::ZWAVE_R3 => 262,
DataLink::WATTSTOPPER_DLM => 263,
DataLink::ISO_14443 => 264,
DataLink::RDS => 265,
DataLink::USB_DARWIN => 266,
DataLink::SDLC => 268,
DataLink::Unknown(n) => n
}
}
} | 29.573359 | 115 | 0.536589 |
ab0b5f888ea0bb151e0e22bf0cbc35150d5ad809 | 8,946 | mod find;
mod node;
pub use self::find::Find;
use std::{cmp, ops::RangeInclusive};
use self::node::Node;
/// A self-balancing binary search tree optimized to hold interval-value pairs.
#[derive(Default)]
pub struct IntervalTree<K: Clone + Ord, V> {
root: Option<Box<Node<K, V>>>,
}
impl<K: Clone + Ord, V> IntervalTree<K, V> {
/// Creates an empty interval tree.
///
/// # Examples
///
/// ```
/// use interval_tree::IntervalTree;
/// let _tree: IntervalTree<u64, &str> = IntervalTree::new();
/// ```
pub fn new() -> Self {
Self { root: None }
}
/// Adds an interval-value pair into the tree.
///
/// Upon a collision, the new node is inserted into the left subtree of the existing node.
///
/// # Examples
///
/// ```
/// use interval_tree::IntervalTree;
///
/// let mut tree = IntervalTree::new();
///
/// tree.insert(2..=6, "elm");
/// tree.insert(7..=13, "ash");
/// tree.insert(7..=13, "walnut");
/// ```
pub fn insert(&mut self, key: RangeInclusive<K>, value: V) {
self.root = if let Some(root) = self.root.take() {
Some(insert(root, key, value))
} else {
Some(Box::new(Node::new(key, value)))
};
}
/// Returns an iterator visiting nodes that intersect the given key.
///
/// # Examples
///
/// ```
/// use interval_tree::IntervalTree;
///
/// let mut tree = IntervalTree::new();
///
/// tree.insert(2..=6, "elm");
/// tree.insert(7..=13, "ash");
/// tree.insert(3..=9, "walnut");
///
/// let mut iter = tree.find(8..=10);
///
/// let entry = iter.next().unwrap();
/// assert_eq!(entry.key(), &(3..=9));
/// assert_eq!(entry.get(), &"walnut");
///
/// let entry = iter.next().unwrap();
/// assert_eq!(entry.key(), &(7..=13));
/// assert_eq!(entry.get(), &"ash");
///
/// assert!(iter.next().is_none());
/// ```
pub fn find(&self, key: RangeInclusive<K>) -> Find<K, V> {
let nodes = self.root.iter().map::<&Node<K, V>, _>(|n| n).collect();
Find::new(nodes, key)
}
}
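// A minimal sketch (added for illustration, not part of the original file):
// inserting two values under the same interval keeps both reachable via `find`.
#[cfg(test)]
mod duplicate_interval_sketch {
    use super::*;

    #[test]
    fn duplicate_intervals_are_both_found() {
        let mut tree = IntervalTree::new();
        tree.insert(7..=13, "ash");
        tree.insert(7..=13, "walnut");
        // Both entries intersect the query point 8.
        assert_eq!(tree.find(8..=8).count(), 2);
    }
}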
fn height<K: Clone + Ord, V>(root: &Option<Box<Node<K, V>>>) -> u32 {
root.as_ref().map_or(0, |n| n.height)
}
enum BalanceFactor {
LeftHeavy,
Balanced,
RightHeavy,
}
fn balance_factor<K: Clone + Ord, V>(root: &Node<K, V>) -> BalanceFactor {
let left_height = height(&root.left) as i32;
let right_height = height(&root.right) as i32;
if left_height > right_height && left_height - right_height >= 2 {
BalanceFactor::LeftHeavy
} else if left_height < right_height && right_height - left_height >= 2 {
BalanceFactor::RightHeavy
} else {
BalanceFactor::Balanced
}
}
fn update_height<K: Clone + Ord, V>(root: &mut Node<K, V>) {
let left_height = height(&root.left);
let right_height = height(&root.right);
root.height = cmp::max(left_height, right_height) + 1;
}
fn update_max<K: Clone + Ord, V>(root: &mut Node<K, V>) {
root.max = root.key.end().clone();
if let Some(ref left) = root.left {
if left.max > root.max {
root.max = left.max.clone();
}
}
if let Some(ref right) = root.right {
if right.max > root.max {
root.max = right.max.clone();
}
}
}
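// Shape of a left rotation, sketched for illustration (not in the original
// source); heights and max bounds are recomputed bottom-up after relinking:
//
//      root                    new_root
//     /    \                   /      \
//    A    new_root    =>    root       C
//          /     \          /   \
//         B       C        A     B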
fn rotate_left<K: Clone + Ord, V>(mut root: Box<Node<K, V>>) -> Box<Node<K, V>> {
let mut new_root = root.right.take().expect("invalid tree");
root.right = new_root.left.take();
update_height(&mut root);
update_max(&mut root);
new_root.left = Some(root);
update_height(&mut new_root);
update_max(&mut new_root);
new_root
}
fn balance_left_heavy_tree<K: Clone + Ord, V>(mut root: Box<Node<K, V>>) -> Box<Node<K, V>> {
let left = root.left.take().expect("invalid tree");
if height(&left.left) < height(&left.right) {
let new_left = rotate_left(left);
root.left = Some(new_left);
update_height(&mut root);
update_max(&mut root);
} else {
root.left = Some(left);
}
rotate_right(root)
}
fn rotate_right<K: Clone + Ord, V>(mut root: Box<Node<K, V>>) -> Box<Node<K, V>> {
let mut new_root = root.left.take().expect("invalid tree");
root.left = new_root.right.take();
update_height(&mut root);
update_max(&mut root);
new_root.right = Some(root);
update_height(&mut new_root);
update_max(&mut new_root);
new_root
}
fn balance_right_heavy_tree<K: Clone + Ord, V>(mut root: Box<Node<K, V>>) -> Box<Node<K, V>> {
let right = root.right.take().expect("invalid tree");
if height(&right.left) > height(&right.right) {
let new_right = rotate_right(right);
root.right = Some(new_right);
update_height(&mut root);
update_max(&mut root);
} else {
root.right = Some(right);
}
rotate_left(root)
}
fn balance<K: Clone + Ord, V>(root: Box<Node<K, V>>) -> Box<Node<K, V>> {
match balance_factor(&root) {
BalanceFactor::LeftHeavy => balance_left_heavy_tree(root),
BalanceFactor::Balanced => root,
BalanceFactor::RightHeavy => balance_right_heavy_tree(root),
}
}
fn insert<K, V>(mut root: Box<Node<K, V>>, key: RangeInclusive<K>, value: V) -> Box<Node<K, V>>
where
K: Clone + Ord,
{
if key.start() <= root.key.start() {
root.left = if let Some(left) = root.left.take() {
Some(insert(left, key, value))
} else {
Some(Box::new(Node::new(key, value)))
}
} else if key.start() > root.key.start() {
root.right = if let Some(right) = root.right.take() {
Some(insert(right, key, value))
} else {
Some(Box::new(Node::new(key, value)))
}
}
update_height(&mut root);
update_max(&mut root);
balance(root)
}
#[cfg(test)]
mod tests {
use super::*;
fn build_tree() -> IntervalTree<i32, i32> {
//           15..=18
//          /       \
//      5..=8      17..=19
//      /   \      /     \
//  4..=8  7..=10 16..=22 21..=24
let mut tree = IntervalTree::new();
tree.insert(17..=19, 0);
tree.insert(5..=8, 1);
tree.insert(21..=24, 2);
tree.insert(4..=8, 3);
tree.insert(15..=18, 4);
tree.insert(7..=10, 5);
tree.insert(16..=22, 6);
tree
}
#[test]
fn test_insert() {
let tree = build_tree();
let root = tree.root.as_ref().unwrap();
assert_eq!(root.key, 15..=18);
assert_eq!(root.value, 4);
assert_eq!(root.max, 24);
assert_eq!(root.height, 3);
let node = root.left.as_ref().unwrap();
assert_eq!(node.key, 5..=8);
assert_eq!(node.value, 1);
assert_eq!(node.max, 10);
assert_eq!(node.height, 2);
let node = root
.left
.as_ref()
.and_then(|node| node.left.as_ref())
.unwrap();
assert_eq!(node.key, 4..=8);
assert_eq!(node.value, 3);
assert_eq!(node.max, 8);
assert_eq!(node.height, 1);
let node = root
.left
.as_ref()
.and_then(|node| node.right.as_ref())
.unwrap();
assert_eq!(node.key, 7..=10);
assert_eq!(node.value, 5);
assert_eq!(node.max, 10);
assert_eq!(node.height, 1);
let node = root.right.as_ref().unwrap();
assert_eq!(node.key, 17..=19);
assert_eq!(node.value, 0);
assert_eq!(node.max, 24);
assert_eq!(node.height, 2);
let node = root
.right
.as_ref()
.and_then(|node| node.left.as_ref())
.unwrap();
assert_eq!(node.key, 16..=22);
assert_eq!(node.value, 6);
assert_eq!(node.max, 22);
assert_eq!(node.height, 1);
let node = root
.right
.as_ref()
.and_then(|node| node.right.as_ref())
.unwrap();
assert_eq!(node.key, 21..=24);
assert_eq!(node.value, 2);
assert_eq!(node.max, 24);
assert_eq!(node.height, 1);
}
#[test]
fn test_find() {
let tree = build_tree();
let entries: Vec<_> = tree.find(7..=20).collect();
assert_eq!(entries.len(), 6);
assert_eq!(entries[0].key(), &(15..=18));
assert_eq!(entries[0].get(), &4);
assert_eq!(entries[1].key(), &(17..=19));
assert_eq!(entries[1].get(), &0);
assert_eq!(entries[2].key(), &(16..=22));
assert_eq!(entries[2].get(), &6);
assert_eq!(entries[3].key(), &(5..=8));
assert_eq!(entries[3].get(), &1);
assert_eq!(entries[4].key(), &(7..=10));
assert_eq!(entries[4].get(), &5);
assert_eq!(entries[5].key(), &(4..=8));
assert_eq!(entries[5].get(), &3);
}
}
| 27.109091 | 95 | 0.528504 |
0aa7b8eaf5e76187c2d480591d3602bf7aa7d1da | 21,552 | /*
This file is part of Curv library
Copyright 2018 by Kzen Networks
(https://github.com/KZen-networks/curv)
License MIT: <https://github.com/KZen-networks/curv/blob/master/LICENSE>
*/
pub const SECRET_KEY_SIZE: usize = 32;
pub const COMPRESSED_SIZE: usize = 96;
use std::fmt;
use std::fmt::Debug;
use std::ops::{Add, Mul, Neg};
use std::str;
use ff_zeroize::{Field, PrimeField, PrimeFieldRepr, ScalarEngine};
use pairing_plus::bls12_381::Fr;
use pairing_plus::bls12_381::G2Compressed;
use pairing_plus::bls12_381::G2Uncompressed;
use pairing_plus::bls12_381::G2;
use pairing_plus::hash_to_curve::HashToCurve;
use pairing_plus::hash_to_field::ExpandMsgXmd;
use pairing_plus::serdes::SerDes;
use pairing_plus::EncodedPoint;
use pairing_plus::{CurveAffine, CurveProjective, Engine};
use sha2::Sha256;
use serde::de::{self, Error, MapAccess, SeqAccess, Visitor};
use serde::ser::SerializeStruct;
use serde::ser::{Serialize, Serializer};
use serde::{Deserialize, Deserializer};
pub type SK = <pairing_plus::bls12_381::Bls12 as ScalarEngine>::Fr;
pub type PK = <pairing_plus::bls12_381::Bls12 as Engine>::G2Affine;
use crate::arithmetic::traits::*;
use crate::BigInt;
use crate::ErrorKey::{self};
use std::ptr;
use std::sync::atomic;
use zeroize::Zeroize;
use crate::elliptic::curves::traits::ECPoint;
use crate::elliptic::curves::traits::ECScalar;
#[cfg(feature = "merkle")]
use crypto::digest::Digest;
#[cfg(feature = "merkle")]
use crypto::sha3::Sha3;
#[cfg(feature = "merkle")]
use merkle::Hashable;
use std::io::Cursor;
#[derive(Clone, Copy)]
pub struct FieldScalar {
purpose: &'static str,
fe: SK,
}
#[derive(Clone, Copy)]
pub struct G2Point {
purpose: &'static str,
ge: PK,
}
pub type GE = G2Point;
pub type FE = FieldScalar;
impl Zeroize for FieldScalar {
fn zeroize(&mut self) {
unsafe { ptr::write_volatile(self, FE::zero()) };
atomic::fence(atomic::Ordering::SeqCst);
atomic::compiler_fence(atomic::Ordering::SeqCst);
}
}
impl ECScalar for FieldScalar {
type SecretKey = SK;
fn new_random() -> FieldScalar {
let rnd_bn = BigInt::sample_below(&FE::q());
ECScalar::from(&rnd_bn)
}
fn zero() -> FieldScalar {
FieldScalar {
purpose: "zero",
fe: SK::default(),
}
}
fn get_element(&self) -> SK {
self.fe
}
fn set_element(&mut self, element: SK) {
self.fe = element
}
fn from(n: &BigInt) -> FieldScalar {
let n_mod = BigInt::modulus(n, &FE::q());
let mut v = BigInt::to_bytes(&n_mod);
if v.len() < SECRET_KEY_SIZE {
let mut template = vec![0; SECRET_KEY_SIZE - v.len()];
template.extend_from_slice(&v);
v = template;
}
// Left-pad `v` to exactly SECRET_KEY_SIZE bytes, then copy into a fixed array.
let mut bytes_array = [0u8; SECRET_KEY_SIZE];
bytes_array.copy_from_slice(&v[..SECRET_KEY_SIZE]);
// bytes_array.reverse();
let mut repr = SK::default().into_repr();
repr.read_be(Cursor::new(&bytes_array[..])).unwrap();
FieldScalar {
purpose: "from_big_int",
fe: Fr::from_repr(repr).unwrap(),
}
}
fn to_big_int(&self) -> BigInt {
let tmp = self.fe.into_repr();
let scalar_u64 = tmp.as_ref();
let to_bn = scalar_u64.iter().rev().fold(BigInt::zero(), |acc, x| {
let element_bn = BigInt::from(*x);
element_bn + (acc << 64)
});
to_bn
}
fn q() -> BigInt {
let q_u64: [u64; 4] = [
0xffffffff00000001,
0x53bda402fffe5bfe,
0x3339d80809a1d805,
0x73eda753299d7d48,
];
let to_bn = q_u64.iter().rev().fold(BigInt::zero(), |acc, x| {
let element_bn = BigInt::from(*x);
element_bn + (acc << 64)
});
to_bn
}
fn add(&self, other: &SK) -> FieldScalar {
let mut add_fe = FieldScalar {
purpose: "other add",
fe: *other,
};
add_fe.fe.add_assign(&self.fe);
FieldScalar {
purpose: "add",
fe: add_fe.fe,
}
}
fn mul(&self, other: &SK) -> FieldScalar {
let mut mul_fe = FieldScalar {
purpose: "other mul",
fe: *other,
};
mul_fe.fe.mul_assign(&self.fe);
FieldScalar {
purpose: "mul",
fe: mul_fe.fe,
}
}
fn sub(&self, other: &SK) -> FieldScalar {
let mut other_neg = *other;
other_neg.negate();
let sub_fe = FieldScalar {
purpose: "other sub",
fe: other_neg,
};
self.add(&sub_fe.get_element())
}
fn invert(&self) -> FieldScalar {
let sc = self.fe;
let inv_sc = sc.inverse().unwrap(); //TODO
FieldScalar {
purpose: "inverse",
fe: inv_sc,
}
}
}
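// A minimal sketch (added for illustration, not part of the original file):
// `sub` is implemented as addition of the negation, so `a - a` must be zero.
#[cfg(test)]
mod scalar_sub_sketch {
    use super::*;

    #[test]
    fn a_minus_a_is_zero() {
        let a: FieldScalar = ECScalar::new_random();
        assert_eq!(a.sub(&a.get_element()), FieldScalar::zero());
    }
}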
impl Debug for FieldScalar {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Point {{ purpose: {:?}, bytes: {:?} }}",
self.purpose, self.fe,
)
}
}
impl PartialEq for FieldScalar {
fn eq(&self, other: &FieldScalar) -> bool {
self.get_element() == other.get_element()
}
}
impl Mul<FieldScalar> for FieldScalar {
type Output = FieldScalar;
fn mul(self, other: FieldScalar) -> FieldScalar {
(&self).mul(&other.get_element())
}
}
impl<'o> Mul<&'o FieldScalar> for FieldScalar {
type Output = FieldScalar;
fn mul(self, other: &'o FieldScalar) -> FieldScalar {
(&self).mul(&other.get_element())
}
}
impl Add<FieldScalar> for FieldScalar {
type Output = FieldScalar;
fn add(self, other: FieldScalar) -> FieldScalar {
(&self).add(&other.get_element())
}
}
impl<'o> Add<&'o FieldScalar> for FieldScalar {
type Output = FieldScalar;
fn add(self, other: &'o FieldScalar) -> FieldScalar {
(&self).add(&other.get_element())
}
}
impl Serialize for FieldScalar {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&self.to_big_int().to_hex())
}
}
impl<'de> Deserialize<'de> for FieldScalar {
fn deserialize<D>(deserializer: D) -> Result<FieldScalar, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_str(BLS12_381ScalarVisitor)
}
}
struct BLS12_381ScalarVisitor;
impl<'de> Visitor<'de> for BLS12_381ScalarVisitor {
type Value = FieldScalar;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("bls12_381")
}
fn visit_str<E: de::Error>(self, s: &str) -> Result<FieldScalar, E> {
let v = BigInt::from_hex(s).map_err(E::custom)?;
Ok(ECScalar::from(&v))
}
}
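// A small sketch (added for illustration, not part of the original file):
// scalars serialize as hex strings, so a serde_json round trip should
// preserve the value.
#[cfg(test)]
mod scalar_serde_sketch {
    use super::*;

    #[test]
    fn scalar_json_roundtrip() {
        let s: FieldScalar = ECScalar::new_random();
        let json = serde_json::to_string(&s).unwrap();
        let back: FieldScalar = serde_json::from_str(&json).unwrap();
        assert_eq!(s, back);
    }
}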
impl Debug for G2Point {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Point {{ purpose: {:?}, bytes: {:?} }}",
self.purpose,
self.bytes_compressed_to_big_int().to_hex()
)
}
}
impl PartialEq for G2Point {
fn eq(&self, other: &G2Point) -> bool {
self.get_element() == other.get_element()
}
}
impl Zeroize for G2Point {
fn zeroize(&mut self) {
unsafe { ptr::write_volatile(self, GE::generator()) };
atomic::fence(atomic::Ordering::SeqCst);
atomic::compiler_fence(atomic::Ordering::SeqCst);
}
}
impl ECPoint for G2Point {
type SecretKey = SK;
type PublicKey = PK;
type Scalar = FieldScalar;
fn base_point2() -> G2Point {
const BASE_POINT2: [u8; 192] = [
0, 204, 165, 72, 21, 96, 36, 119, 117, 242, 58, 55, 105, 140, 136, 76, 180, 140, 92,
212, 55, 3, 146, 72, 120, 181, 37, 205, 165, 221, 144, 86, 57, 124, 16, 19, 160, 215,
21, 251, 236, 99, 91, 147, 237, 113, 223, 70, 14, 223, 81, 150, 157, 235, 107, 225,
151, 227, 119, 53, 195, 46, 25, 54, 57, 158, 156, 122, 75, 152, 119, 51, 137, 131, 43,
34, 68, 24, 24, 210, 18, 75, 36, 20, 232, 76, 38, 84, 44, 112, 213, 217, 192, 122, 177,
186, 5, 113, 25, 229, 205, 55, 65, 191, 147, 1, 212, 194, 151, 141, 43, 223, 68, 185,
183, 66, 163, 62, 96, 92, 36, 209, 216, 40, 16, 132, 231, 104, 179, 248, 189, 53, 154,
106, 83, 159, 5, 54, 86, 87, 45, 68, 52, 247, 3, 90, 148, 187, 234, 213, 114, 244, 52,
137, 201, 13, 165, 57, 217, 190, 150, 103, 223, 193, 129, 198, 47, 86, 122, 196, 22,
200, 123, 89, 178, 216, 11, 238, 155, 106, 172, 125, 164, 95, 2, 136, 132, 137, 27,
184, 237, 169,
];
let mut point = G2Uncompressed::empty();
point.as_mut().copy_from_slice(&BASE_POINT2);
G2Point {
purpose: "base_ge2",
ge: point.into_affine().expect("invalid base_point"),
}
}
fn generator() -> G2Point {
G2Point {
purpose: "base_fe",
ge: PK::one(),
}
}
fn get_element(&self) -> PK {
self.ge
}
fn x_coor(&self) -> Option<BigInt> {
let tmp = G2Uncompressed::from_affine(self.ge);
let bytes = tmp.as_ref();
let x_coor = &bytes[0..COMPRESSED_SIZE];
let bn = BigInt::from_bytes(x_coor);
Some(bn)
}
fn y_coor(&self) -> Option<BigInt> {
let tmp = G2Uncompressed::from_affine(self.ge);
let bytes = tmp.as_ref();
let y_coor = &bytes[COMPRESSED_SIZE..2 * COMPRESSED_SIZE];
let bn = BigInt::from_bytes(y_coor);
Some(bn)
}
fn bytes_compressed_to_big_int(&self) -> BigInt {
let tmp = G2Compressed::from_affine(self.ge);
let bytes = tmp.as_ref();
BigInt::from_bytes(bytes)
}
fn from_bytes(bytes: &[u8]) -> Result<G2Point, ErrorKey> {
let mut bytes_array_comp = [0u8; COMPRESSED_SIZE];
match bytes.len() {
0..=COMPRESSED_SIZE => {
(&mut bytes_array_comp[COMPRESSED_SIZE - bytes.len()..]).copy_from_slice(bytes);
}
_ => {
bytes_array_comp.copy_from_slice(&bytes[..COMPRESSED_SIZE]);
}
}
let g2_comp = G2::deserialize(&mut bytes_array_comp[..].as_ref(), true).unwrap();
let pk = G2Point {
purpose: "from_bytes",
ge: g2_comp.into_affine(), //TODO: handle error
};
Ok(pk)
}
// The inverse of `from_bytes`: serializes the compressed public key into COMPRESSED_SIZE bytes.
fn pk_to_key_slice(&self) -> Vec<u8> {
let mut compressed_vec = vec![];
PK::serialize(&self.ge, &mut compressed_vec, true)
.expect("serializing into vec should always succeed");
compressed_vec
}
fn scalar_mul(&self, fe: &SK) -> G2Point {
let mut ge_proj: G2 = self.ge.into();
ge_proj.mul_assign(fe.into_repr());
G2Point {
purpose: "scalar_point_mul",
ge: ge_proj.into_affine(),
}
}
fn add_point(&self, other: &PK) -> G2Point {
let mut ge_proj: G2 = self.ge.into();
ge_proj.add_assign_mixed(other);
G2Point {
purpose: "combine",
ge: ge_proj.into_affine(),
}
}
fn sub_point(&self, other: &PK) -> G2Point {
let mut ge_proj: G2 = self.ge.into();
ge_proj.sub_assign_mixed(other);
G2Point {
purpose: "sub",
ge: ge_proj.into_affine(),
}
}
fn from_coor(_x: &BigInt, _y: &BigInt) -> G2Point {
// TODO
unimplemented!();
}
}
impl From<pairing_plus::bls12_381::G2Affine> for G2Point {
fn from(point: PK) -> Self {
G2Point {
purpose: "from_point",
ge: point,
}
}
}
impl Mul<FieldScalar> for G2Point {
type Output = G2Point;
fn mul(self, other: FieldScalar) -> G2Point {
self.scalar_mul(&other.get_element())
}
}
impl<'o> Mul<&'o FieldScalar> for G2Point {
type Output = G2Point;
fn mul(self, other: &'o FieldScalar) -> G2Point {
self.scalar_mul(&other.get_element())
}
}
impl<'o> Mul<&'o FieldScalar> for &'o G2Point {
type Output = G2Point;
fn mul(self, other: &'o FieldScalar) -> G2Point {
self.scalar_mul(&other.get_element())
}
}
impl Add<G2Point> for G2Point {
type Output = G2Point;
fn add(self, other: G2Point) -> G2Point {
self.add_point(&other.get_element())
}
}
impl<'o> Add<&'o G2Point> for G2Point {
type Output = G2Point;
fn add(self, other: &'o G2Point) -> G2Point {
self.add_point(&other.get_element())
}
}
impl<'o> Add<&'o G2Point> for &'o G2Point {
type Output = G2Point;
fn add(self, other: &'o G2Point) -> G2Point {
self.add_point(&other.get_element())
}
}
impl Neg for G2Point {
type Output = Self;
fn neg(mut self) -> Self {
self.ge.negate();
self.purpose = "negated";
self
}
}
#[cfg(feature = "merkle")]
impl Hashable for G2Point {
fn update_context(&self, context: &mut Sha3) {
let bytes: Vec<u8> = self.pk_to_key_slice();
context.input(&bytes[..]);
}
}
impl Serialize for G2Point {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let bytes = self.pk_to_key_slice();
let bytes_as_bn = BigInt::from_bytes(&bytes[..]);
let mut state = serializer.serialize_struct("Bls12381G2Point", 1)?;
state.serialize_field("bytes_str", &bytes_as_bn.to_hex())?;
state.end()
}
}
impl<'de> Deserialize<'de> for G2Point {
fn deserialize<D>(deserializer: D) -> Result<G2Point, D::Error>
where
D: Deserializer<'de>,
{
const FIELDS: &[&str] = &["bytes_str"];
deserializer.deserialize_struct("Bls12381G2Point", FIELDS, Bls12381G2PointVisitor)
}
}
struct Bls12381G2PointVisitor;
impl<'de> Visitor<'de> for Bls12381G2PointVisitor {
type Value = G2Point;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("Bls12381G2Point")
}
fn visit_seq<V>(self, mut seq: V) -> Result<G2Point, V::Error>
where
V: SeqAccess<'de>,
{
let bytes_str = seq
.next_element()?
.ok_or_else(|| V::Error::invalid_length(0, &"a single element"))?;
let bytes_bn = BigInt::from_hex(bytes_str).map_err(V::Error::custom)?;
let bytes = BigInt::to_bytes(&bytes_bn);
G2Point::from_bytes(&bytes[..]).map_err(|_| V::Error::custom("failed to parse g2 point"))
}
fn visit_map<E: MapAccess<'de>>(self, mut map: E) -> Result<G2Point, E::Error> {
let mut bytes_str: String = "".to_string();
while let Some(key) = map.next_key::<&'de str>()? {
let v = map.next_value::<&'de str>()?;
match key {
"bytes_str" => {
bytes_str = String::from(v);
}
_ => return Err(E::Error::unknown_field(key, &["bytes_str"])),
}
}
let bytes_bn = BigInt::from_hex(&bytes_str).map_err(E::Error::custom)?;
let bytes = BigInt::to_bytes(&bytes_bn);
G2Point::from_bytes(&bytes[..]).map_err(|_| E::Error::custom("failed to parse g2 point"))
}
}
impl G2Point {
/// Converts a message to a G2 point.
///
/// Uses [expand_message_xmd][xmd] based on sha256.
///
/// [xmd]: https://www.ietf.org/id/draft-irtf-cfrg-hash-to-curve-10.html#name-expand_message_xmd-2
pub fn hash_to_curve(message: &[u8]) -> Self {
let cs = &[1u8];
let point = <G2 as HashToCurve<ExpandMsgXmd<Sha256>>>::hash_to_curve(message, cs);
G2Point {
purpose: "hash_to_curve",
ge: point.into_affine(),
}
}
}
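// A minimal sketch (added for illustration, not part of the original file):
// `hash_to_curve` is deterministic, and distinct messages should map to
// distinct points with overwhelming probability.
#[cfg(test)]
mod hash_to_curve_sketch {
    use super::*;

    #[test]
    fn hash_is_deterministic() {
        let p1 = G2Point::hash_to_curve(b"message");
        let p2 = G2Point::hash_to_curve(b"message");
        let p3 = G2Point::hash_to_curve(b"other message");
        assert_eq!(p1, p2);
        assert_ne!(p1, p3);
    }
}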
#[cfg(test)]
mod tests {
use pairing_plus::bls12_381::{G2Uncompressed, G2};
use pairing_plus::hash_to_curve::HashToCurve;
use pairing_plus::hash_to_field::ExpandMsgXmd;
use pairing_plus::{CurveProjective, SubgroupCheck};
use sha2::Sha256;
use super::G2Point;
use crate::arithmetic::traits::*;
use crate::elliptic::curves::bls12_381::g2::{FE, GE};
use crate::elliptic::curves::traits::ECPoint;
use crate::elliptic::curves::traits::ECScalar;
use crate::BigInt;
#[test]
fn test_serdes_pk() {
let pk = GE::generator();
let s = serde_json::to_string(&pk).expect("Failed in serialization");
let des_pk: GE = serde_json::from_str(&s).expect("Failed in deserialization");
assert_eq!(des_pk, pk);
let pk = GE::base_point2();
let s = serde_json::to_string(&pk).expect("Failed in serialization");
let des_pk: GE = serde_json::from_str(&s).expect("Failed in deserialization");
assert_eq!(des_pk, pk);
}
#[test]
fn bincode_pk() {
let pk = GE::generator();
let bin = bincode::serialize(&pk).unwrap();
let decoded: G2Point = bincode::deserialize(bin.as_slice()).unwrap();
assert_eq!(decoded, pk);
}
#[test]
#[should_panic]
#[allow(clippy::op_ref)] // Enables type inference.
fn test_serdes_bad_pk() {
let pk = GE::generator();
let s = serde_json::to_string(&pk).expect("Failed in serialization");
// we make sure that the string encodes an invalid point:
let s: String = s.replace("30", "20");
let des_pk: GE = serde_json::from_str(&s).expect("Failed in deserialization");
let eight = ECScalar::from(&BigInt::from(8));
assert_eq!(des_pk, pk * &eight);
}
#[test]
fn test_from_mpz() {
let rand_scalar: FE = ECScalar::new_random();
let rand_bn = rand_scalar.to_big_int();
let rand_scalar2: FE = ECScalar::from(&rand_bn);
assert_eq!(rand_scalar, rand_scalar2);
}
#[test]
fn test_minus_point() {
let a: FE = ECScalar::new_random();
let b: FE = ECScalar::new_random();
let a_minus_b_fe: FE = a.sub(&b.get_element());
let base: GE = ECPoint::generator();
let point_ab1 = base * a_minus_b_fe;
let point_a = base * a;
let point_b = base * b;
let point_ab2 = point_a.sub_point(&point_b.get_element());
println!(
"point ab1: {:?}",
point_ab1.bytes_compressed_to_big_int().to_hex()
);
println!(
"point ab2: {:?}",
point_ab2.bytes_compressed_to_big_int().to_hex()
);
assert_eq!(point_ab1, point_ab2);
}
#[test]
fn test_add_point() {
let a: FE = ECScalar::new_random();
let b: FE = ECScalar::new_random();
let a_plus_b_fe = a + b;
let base: GE = ECPoint::generator();
let point_ab1 = base * a_plus_b_fe;
let point_a = base * a;
let point_b = base * b;
let point_ab2 = point_a.add_point(&point_b.get_element());
assert_eq!(point_ab1, point_ab2);
}
#[test]
fn test_add_scalar() {
let a: FE = ECScalar::new_random();
let zero: FE = FE::zero();
let a_plus_zero: FE = a + zero;
assert_eq!(a_plus_zero, a);
}
#[test]
fn test_mul_scalar() {
let a = [
10, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 10, 10, 10,
];
let a_bn = BigInt::from_bytes(&a[..]);
let a_fe: FE = ECScalar::from(&a_bn);
let five = BigInt::from(5);
let five_fe: FE = ECScalar::from(&five);
println!("five_fe: {:?}", five_fe.clone());
let five_a_bn = BigInt::mod_mul(&a_bn, &five, &FE::q());
let five_a_fe = five_fe * a_fe;
let five_a_fe_2: FE = ECScalar::from(&five_a_bn);
assert_eq!(five_a_fe, five_a_fe_2);
}
#[test]
fn test_mul_point() {
let a: FE = ECScalar::new_random();
let b: FE = ECScalar::new_random();
let a_mul_b_fe = a * b;
let base: GE = ECPoint::generator();
let point_ab1 = base * a_mul_b_fe;
let point_a = base * a;
let point_ab2 = point_a.scalar_mul(&b.get_element());
assert_eq!(point_ab1, point_ab2);
}
#[test]
fn test_invert() {
let a: FE = ECScalar::new_random();
let a_bn = a.to_big_int();
let a_inv = a.invert();
let a_inv_bn_1 = BigInt::mod_inv(&a_bn, &FE::q()).unwrap();
let a_inv_bn_2 = a_inv.to_big_int();
assert_eq!(a_inv_bn_1, a_inv_bn_2);
}
#[test]
fn test_scalar_mul_multiply_by_1() {
let g: GE = ECPoint::generator();
let fe: FE = ECScalar::from(&BigInt::from(1));
let b_tag = g * fe;
assert_eq!(b_tag, g);
}
#[test]
fn base_point2_nothing_up_my_sleeve() {
// Generate base_point2
let cs = &[1u8];
let msg = &[1u8];
let point = <G2 as HashToCurve<ExpandMsgXmd<Sha256>>>::hash_to_curve(msg, cs).into_affine();
assert!(point.in_subgroup());
// Print in uncompressed form
use pairing_plus::EncodedPoint;
let point_uncompressed = G2Uncompressed::from_affine(point);
println!("Uncompressed base_point2: {:?}", point_uncompressed);
// Check that ECPoint::base_point2() returns generated point
let base_point2: GE = ECPoint::base_point2();
assert_eq!(point, base_point2.ge);
}
}
| 29.282609 | 102 | 0.569785 |
507edcb7def8ca3cb8fcc629fb0ba0b260bbb895 | 45,604 | //! This module implements match statement exhaustiveness checking and usefulness checking
//! for match arms.
//!
//! It is modeled on the rustc module `librustc_mir_build::hair::pattern::_match`, which
//! contains very detailed documentation about the algorithms used here. I've duplicated
//! most of that documentation below.
//!
//! This file includes the logic for exhaustiveness and usefulness checking for
//! pattern-matching. Specifically, given a list of patterns for a type, we can
//! tell whether:
//! - (a) the patterns cover every possible constructor for the type (exhaustiveness).
//! - (b) each pattern is necessary (usefulness).
//!
//! The algorithm implemented here is a modified version of the one described in
//! <http://moscova.inria.fr/~maranget/papers/warn/index.html>.
//! However, to save future implementors from reading the original paper, we
//! summarise the algorithm here to hopefully save time and be a little clearer
//! (without being so rigorous).
//!
//! The core of the algorithm revolves about a "usefulness" check. In particular, we
//! are trying to compute a predicate `U(P, p)` where `P` is a list of patterns (we refer to this as
//! a matrix). `U(P, p)` represents whether, given an existing list of patterns
//! `P_1 ..= P_m`, adding a new pattern `p` will be "useful" (that is, cover previously-
//! uncovered values of the type).
//!
//! If we have this predicate, then we can easily compute both exhaustiveness of an
//! entire set of patterns and the individual usefulness of each one.
//! (a) the set of patterns is exhaustive iff `U(P, _)` is false (i.e., adding a wildcard
//! match doesn't increase the number of values we're matching)
//! (b) a pattern `P_i` is not useful if `U(P[0..=(i-1)], P_i)` is false (i.e., adding a
//! pattern to those that have come before it doesn't increase the number of values
//! we're matching).
//!
//! During the course of the algorithm, the rows of the matrix won't just be individual patterns,
//! but rather partially-deconstructed patterns in the form of a list of patterns. The paper
//! calls those pattern-vectors, and we will call them pattern-stacks. The same holds for the
//! new pattern `p`.
//!
//! For example, say we have the following:
//!
//! ```ignore
//! // x: (Option<bool>, Result<()>)
//! match x {
//! (Some(true), _) => (),
//! (None, Err(())) => (),
//! (None, Err(_)) => (),
//! }
//! ```
//!
//! Here, the matrix `P` starts as:
//!
//! ```text
//! [
//! [(Some(true), _)],
//! [(None, Err(()))],
//! [(None, Err(_))],
//! ]
//! ```
//!
//! We can tell it's not exhaustive, because `U(P, _)` is true (we're not covering
//! `[(Some(false), _)]`, for instance). In addition, row 3 is not useful, because
//! all the values it covers are already covered by row 2.
//!
//! A list of patterns can be thought of as a stack, because we are mainly interested in the top of
//! the stack at any given point, and we can pop or apply constructors to get new pattern-stacks.
//! To match the paper, the top of the stack is at the beginning / on the left.
//!
//! There are two important operations on pattern-stacks necessary to understand the algorithm:
//!
//! 1. We can pop a given constructor off the top of a stack. This operation is called
//! `specialize`, and is denoted `S(c, p)` where `c` is a constructor (like `Some` or
//! `None`) and `p` a pattern-stack.
//! If the pattern on top of the stack can cover `c`, this removes the constructor and
//! pushes its arguments onto the stack. It also expands OR-patterns into distinct patterns.
//! Otherwise the pattern-stack is discarded.
//! This essentially filters those pattern-stacks whose top covers the constructor `c` and
//! discards the others.
//!
//! For example, the first pattern above initially gives a stack `[(Some(true), _)]`. If we
//! pop the tuple constructor, we are left with `[Some(true), _]`, and if we then pop the
//! `Some` constructor we get `[true, _]`. If we had popped `None` instead, we would get
//! nothing back.
//!
//! This returns zero or more new pattern-stacks, as follows. We look at the pattern `p_1`
//! on top of the stack, and we have four cases:
//!
//! * 1.1. `p_1 = c(r_1, .., r_a)`, i.e. the top of the stack has constructor `c`. We push onto
//! the stack the arguments of this constructor, and return the result:
//!
//! r_1, .., r_a, p_2, .., p_n
//!
//! * 1.2. `p_1 = c'(r_1, .., r_a')` where `c ≠ c'`. We discard the current stack and return
//! nothing.
//! * 1.3. `p_1 = _`. We push onto the stack as many wildcards as the constructor `c` has
//! arguments (its arity), and return the resulting stack:
//!
//! _, .., _, p_2, .., p_n
//!
//! * 1.4. `p_1 = r_1 | r_2`. We expand the OR-pattern and then recurse on each resulting stack:
//!
//! S(c, (r_1, p_2, .., p_n))
//! S(c, (r_2, p_2, .., p_n))
//!
//! 2. We can pop a wildcard off the top of the stack. This is called `D(p)`, where `p` is
//! a pattern-stack.
//! This is used when we know there are missing constructor cases, but there might be
//! existing wildcard patterns, so to check the usefulness of the matrix, we have to check
//! all its *other* components.
//!
//! It is computed as follows. We look at the pattern `p_1` on top of the stack,
//! and we have three cases:
//! * 2.1. `p_1 = c(r_1, .., r_a)`. We discard the current stack and return nothing.
//! * 2.2. `p_1 = _`. We return the rest of the stack:
//!
//! p_2, .., p_n
//!
//! * 2.3. `p_1 = r_1 | r_2`. We expand the OR-pattern and then recurse on each resulting stack:
//!
//! D((r_1, p_2, .., p_n))
//! D((r_2, p_2, .., p_n))
//!
//! Note that the OR-patterns are not always used directly in Rust, but are used to derive the
//! exhaustive integer matching rules, so they're written here for posterity.
//!
//! Both those operations extend straightforwardly to a list or pattern-stacks, i.e. a matrix, by
//! working row-by-row. Popping a constructor ends up keeping only the matrix rows that start with
//! the given constructor, and popping a wildcard keeps those rows that start with a wildcard.
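//!
//! As a quick matrix-level illustration (added here, not from the original
//! write-up): applying `D` to
//!
//! ```text
//! [
//!     [_, true],
//!     [None, false],
//! ]
//! ```
//!
//! keeps only the first row, because it is the only one whose first component
//! is a wildcard, and pops that wildcard, leaving `[[true]]`.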
//!
//!
//! The algorithm for computing `U`
//! -------------------------------
//! The algorithm is inductive (on the number of columns: i.e., components of tuple patterns).
//! That means we're going to check the components from left-to-right, so the algorithm
//! operates principally on the first component of the matrix and new pattern-stack `p`.
//! This algorithm is realised in the `is_useful` function.
//!
//! Base case (`n = 0`, i.e., an empty tuple pattern):
//! - If `P` already contains an empty pattern (i.e., if the number of patterns `m > 0`), then
//! `U(P, p)` is false.
//! - Otherwise, `P` must be empty, so `U(P, p)` is true.
//!
//! Inductive step (`n > 0`, i.e., whether there's at least one column [which may then be expanded
//! into further columns later]). We're going to match on the top of the new pattern-stack, `p_1`:
//!
//! - If `p_1 == c(r_1, .., r_a)`, i.e. we have a constructor pattern.
//! Then, the usefulness of `p_1` can be reduced to whether it is useful when
//! we ignore all the patterns in the first column of `P` that involve other constructors.
//! This is where `S(c, P)` comes in:
//!
//! ```text
//! U(P, p) := U(S(c, P), S(c, p))
//! ```
//!
//! This special case is handled in `is_useful_specialized`.
//!
//! For example, if `P` is:
//!
//! ```text
//! [
//! [Some(true), _],
//! [None, 0],
//! ]
//! ```
//!
//! and `p` is `[Some(false), 0]`, then we don't care about row 2 since we know `p` only
//! matches values that row 2 doesn't. For row 1 however, we need to dig into the
//! arguments of `Some` to know whether some new value is covered. So we compute
//! `U([[true, _]], [false, 0])`.
//!
//! - If `p_1 == _`, then we look at the list of constructors that appear in the first component of
//! the rows of `P`:
//! - If there are some constructors that aren't present, then we might think that the
//! wildcard `_` is useful, since it covers those constructors that weren't covered
//! before.
//! That's almost correct, but only works if there were no wildcards in those first
//! components. So we need to check that `p` is useful with respect to the rows that
//! start with a wildcard, if there are any. This is where `D` comes in:
//! `U(P, p) := U(D(P), D(p))`
//!
//! For example, if `P` is:
//! ```text
//! [
//! [_, true, _],
//! [None, false, 1],
//! ]
//! ```
//! and `p` is `[_, false, _]`, the `Some` constructor doesn't appear in `P`. So if we
//! only had row 2, we'd know that `p` is useful. However row 1 starts with a
//! wildcard, so we need to check whether `U([[true, _]], [false, 1])`.
//!
//! - Otherwise, all possible constructors (for the relevant type) are present. In this
//! case we must check whether the wildcard pattern covers any unmatched value. For
//! that, we can think of the `_` pattern as a big OR-pattern that covers all
//! possible constructors. For `Option`, that would mean `_ = None | Some(_)` for
//! example. The wildcard pattern is useful in this case if it is useful when
//! specialized to one of the possible constructors. So we compute:
//! `U(P, p) := ∃(k ϵ constructors) U(S(k, P), S(k, p))`
//!
//! For example, if `P` is:
//! ```text
//! [
//! [Some(true), _],
//! [None, false],
//! ]
//! ```
//! and `p` is `[_, false]`, both `None` and `Some` constructors appear in the first
//! components of `P`. We will therefore try popping both constructors in turn: we
//! compute `U([[true, _]], [_, false])` for the `Some` constructor, and `U([[false]],
//! [false])` for the `None` constructor. The first case returns true, so we know that
//! `p` is useful for `P`. Indeed, it matches `[Some(false), _]` that wasn't matched
//! before.
//!
//! - If `p_1 == r_1 | r_2`, then the usefulness depends on each `r_i` separately:
//!
//! ```text
//! U(P, p) := U(P, (r_1, p_2, .., p_n))
//! || U(P, (r_2, p_2, .., p_n))
//! ```
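//!
//! As a worked sketch (added for illustration, not from the original text):
//! for `P = [[Some(true)], [None]]` and `p = [_]`, both constructors of
//! `Option` appear in the first column, so we compute
//! `U(S(Some, P), S(Some, p)) = U([[true]], [_])` and
//! `U(S(None, P), S(None, p)) = U([[]], [])`. The first is true, since
//! `Some(false)` is still uncovered, so the wildcard is useful and the
//! original match was not exhaustive.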
use std::sync::Arc;
use hir_def::{
adt::VariantData,
body::Body,
expr::{Expr, Literal, Pat, PatId},
AdtId, EnumVariantId, VariantId,
};
use ra_arena::Idx;
use smallvec::{smallvec, SmallVec};
use crate::{db::HirDatabase, ApplicationTy, InferenceResult, Ty, TypeCtor};
#[derive(Debug, Clone, Copy)]
/// Either a pattern from the source code being analyzed, represented
/// as a `PatId`, or a `Wild` pattern which is created as an intermediate
/// step in the match checking algorithm and thus is not backed by a
/// real `PatId`.
///
/// Note that it is totally valid for the `PatId` variant to contain
/// a `PatId` which resolves to a `Wild` pattern, if that wild pattern
/// exists in the source code being analyzed.
enum PatIdOrWild {
PatId(PatId),
Wild,
}
impl PatIdOrWild {
fn as_pat(self, cx: &MatchCheckCtx) -> Pat {
match self {
PatIdOrWild::PatId(id) => cx.body.pats[id].clone(),
PatIdOrWild::Wild => Pat::Wild,
}
}
fn as_id(self) -> Option<PatId> {
match self {
PatIdOrWild::PatId(id) => Some(id),
PatIdOrWild::Wild => None,
}
}
}
impl From<PatId> for PatIdOrWild {
fn from(pat_id: PatId) -> Self {
Self::PatId(pat_id)
}
}
impl From<&PatId> for PatIdOrWild {
fn from(pat_id: &PatId) -> Self {
Self::PatId(*pat_id)
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub(super) enum MatchCheckErr {
NotImplemented,
MalformedMatchArm,
/// Used when type inference cannot resolve the type of
/// a pattern or expression.
Unknown,
}
/// The return type of `is_useful` is either an indication of usefulness
/// of the match arm, or an error in the case the match statement
/// is made up of types for which exhaustiveness checking is currently
/// not completely implemented.
///
/// The `std::result::Result` type is used here rather than a custom enum
/// to allow the use of `?`.
pub(super) type MatchCheckResult<T> = Result<T, MatchCheckErr>;
#[derive(Debug)]
/// A row in a Matrix.
///
/// This type is modeled from the struct of the same name in `rustc`.
pub(super) struct PatStack(PatStackInner);
type PatStackInner = SmallVec<[PatIdOrWild; 2]>;
impl PatStack {
pub(super) fn from_pattern(pat_id: PatId) -> PatStack {
Self(smallvec!(pat_id.into()))
}
pub(super) fn from_wild() -> PatStack {
Self(smallvec!(PatIdOrWild::Wild))
}
fn from_slice(slice: &[PatIdOrWild]) -> PatStack {
Self(SmallVec::from_slice(slice))
}
fn from_vec(v: PatStackInner) -> PatStack {
Self(v)
}
fn get_head(&self) -> Option<PatIdOrWild> {
self.0.first().copied()
}
fn tail(&self) -> &[PatIdOrWild] {
self.0.get(1..).unwrap_or(&[])
}
fn to_tail(&self) -> PatStack {
Self::from_slice(self.tail())
}
fn replace_head_with<I, T>(&self, pats: I) -> PatStack
where
I: Iterator<Item = T>,
T: Into<PatIdOrWild>,
{
let mut patterns: PatStackInner = smallvec![];
for pat in pats {
patterns.push(pat.into());
}
for pat in &self.0[1..] {
patterns.push(*pat);
}
PatStack::from_vec(patterns)
}
/// Computes `D(self)`.
///
/// See the module docs and the associated documentation in rustc for details.
fn specialize_wildcard(&self, cx: &MatchCheckCtx) -> Option<PatStack> {
if matches!(self.get_head()?.as_pat(cx), Pat::Wild) {
Some(self.to_tail())
} else {
None
}
}
/// Computes `S(constructor, self)`.
///
/// See the module docs and the associated documentation in rustc for details.
fn specialize_constructor(
&self,
cx: &MatchCheckCtx,
constructor: &Constructor,
) -> MatchCheckResult<Option<PatStack>> {
let head = match self.get_head() {
Some(head) => head,
None => return Ok(None),
};
let head_pat = head.as_pat(cx);
let result = match (head_pat, constructor) {
(Pat::Tuple { args: ref pat_ids, ellipsis }, Constructor::Tuple { arity: _ }) => {
if ellipsis.is_some() {
// If there are ellipsis here, we should add the correct number of
// Pat::Wild patterns to `pat_ids`. We should be able to use the
// constructors arity for this, but at the time of writing we aren't
// correctly calculating this arity when ellipsis are present.
return Err(MatchCheckErr::NotImplemented);
}
Some(self.replace_head_with(pat_ids.iter()))
}
(Pat::Lit(lit_expr), Constructor::Bool(constructor_val)) => {
match cx.body.exprs[lit_expr] {
Expr::Literal(Literal::Bool(pat_val)) if *constructor_val == pat_val => {
Some(self.to_tail())
}
// it was a bool but the value doesn't match
Expr::Literal(Literal::Bool(_)) => None,
// perhaps this is actually unreachable given we have
// already checked that these match arms have the appropriate type?
_ => return Err(MatchCheckErr::NotImplemented),
}
}
(Pat::Wild, constructor) => Some(self.expand_wildcard(cx, constructor)?),
(Pat::Path(_), Constructor::Enum(constructor)) => {
// unit enum variants become `Pat::Path`
let pat_id = head.as_id().expect("we know this isn't a wild");
if !enum_variant_matches(cx, pat_id, *constructor) {
None
} else {
Some(self.to_tail())
}
}
(
Pat::TupleStruct { args: ref pat_ids, ellipsis, .. },
Constructor::Enum(enum_constructor),
) => {
let pat_id = head.as_id().expect("we know this isn't a wild");
if !enum_variant_matches(cx, pat_id, *enum_constructor) {
None
} else {
let constructor_arity = constructor.arity(cx)?;
if let Some(ellipsis_position) = ellipsis {
// If there are ellipsis in the pattern, the ellipsis must take the place
// of at least one sub-pattern, so `pat_ids` should be smaller than the
// constructor arity.
if pat_ids.len() < constructor_arity {
let mut new_patterns: Vec<PatIdOrWild> = vec![];
for pat_id in &pat_ids[0..ellipsis_position] {
new_patterns.push((*pat_id).into());
}
for _ in 0..(constructor_arity - pat_ids.len()) {
new_patterns.push(PatIdOrWild::Wild);
}
for pat_id in &pat_ids[ellipsis_position..pat_ids.len()] {
new_patterns.push((*pat_id).into());
}
Some(self.replace_head_with(new_patterns.into_iter()))
} else {
return Err(MatchCheckErr::MalformedMatchArm);
}
} else {
// If there is no ellipsis in the tuple pattern, the number
// of patterns must equal the constructor arity.
if pat_ids.len() == constructor_arity {
Some(self.replace_head_with(pat_ids.into_iter()))
} else {
return Err(MatchCheckErr::MalformedMatchArm);
}
}
}
}
(Pat::Record { args: ref arg_patterns, .. }, Constructor::Enum(e)) => {
let pat_id = head.as_id().expect("we know this isn't a wild");
if !enum_variant_matches(cx, pat_id, *e) {
None
} else {
match cx.db.enum_data(e.parent).variants[e.local_id].variant_data.as_ref() {
VariantData::Record(struct_field_arena) => {
// Here we treat any missing fields in the record as the wild pattern, as
// if the record has ellipsis. We want to do this here even if the
// record does not contain ellipsis, because it allows us to continue
// enforcing exhaustiveness for the rest of the match statement.
//
// Creating the diagnostic for the missing field in the pattern
// should be done in a different diagnostic.
let patterns = struct_field_arena.iter().map(|(_, struct_field)| {
arg_patterns
.iter()
.find(|pat| pat.name == struct_field.name)
.map(|pat| PatIdOrWild::from(pat.pat))
.unwrap_or(PatIdOrWild::Wild)
});
Some(self.replace_head_with(patterns))
}
_ => return Err(MatchCheckErr::Unknown),
}
}
}
(Pat::Or(_), _) => return Err(MatchCheckErr::NotImplemented),
(_, _) => return Err(MatchCheckErr::NotImplemented),
};
Ok(result)
}
/// A special case of `specialize_constructor` where the head of the pattern stack
/// is a Wild pattern.
///
/// Replaces the Wild pattern at the head of the pattern stack with N Wild patterns
/// (N >= 0), where N is the arity of the given constructor.
fn expand_wildcard(
&self,
cx: &MatchCheckCtx,
constructor: &Constructor,
) -> MatchCheckResult<PatStack> {
assert_eq!(
Pat::Wild,
self.get_head().expect("expand_wildcard called on empty PatStack").as_pat(cx),
"expand_wildcard must only be called on PatStack with wild at head",
);
let mut patterns: PatStackInner = smallvec![];
for _ in 0..constructor.arity(cx)? {
patterns.push(PatIdOrWild::Wild);
}
for pat in &self.0[1..] {
patterns.push(*pat);
}
Ok(PatStack::from_vec(patterns))
}
}
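// Worked example (added for illustration, not in the original source):
// specializing the stack `[Some(true), x]` with the `Some` constructor pops
// `Some` and pushes its argument, yielding `[true, x]`; specializing the same
// stack with `None` produces no stack at all. This mirrors `S(c, p)` from the
// module docs above.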
/// A collection of PatStack.
///
/// This type is modeled from the struct of the same name in `rustc`.
pub(super) struct Matrix(Vec<PatStack>);
impl Matrix {
pub(super) fn empty() -> Self {
Self(vec![])
}
pub(super) fn push(&mut self, cx: &MatchCheckCtx, row: PatStack) {
if let Some(Pat::Or(pat_ids)) = row.get_head().map(|pat_id| pat_id.as_pat(cx)) {
// Or patterns are expanded here
for pat_id in pat_ids {
self.0.push(PatStack::from_pattern(pat_id));
}
} else {
self.0.push(row);
}
}
fn is_empty(&self) -> bool {
self.0.is_empty()
}
fn heads(&self) -> Vec<PatIdOrWild> {
self.0.iter().flat_map(|p| p.get_head()).collect()
}
/// Computes `D(self)` for each contained PatStack.
///
/// See the module docs and the associated documentation in rustc for details.
fn specialize_wildcard(&self, cx: &MatchCheckCtx) -> Self {
Self::collect(cx, self.0.iter().filter_map(|r| r.specialize_wildcard(cx)))
}
/// Computes `S(constructor, self)` for each contained PatStack.
///
/// See the module docs and the associated documentation in rustc for details.
fn specialize_constructor(
&self,
cx: &MatchCheckCtx,
constructor: &Constructor,
) -> MatchCheckResult<Self> {
let mut new_matrix = Matrix::empty();
for pat in &self.0 {
if let Some(pat) = pat.specialize_constructor(cx, constructor)? {
new_matrix.push(cx, pat);
}
}
Ok(new_matrix)
}
fn collect<T: IntoIterator<Item = PatStack>>(cx: &MatchCheckCtx, iter: T) -> Self {
let mut matrix = Matrix::empty();
for pat in iter {
// using push ensures we expand or-patterns
matrix.push(cx, pat);
}
matrix
}
}
#[derive(Clone, Debug, PartialEq)]
/// An indication of the usefulness of a given match arm, where
/// usefulness is defined as matching some patterns which were
/// not matched by any prior match arms.
///
/// We may eventually need an `Unknown` variant here.
pub(super) enum Usefulness {
Useful,
NotUseful,
}
pub(super) struct MatchCheckCtx<'a> {
pub(super) match_expr: Idx<Expr>,
pub(super) body: Arc<Body>,
pub(super) infer: Arc<InferenceResult>,
pub(super) db: &'a dyn HirDatabase,
}
/// Given a set of patterns `matrix`, and a pattern to consider `v`, determines
/// whether `v` is useful. A pattern is useful if it covers cases which were
/// not previously covered.
///
/// When calling this function externally (that is, not from the recursive calls) it
/// is expected that you have already type checked the match arms. All patterns in
/// `matrix` should be the same type as `v`, and they should all be the same type
/// as the match expression.
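///
/// Illustrative example: given the matrix `[[Some(true)], [None]]`, the pattern
/// stack `[Some(false)]` is useful because no row covers the value
/// `Some(false)`, whereas `[None]` is not useful because the second row already
/// covers it.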
pub(super) fn is_useful(
cx: &MatchCheckCtx,
matrix: &Matrix,
v: &PatStack,
) -> MatchCheckResult<Usefulness> {
// Handle two special cases:
// - enum with no variants
// - `!` type
// In those cases, no match arm is useful.
match cx.infer[cx.match_expr].strip_references() {
Ty::Apply(ApplicationTy { ctor: TypeCtor::Adt(AdtId::EnumId(enum_id)), .. }) => {
if cx.db.enum_data(*enum_id).variants.is_empty() {
return Ok(Usefulness::NotUseful);
}
}
Ty::Apply(ApplicationTy { ctor: TypeCtor::Never, .. }) => {
return Ok(Usefulness::NotUseful);
}
_ => (),
}
let head = match v.get_head() {
Some(head) => head,
None => {
let result = if matrix.is_empty() { Usefulness::Useful } else { Usefulness::NotUseful };
return Ok(result);
}
};
if let Pat::Or(pat_ids) = head.as_pat(cx) {
let mut found_unimplemented = false;
let any_useful = pat_ids.iter().any(|&pat_id| {
let v = PatStack::from_pattern(pat_id);
match is_useful(cx, matrix, &v) {
Ok(Usefulness::Useful) => true,
Ok(Usefulness::NotUseful) => false,
_ => {
found_unimplemented = true;
false
}
}
});
return if any_useful {
Ok(Usefulness::Useful)
} else if found_unimplemented {
Err(MatchCheckErr::NotImplemented)
} else {
Ok(Usefulness::NotUseful)
};
}
if let Some(constructor) = pat_constructor(cx, head)? {
let matrix = matrix.specialize_constructor(&cx, &constructor)?;
let v = v
.specialize_constructor(&cx, &constructor)?
.expect("we know this can't fail because we get the constructor from `v.head()` above");
is_useful(&cx, &matrix, &v)
} else {
// expanding wildcard
let mut used_constructors: Vec<Constructor> = vec![];
for pat in matrix.heads() {
if let Some(constructor) = pat_constructor(cx, pat)? {
used_constructors.push(constructor);
}
}
// We assume here that the first constructor is the "correct" type. Since we
// only care about the "type" of the constructor (i.e. if it is a bool we
// don't care about the value), this assumption should be valid as long as
// the match statement is well formed. We currently uphold this invariant by
// filtering match arms before calling `is_useful`, only passing in match arms
// whose type matches the type of the match expression.
match &used_constructors.first() {
Some(constructor) if all_constructors_covered(&cx, constructor, &used_constructors) => {
// If all constructors are covered, then we need to consider whether
// any values are covered by this wildcard.
//
// For example, with matrix '[[Some(true)], [None]]', all
// constructors are covered (`Some`/`None`), so we need
// to perform specialization to see that our wildcard will cover
// the `Some(false)` case.
//
// Here we create a constructor for each variant and then check
// usefulness after specializing for that constructor.
let mut found_unimplemented = false;
for constructor in constructor.all_constructors(cx) {
let matrix = matrix.specialize_constructor(&cx, &constructor)?;
let v = v.expand_wildcard(&cx, &constructor)?;
match is_useful(&cx, &matrix, &v) {
Ok(Usefulness::Useful) => return Ok(Usefulness::Useful),
Ok(Usefulness::NotUseful) => continue,
_ => found_unimplemented = true,
};
}
if found_unimplemented {
Err(MatchCheckErr::NotImplemented)
} else {
Ok(Usefulness::NotUseful)
}
}
_ => {
// Either not all constructors are covered, or the only other arms
// are wildcards. Either way, this pattern is useful if it is useful
// when compared to those arms with wildcards.
let matrix = matrix.specialize_wildcard(&cx);
let v = v.to_tail();
is_useful(&cx, &matrix, &v)
}
}
}
}
#[derive(Debug, Clone, Copy)]
/// Similar to TypeCtor, but includes additional information about the specific
/// value being instantiated. For example, TypeCtor::Bool doesn't contain the
/// boolean value.
enum Constructor {
Bool(bool),
Tuple { arity: usize },
Enum(EnumVariantId),
}
impl Constructor {
fn arity(&self, cx: &MatchCheckCtx) -> MatchCheckResult<usize> {
let arity = match self {
Constructor::Bool(_) => 0,
Constructor::Tuple { arity } => *arity,
Constructor::Enum(e) => {
match cx.db.enum_data(e.parent).variants[e.local_id].variant_data.as_ref() {
VariantData::Tuple(struct_field_data) => struct_field_data.len(),
VariantData::Record(struct_field_data) => struct_field_data.len(),
VariantData::Unit => 0,
}
}
};
Ok(arity)
}
fn all_constructors(&self, cx: &MatchCheckCtx) -> Vec<Constructor> {
match self {
Constructor::Bool(_) => vec![Constructor::Bool(true), Constructor::Bool(false)],
Constructor::Tuple { .. } => vec![*self],
Constructor::Enum(e) => cx
.db
.enum_data(e.parent)
.variants
.iter()
.map(|(local_id, _)| {
Constructor::Enum(EnumVariantId { parent: e.parent, local_id })
})
.collect(),
}
}
}
/// Returns the constructor for the given pattern. Should only return None
/// in the case of a Wild pattern.
fn pat_constructor(cx: &MatchCheckCtx, pat: PatIdOrWild) -> MatchCheckResult<Option<Constructor>> {
let res = match pat.as_pat(cx) {
Pat::Wild => None,
        // FIXME somehow create the Tuple constructor with the proper arity. If there is
        // an ellipsis, the arity is not equal to the number of patterns.
Pat::Tuple { args: pats, ellipsis } if ellipsis.is_none() => {
Some(Constructor::Tuple { arity: pats.len() })
}
Pat::Lit(lit_expr) => match cx.body.exprs[lit_expr] {
Expr::Literal(Literal::Bool(val)) => Some(Constructor::Bool(val)),
_ => return Err(MatchCheckErr::NotImplemented),
},
Pat::TupleStruct { .. } | Pat::Path(_) | Pat::Record { .. } => {
let pat_id = pat.as_id().expect("we already know this pattern is not a wild");
let variant_id =
cx.infer.variant_resolution_for_pat(pat_id).ok_or(MatchCheckErr::Unknown)?;
match variant_id {
VariantId::EnumVariantId(enum_variant_id) => {
Some(Constructor::Enum(enum_variant_id))
}
_ => return Err(MatchCheckErr::NotImplemented),
}
}
_ => return Err(MatchCheckErr::NotImplemented),
};
Ok(res)
}
fn all_constructors_covered(
cx: &MatchCheckCtx,
constructor: &Constructor,
used_constructors: &[Constructor],
) -> bool {
match constructor {
Constructor::Tuple { arity } => {
used_constructors.iter().any(|constructor| match constructor {
Constructor::Tuple { arity: used_arity } => arity == used_arity,
_ => false,
})
}
Constructor::Bool(_) => {
if used_constructors.is_empty() {
return false;
}
let covers_true =
used_constructors.iter().any(|c| matches!(c, Constructor::Bool(true)));
let covers_false =
used_constructors.iter().any(|c| matches!(c, Constructor::Bool(false)));
covers_true && covers_false
}
Constructor::Enum(e) => cx.db.enum_data(e.parent).variants.iter().all(|(id, _)| {
for constructor in used_constructors {
if let Constructor::Enum(e) = constructor {
if id == e.local_id {
return true;
}
}
}
false
}),
}
}
fn enum_variant_matches(cx: &MatchCheckCtx, pat_id: PatId, enum_variant_id: EnumVariantId) -> bool {
Some(enum_variant_id.into()) == cx.infer.variant_resolution_for_pat(pat_id)
}
#[cfg(test)]
mod tests {
use crate::diagnostics::tests::check_diagnostics;
#[test]
fn empty_tuple() {
check_diagnostics(
r#"
fn main() {
match () { }
//^^ Missing match arm
match (()) { }
//^^^^ Missing match arm
match () { _ => (), }
match () { () => (), }
match (()) { (()) => (), }
}
"#,
);
}
#[test]
fn tuple_of_two_empty_tuple() {
check_diagnostics(
r#"
fn main() {
match ((), ()) { }
//^^^^^^^^ Missing match arm
match ((), ()) { ((), ()) => (), }
}
"#,
);
}
#[test]
fn boolean() {
check_diagnostics(
r#"
fn test_main() {
match false { }
//^^^^^ Missing match arm
match false { true => (), }
//^^^^^ Missing match arm
match (false, true) {}
//^^^^^^^^^^^^^ Missing match arm
match (false, true) { (true, true) => (), }
//^^^^^^^^^^^^^ Missing match arm
match (false, true) {
//^^^^^^^^^^^^^ Missing match arm
(false, true) => (),
(false, false) => (),
(true, false) => (),
}
match (false, true) { (true, _x) => (), }
//^^^^^^^^^^^^^ Missing match arm
match false { true => (), false => (), }
match (false, true) {
(false, _) => (),
(true, false) => (),
(_, true) => (),
}
match (false, true) {
(true, true) => (),
(true, false) => (),
(false, true) => (),
(false, false) => (),
}
match (false, true) {
(true, _x) => (),
(false, true) => (),
(false, false) => (),
}
match (false, true, false) {
(false, ..) => (),
(true, ..) => (),
}
match (false, true, false) {
(.., false) => (),
(.., true) => (),
}
match (false, true, false) { (..) => (), }
}
"#,
);
}
#[test]
fn tuple_of_tuple_and_bools() {
check_diagnostics(
r#"
fn main() {
match (false, ((), false)) {}
//^^^^^^^^^^^^^^^^^^^^ Missing match arm
match (false, ((), false)) { (true, ((), true)) => (), }
//^^^^^^^^^^^^^^^^^^^^ Missing match arm
match (false, ((), false)) { (true, _) => (), }
//^^^^^^^^^^^^^^^^^^^^ Missing match arm
match (false, ((), false)) {
(true, ((), true)) => (),
(true, ((), false)) => (),
(false, ((), true)) => (),
(false, ((), false)) => (),
}
match (false, ((), false)) {
(true, ((), true)) => (),
(true, ((), false)) => (),
(false, _) => (),
}
}
"#,
);
}
#[test]
fn enums() {
check_diagnostics(
r#"
enum Either { A, B, }
fn main() {
match Either::A { }
//^^^^^^^^^ Missing match arm
match Either::B { Either::A => (), }
//^^^^^^^^^ Missing match arm
match &Either::B {
//^^^^^^^^^^ Missing match arm
Either::A => (),
}
match Either::B {
Either::A => (), Either::B => (),
}
match &Either::B {
Either::A => (), Either::B => (),
}
}
"#,
);
}
#[test]
fn enum_containing_bool() {
check_diagnostics(
r#"
enum Either { A(bool), B }
fn main() {
match Either::B { }
//^^^^^^^^^ Missing match arm
match Either::B {
//^^^^^^^^^ Missing match arm
Either::A(true) => (), Either::B => ()
}
match Either::B {
Either::A(true) => (),
Either::A(false) => (),
Either::B => (),
}
match Either::B {
Either::B => (),
_ => (),
}
match Either::B {
Either::A(_) => (),
Either::B => (),
}
}
"#,
);
}
#[test]
fn enum_different_sizes() {
check_diagnostics(
r#"
enum Either { A(bool), B(bool, bool) }
fn main() {
match Either::A(false) {
//^^^^^^^^^^^^^^^^ Missing match arm
Either::A(_) => (),
Either::B(false, _) => (),
}
match Either::A(false) {
Either::A(_) => (),
Either::B(true, _) => (),
Either::B(false, _) => (),
}
match Either::A(false) {
Either::A(true) | Either::A(false) => (),
Either::B(true, _) => (),
Either::B(false, _) => (),
}
}
"#,
);
}
#[test]
fn tuple_of_enum_no_diagnostic() {
check_diagnostics(
r#"
enum Either { A(bool), B(bool, bool) }
enum Either2 { C, D }
fn main() {
match (Either::A(false), Either2::C) {
(Either::A(true), _) | (Either::A(false), _) => (),
(Either::B(true, _), Either2::C) => (),
(Either::B(false, _), Either2::C) => (),
(Either::B(_, _), Either2::D) => (),
}
}
"#,
);
}
#[test]
fn mismatched_types() {
// Match statements with arms that don't match the
// expression pattern do not fire this diagnostic.
check_diagnostics(
r#"
enum Either { A, B }
enum Either2 { C, D }
fn main() {
match Either::A {
Either2::C => (),
Either2::D => (),
}
match (true, false) {
(true, false, true) => (),
(true) => (),
}
match (0) { () => () }
match Unresolved::Bar { Unresolved::Baz => () }
}
"#,
);
}
#[test]
fn malformed_match_arm_tuple_enum_missing_pattern() {
// We are testing to be sure we don't panic here when the match
// arm `Either::B` is missing its pattern.
check_diagnostics(
r#"
enum Either { A, B(u32) }
fn main() {
match Either::A {
Either::A => (),
Either::B() => (),
}
}
"#,
);
}
#[test]
fn expr_diverges() {
check_diagnostics(
r#"
enum Either { A, B }
fn main() {
match loop {} {
Either::A => (),
Either::B => (),
}
match loop {} {
Either::A => (),
}
match loop { break Foo::A } {
//^^^^^^^^^^^^^^^^^^^^^ Missing match arm
Either::A => (),
}
match loop { break Foo::A } {
Either::A => (),
Either::B => (),
}
}
"#,
);
}
#[test]
fn expr_partially_diverges() {
check_diagnostics(
r#"
enum Either<T> { A(T), B }
fn foo() -> Either<!> { Either::B }
fn main() -> u32 {
match foo() {
Either::A(val) => val,
Either::B => 0,
}
}
"#,
);
}
#[test]
fn enum_record() {
check_diagnostics(
r#"
enum Either { A { foo: bool }, B }
fn main() {
let a = Either::A { foo: true };
match a { }
//^ Missing match arm
match a { Either::A { foo: true } => () }
//^ Missing match arm
match a {
Either::A { } => (),
//^^^ Missing structure fields:
// | - foo
Either::B => (),
}
match a {
//^ Missing match arm
Either::A { } => (),
} //^^^ Missing structure fields:
// | - foo
match a {
Either::A { foo: true } => (),
Either::A { foo: false } => (),
Either::B => (),
}
match a {
Either::A { foo: _ } => (),
Either::B => (),
}
}
"#,
);
}
#[test]
fn enum_record_fields_out_of_order() {
check_diagnostics(
r#"
enum Either {
A { foo: bool, bar: () },
B,
}
fn main() {
let a = Either::A { foo: true, bar: () };
match a {
//^ Missing match arm
Either::A { bar: (), foo: false } => (),
Either::A { foo: true, bar: () } => (),
}
match a {
Either::A { bar: (), foo: false } => (),
Either::A { foo: true, bar: () } => (),
Either::B => (),
}
}
"#,
);
}
#[test]
fn enum_record_ellipsis() {
check_diagnostics(
r#"
enum Either {
A { foo: bool, bar: bool },
B,
}
fn main() {
let a = Either::B;
match a {
//^ Missing match arm
Either::A { foo: true, .. } => (),
Either::B => (),
}
match a {
//^ Missing match arm
Either::A { .. } => (),
}
match a {
Either::A { foo: true, .. } => (),
Either::A { foo: false, .. } => (),
Either::B => (),
}
match a {
Either::A { .. } => (),
Either::B => (),
}
}
"#,
);
}
#[test]
fn enum_tuple_partial_ellipsis() {
check_diagnostics(
r#"
enum Either {
A(bool, bool, bool, bool),
B,
}
fn main() {
match Either::B {
//^^^^^^^^^ Missing match arm
Either::A(true, .., true) => (),
Either::A(true, .., false) => (),
Either::A(false, .., false) => (),
Either::B => (),
}
match Either::B {
//^^^^^^^^^ Missing match arm
Either::A(true, .., true) => (),
Either::A(true, .., false) => (),
Either::A(.., true) => (),
Either::B => (),
}
match Either::B {
Either::A(true, .., true) => (),
Either::A(true, .., false) => (),
Either::A(false, .., true) => (),
Either::A(false, .., false) => (),
Either::B => (),
}
match Either::B {
Either::A(true, .., true) => (),
Either::A(true, .., false) => (),
Either::A(.., true) => (),
Either::A(.., false) => (),
Either::B => (),
}
}
"#,
);
}
#[test]
fn never() {
check_diagnostics(
r#"
enum Never {}
fn enum_(never: Never) {
match never {}
}
fn enum_ref(never: &Never) {
match never {}
}
fn bang(never: !) {
match never {}
}
"#,
);
}
#[test]
fn or_pattern_panic() {
check_diagnostics(
r#"
pub enum Category { Infinity, Zero }
fn panic(a: Category, b: Category) {
match (a, b) {
(Category::Zero | Category::Infinity, _) => (),
(_, Category::Zero | Category::Infinity) => (),
}
// FIXME: This is a false positive, but the code used to cause a panic in the match checker,
// so this acts as a regression test for that.
match (a, b) {
//^^^^^^ Missing match arm
(Category::Infinity, Category::Infinity) | (Category::Zero, Category::Zero) => (),
(Category::Infinity | Category::Zero, _) => (),
}
}
"#,
);
}
mod false_negatives {
//! The implementation of match checking here is a work in progress. As we roll this out, we
//! prefer false negatives to false positives (ideally there would be no false positives). This
//! test module should document known false negatives. Eventually we will have a complete
//! implementation of match checking and this module will be empty.
//!
//! The reasons for documenting known false negatives:
//!
//! 1. It acts as a backlog of work that can be done to improve the behavior of the system.
//! 2. It ensures the code doesn't panic when handling these cases.
use super::*;
#[test]
fn integers() {
// We don't currently check integer exhaustiveness.
check_diagnostics(
r#"
fn main() {
match 5 {
10 => (),
11..20 => (),
}
}
"#,
);
}
#[test]
fn internal_or() {
// We do not currently handle patterns with internal `or`s.
check_diagnostics(
r#"
fn main() {
enum Either { A(bool), B }
match Either::B {
Either::A(true | false) => (),
}
}
"#,
);
}
#[test]
fn tuple_of_bools_with_ellipsis_at_end_missing_arm() {
// We don't currently handle tuple patterns with ellipsis.
check_diagnostics(
r#"
fn main() {
match (false, true, false) {
(false, ..) => (),
}
}
"#,
);
}
#[test]
fn tuple_of_bools_with_ellipsis_at_beginning_missing_arm() {
// We don't currently handle tuple patterns with ellipsis.
check_diagnostics(
r#"
fn main() {
match (false, true, false) {
(.., false) => (),
}
}
"#,
);
}
#[test]
fn struct_missing_arm() {
// We don't currently handle structs.
check_diagnostics(
r#"
struct Foo { a: bool }
fn main(f: Foo) {
match f { Foo { a: true } => () }
}
"#,
);
}
}
}
| 32.070323 | 103 | 0.525853 |
755f587f1fd2ca42eb3654ef0c0fddc4b559af07 | 424 | //! Point inclusion and projection.
#[doc(inline)]
pub use self::point_query::{LocalPointQuery, PointQuery};
pub use self::point_bvt::PointInterferencesCollector;
#[doc(hidden)]
pub mod point_query;
mod point_plane;
mod point_ball;
mod point_cuboid;
mod point_aabb;
mod point_bounding_sphere;
mod point_support_map;
mod point_segment;
mod point_triangle;
mod point_compound;
mod point_mesh;
mod point_repr;
mod point_bvt;
| 20.190476 | 57 | 0.804245 |
1d69a459cfa7f3c37e9ad567bbbd145287ca582f | 4,360 | //! Callsites represent the source locations from which spans or events
//! originate.
use std::{
fmt,
hash::{Hash, Hasher},
ptr,
sync::Mutex,
};
use {
dispatcher::{self, Dispatch, Registrar},
subscriber::Interest,
Metadata,
};
lazy_static! {
static ref REGISTRY: Mutex<Registry> = Mutex::new(Registry {
callsites: Vec::new(),
dispatchers: Vec::new(),
});
}
struct Registry {
callsites: Vec<&'static Callsite>,
dispatchers: Vec<dispatcher::Registrar>,
}
impl Registry {
fn rebuild_callsite_interest(&self, callsite: &'static Callsite) {
let meta = callsite.metadata();
let mut interest = Interest::never();
for registrar in &self.dispatchers {
if let Some(sub_interest) = registrar.try_register(meta) {
interest = interest.and(sub_interest);
}
}
callsite.set_interest(interest)
}
fn rebuild_interest(&mut self) {
self.dispatchers.retain(Registrar::is_alive);
self.callsites.iter().for_each(|&callsite| {
self.rebuild_callsite_interest(callsite);
});
}
}
/// Trait implemented by callsites.
///
/// These functions are only intended to be called by the [`Registry`] which
/// correctly handles determining the common interest between all subscribers.
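///
/// A minimal sketch of an implementation (the `MyCallsite` type, its `encode`
/// helper, and the `MY_CALLSITE` static are hypothetical; real callsites are
/// typically generated by the `span!` and `event!` macros):
///
/// ```rust,ignore
/// struct MyCallsite {
///     // Encoded interest, updated by the registry.
///     interest: AtomicUsize,
///     meta: Metadata<'static>,
/// }
///
/// impl Callsite for MyCallsite {
///     fn set_interest(&self, interest: Interest) {
///         self.interest.store(encode(interest), Ordering::SeqCst);
///     }
///
///     fn metadata(&self) -> &Metadata {
///         &self.meta
///     }
/// }
///
/// // Registering the static once lets subscribers express interest in it.
/// callsite::register(&MY_CALLSITE);
/// ```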
pub trait Callsite: Sync {
/// Sets the [`Interest`] for this callsite.
///
/// [`Interest`]: ../subscriber/struct.Interest.html
fn set_interest(&self, interest: Interest);
/// Returns the [metadata] associated with the callsite.
///
/// [metadata]: ../metadata/struct.Metadata.html
fn metadata(&self) -> &Metadata;
}
/// Uniquely identifies a [`Callsite`]
///
/// Two `Identifier`s are equal if they both refer to the same callsite.
///
/// [`Callsite`]: ../callsite/trait.Callsite.html
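///
/// For example (sketch; `MY_CALLSITE` is a hypothetical static callsite):
///
/// ```rust,ignore
/// let a = Identifier(&MY_CALLSITE);
/// let b = Identifier(&MY_CALLSITE);
/// // Equality is pointer identity on the callsite, not metadata equality.
/// assert_eq!(a, b);
/// ```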
#[derive(Clone)]
pub struct Identifier(
/// **Warning**: The fields on this type are currently `pub` because it must
/// be able to be constructed statically by macros. However, when `const
/// fn`s are available on stable Rust, this will no longer be necessary.
    /// Thus, these fields are *not* considered stable public API, and they may
    /// change without warning. Do not rely on any fields on `Identifier`. When
/// constructing new `Identifier`s, use the `identify_callsite!` macro or
/// the `Callsite::id` function instead.
// TODO: When `Callsite::id` is a const fn, this need no longer be `pub`.
#[doc(hidden)]
pub &'static Callsite,
);
/// Clear and reregister interest on every [`Callsite`]
///
/// This function is intended for runtime reconfiguration of filters on traces
/// when the filter recalculation is much less frequent than trace events are.
/// The alternative is to have the [`Subscriber`] that supports runtime
/// reconfiguration of filters always return [`Interest::sometimes()`] so that
/// [`enabled`] is evaluated for every event.
///
/// [`Callsite`]: ../callsite/trait.Callsite.html
/// [`enabled`]: ../subscriber/trait.Subscriber.html#tymethod.enabled
/// [`Interest::sometimes()`]: ../subscriber/struct.Interest.html#method.sometimes
/// [`Subscriber`]: ../subscriber/trait.Subscriber.html
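///
/// A sketch of the intended usage (the reloadable filter handle is
/// hypothetical):
///
/// ```rust,ignore
/// // After reconfiguring a subscriber's filter at runtime...
/// filter_handle.set_max_level(Level::DEBUG);
/// // ...ask every callsite to re-query subscriber interest.
/// callsite::rebuild_interest_cache();
/// ```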
pub fn rebuild_interest_cache() {
let mut registry = REGISTRY.lock().unwrap();
registry.rebuild_interest();
}
/// Register a new `Callsite` with the global registry.
///
/// This should be called once per callsite after the callsite has been
/// constructed.
pub fn register(callsite: &'static Callsite) {
let mut registry = REGISTRY.lock().unwrap();
registry.rebuild_callsite_interest(callsite);
registry.callsites.push(callsite);
}
pub(crate) fn register_dispatch(dispatch: &Dispatch) {
let mut registry = REGISTRY.lock().unwrap();
registry.dispatchers.push(dispatch.registrar());
registry.rebuild_interest();
}
// ===== impl Identifier =====
impl PartialEq for Identifier {
fn eq(&self, other: &Identifier) -> bool {
ptr::eq(self.0, other.0)
}
}
impl Eq for Identifier {}
impl fmt::Debug for Identifier {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Identifier({:p})", self.0)
}
}
impl Hash for Identifier {
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
(self.0 as *const Callsite).hash(state)
}
}
| 30.48951 | 82 | 0.658486 |
71ded9254f01032997886227a5073d4525e0b8eb | 10,627 | use bytes::{Buf, ByteBuf, MutByteBuf, SliceBuf};
use mio::net::{TcpListener, TcpStream};
use mio::{Events, Interests, Poll, PollOpt, Token};
use slab::Slab;
use std::io;
use {localhost, TryRead, TryWrite};
const SERVER: Token = Token(10_000_000);
const CLIENT: Token = Token(10_000_001);
struct EchoConn {
sock: TcpStream,
buf: Option<ByteBuf>,
mut_buf: Option<MutByteBuf>,
token: Option<Token>,
interests: Option<Interests>,
}
impl EchoConn {
fn new(sock: TcpStream) -> EchoConn {
EchoConn {
sock,
buf: None,
mut_buf: Some(ByteBuf::mut_with_capacity(2048)),
token: None,
interests: None,
}
}
fn writable(&mut self, poll: &mut Poll) -> io::Result<()> {
let mut buf = self.buf.take().unwrap();
match self.sock.try_write_buf(&mut buf) {
Ok(None) => {
debug!("client flushing buf; WOULDBLOCK");
self.buf = Some(buf);
self.interests = match self.interests {
None => Some(Interests::writable()),
Some(i) => Some(i | Interests::writable()),
};
}
Ok(Some(r)) => {
debug!("CONN : we wrote {} bytes!", r);
self.mut_buf = Some(buf.flip());
self.interests = match self.interests {
None => Some(Interests::readable()),
Some(i) => Some((i | Interests::readable()) - Interests::writable()),
};
}
Err(e) => debug!("not implemented; client err={:?}", e),
}
assert!(
self.interests.unwrap().is_readable() || self.interests.unwrap().is_writable(),
"actual={:?}",
self.interests
);
poll.reregister(
&self.sock,
self.token.unwrap(),
self.interests.unwrap(),
PollOpt::edge() | PollOpt::oneshot(),
)
}
fn readable(&mut self, poll: &mut Poll) -> io::Result<()> {
let mut buf = self.mut_buf.take().unwrap();
match self.sock.try_read_buf(&mut buf) {
Ok(None) => {
debug!("CONN : spurious read wakeup");
self.mut_buf = Some(buf);
}
Ok(Some(r)) => {
debug!("CONN : we read {} bytes!", r);
// prepare to provide this to writable
self.buf = Some(buf.flip());
self.interests = match self.interests {
None => Some(Interests::writable()),
Some(i) => Some((i | Interests::writable()) - Interests::readable()),
}
}
Err(e) => {
debug!("not implemented; client err={:?}", e);
if let Some(x) = self.interests.as_mut() {
*x -= Interests::readable();
}
}
};
assert!(
self.interests.unwrap().is_readable() || self.interests.unwrap().is_writable(),
"actual={:?}",
self.interests
);
poll.reregister(
&self.sock,
self.token.unwrap(),
self.interests.unwrap(),
PollOpt::edge(),
)
}
}
struct EchoServer {
sock: TcpListener,
conns: Slab<EchoConn>,
}
impl EchoServer {
fn accept(&mut self, poll: &mut Poll) -> io::Result<()> {
debug!("server accepting socket");
let sock = self.sock.accept().unwrap().0;
let conn = EchoConn::new(sock);
let tok = self.conns.insert(conn);
// Register the connection
self.conns[tok].token = Some(Token(tok));
poll.register(
&self.conns[tok].sock,
Token(tok),
Interests::readable(),
PollOpt::edge() | PollOpt::oneshot(),
)
.expect("could not register socket with event loop");
Ok(())
}
fn conn_readable(&mut self, poll: &mut Poll, tok: Token) -> io::Result<()> {
debug!("server conn readable; tok={:?}", tok);
self.conn(tok).readable(poll)
}
fn conn_writable(&mut self, poll: &mut Poll, tok: Token) -> io::Result<()> {
debug!("server conn writable; tok={:?}", tok);
self.conn(tok).writable(poll)
}
fn conn(&mut self, tok: Token) -> &mut EchoConn {
&mut self.conns[tok.into()]
}
}
struct EchoClient {
sock: TcpStream,
msgs: Vec<&'static str>,
tx: SliceBuf<'static>,
rx: SliceBuf<'static>,
mut_buf: Option<MutByteBuf>,
token: Token,
interests: Option<Interests>,
shutdown: bool,
}
// Sends a message and expects to receive the same exact message, one at a time
impl EchoClient {
fn new(sock: TcpStream, token: Token, mut msgs: Vec<&'static str>) -> EchoClient {
let curr = msgs.remove(0);
EchoClient {
sock,
msgs,
tx: SliceBuf::wrap(curr.as_bytes()),
rx: SliceBuf::wrap(curr.as_bytes()),
mut_buf: Some(ByteBuf::mut_with_capacity(2048)),
token,
interests: None,
shutdown: false,
}
}
fn readable(&mut self, poll: &mut Poll) -> io::Result<()> {
debug!("client socket readable");
let mut buf = self.mut_buf.take().unwrap();
match self.sock.try_read_buf(&mut buf) {
Ok(None) => {
debug!("CLIENT : spurious read wakeup");
self.mut_buf = Some(buf);
}
Ok(Some(r)) => {
debug!("CLIENT : We read {} bytes!", r);
// prepare for reading
let mut buf = buf.flip();
while buf.has_remaining() {
let actual = buf.read_byte().unwrap();
let expect = self.rx.read_byte().unwrap();
assert!(actual == expect, "actual={}; expect={}", actual, expect);
}
self.mut_buf = Some(buf.flip());
if let Some(x) = self.interests.as_mut() {
*x -= Interests::readable();
}
if !self.rx.has_remaining() {
self.next_msg(poll).unwrap();
}
}
Err(e) => {
panic!("not implemented; client err={:?}", e);
}
};
if let Some(x) = self.interests {
poll.reregister(
&self.sock,
self.token,
x,
PollOpt::edge() | PollOpt::oneshot(),
)?;
}
Ok(())
}
fn writable(&mut self, poll: &mut Poll) -> io::Result<()> {
debug!("client socket writable");
match self.sock.try_write_buf(&mut self.tx) {
Ok(None) => {
debug!("client flushing buf; WOULDBLOCK");
self.interests = match self.interests {
None => Some(Interests::writable()),
Some(i) => Some(i | Interests::writable()),
};
}
Ok(Some(r)) => {
debug!("CLIENT : we wrote {} bytes!", r);
self.interests = match self.interests {
None => Some(Interests::readable()),
Some(i) => Some((i | Interests::readable()) - Interests::writable()),
};
}
Err(e) => debug!("not implemented; client err={:?}", e),
}
if self.interests.unwrap().is_readable() || self.interests.unwrap().is_writable() {
try!(poll.reregister(
&self.sock,
self.token,
self.interests.unwrap(),
PollOpt::edge() | PollOpt::oneshot()
));
}
Ok(())
}
fn next_msg(&mut self, poll: &mut Poll) -> io::Result<()> {
if self.msgs.is_empty() {
self.shutdown = true;
return Ok(());
}
let curr = self.msgs.remove(0);
debug!("client prepping next message");
self.tx = SliceBuf::wrap(curr.as_bytes());
self.rx = SliceBuf::wrap(curr.as_bytes());
self.interests = match self.interests {
None => Some(Interests::writable()),
Some(i) => Some(i | Interests::writable()),
};
poll.reregister(
&self.sock,
self.token,
self.interests.unwrap(),
PollOpt::edge() | PollOpt::oneshot(),
)
}
}
struct Echo {
server: EchoServer,
client: EchoClient,
}
impl Echo {
fn new(srv: TcpListener, client: TcpStream, msgs: Vec<&'static str>) -> Echo {
Echo {
server: EchoServer {
sock: srv,
conns: Slab::with_capacity(128),
},
client: EchoClient::new(client, CLIENT, msgs),
}
}
}
#[test]
pub fn test_echo_server() {
debug!("Starting TEST_ECHO_SERVER");
let mut poll = Poll::new().unwrap();
let addr = localhost();
let srv = TcpListener::bind(&addr).unwrap();
info!("listen for connections");
poll.register(
&srv,
SERVER,
Interests::readable(),
PollOpt::edge() | PollOpt::oneshot(),
)
.unwrap();
let sock = TcpStream::connect(&addr).unwrap();
// Connect to the server
poll.register(
&sock,
CLIENT,
Interests::writable(),
PollOpt::edge() | PollOpt::oneshot(),
)
.unwrap();
// == Create storage for events
let mut events = Events::with_capacity(1024);
let mut handler = Echo::new(srv, sock, vec!["foo", "bar"]);
// Start the event loop
while !handler.client.shutdown {
poll.poll(&mut events, None).unwrap();
for event in &events {
debug!("ready {:?} {:?}", event.token(), event.readiness());
if event.readiness().is_readable() {
match event.token() {
SERVER => handler.server.accept(&mut poll).unwrap(),
CLIENT => handler.client.readable(&mut poll).unwrap(),
i => handler.server.conn_readable(&mut poll, i).unwrap(),
}
}
if event.readiness().is_writable() {
match event.token() {
SERVER => panic!("received writable for token 0"),
CLIENT => handler.client.writable(&mut poll).unwrap(),
i => handler.server.conn_writable(&mut poll, i).unwrap(),
};
}
}
}
}
| 29.519444 | 91 | 0.48405 |
9c321c4bd0d741049382bfb205c3271ba7c81b9e | 870 | #![feature(generic_associated_types)]
//~^ WARN the feature `generic_associated_types` is incomplete
fn main() {}
struct X;
impl X {
type Y;
//~^ ERROR associated type in `impl` without body
//~| ERROR associated types are not yet supported in inherent impls
type Z: Ord;
//~^ ERROR associated type in `impl` without body
//~| ERROR bounds on associated `type`s in `impl`s have no effect
//~| ERROR associated types are not yet supported in inherent impls
type W: Ord where Self: Eq;
//~^ ERROR associated type in `impl` without body
//~| ERROR bounds on associated `type`s in `impl`s have no effect
//~| ERROR associated types are not yet supported in inherent impls
type W where Self: Eq;
//~^ ERROR associated type in `impl` without body
//~| ERROR associated types are not yet supported in inherent impls
}
| 36.25 | 71 | 0.686207 |
d76561d79f0e6120ecaaddc35299687b3a12a426 | 464 | fn main() {
let mut new_user = User {
username: String::from("serge"),
email: String::from("[email protected]"),
sign_in_count: 0,
active: false,
};
new_user.username = String::from("paratoner");
print!(
"{}\n{}\n{}\n{}",
new_user.username,
new_user.email,
new_user.sign_in_count,
new_user.active
);
}
#[derive(Debug)]
struct User {
username: String,
email: String,
sign_in_count: u64,
active: bool,
}
| 17.185185 | 50 | 0.614224 |
4a6bc58051c57aa9b4132d56b332c0d9c2d2c690 | 10,633 | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
#![allow(non_upper_case_globals)]
use self::RegClass::*;
use llvm;
use llvm::{Integer, Pointer, Float, Double};
use llvm::{Struct, Array, Attribute};
use llvm::{StructRetAttribute, ByValAttribute, ZExtAttribute};
use trans::cabi::{ArgType, FnType};
use trans::context::CrateContext;
use trans::type_::Type;
use std::cmp;
#[deriving(Clone, PartialEq)]
enum RegClass {
NoClass,
Int,
SSEFs,
SSEFv,
SSEDs,
SSEDv,
SSEInt,
SSEUp,
X87,
X87Up,
ComplexX87,
Memory
}
impl Copy for RegClass {}
trait TypeMethods {
fn is_reg_ty(&self) -> bool;
}
impl TypeMethods for Type {
fn is_reg_ty(&self) -> bool {
match self.kind() {
Integer | Pointer | Float | Double => true,
_ => false
}
}
}
impl RegClass {
fn is_sse(&self) -> bool {
match *self {
SSEFs | SSEFv | SSEDs | SSEDv => true,
_ => false
}
}
}
trait ClassList for Sized? {
fn is_pass_byval(&self) -> bool;
fn is_ret_bysret(&self) -> bool;
}
impl ClassList for [RegClass] {
fn is_pass_byval(&self) -> bool {
if self.len() == 0 { return false; }
let class = self[0];
class == Memory
|| class == X87
|| class == ComplexX87
}
fn is_ret_bysret(&self) -> bool {
if self.len() == 0 { return false; }
self[0] == Memory
}
}
fn classify_ty(ty: Type) -> Vec<RegClass> {
fn align(off: uint, ty: Type) -> uint {
let a = ty_align(ty);
return (off + a - 1u) / a * a;
}
fn ty_align(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
_ => panic!("ty_size: unhandled type")
}
}
fn ty_size(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
let str_tys = ty.field_types();
if ty.is_packed() {
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => panic!("ty_size: unhandled type")
}
}
fn all_mem(cls: &mut [RegClass]) {
for elt in cls.iter_mut() {
*elt = Memory;
}
}
fn unify(cls: &mut [RegClass],
i: uint,
newv: RegClass) {
if cls[i] == newv {
return;
} else if cls[i] == NoClass {
cls[i] = newv;
} else if newv == NoClass {
return;
} else if cls[i] == Memory || newv == Memory {
cls[i] = Memory;
} else if cls[i] == Int || newv == Int {
cls[i] = Int;
} else if cls[i] == X87 ||
cls[i] == X87Up ||
cls[i] == ComplexX87 ||
newv == X87 ||
newv == X87Up ||
newv == ComplexX87 {
cls[i] = Memory;
} else {
cls[i] = newv;
}
}
fn classify_struct(tys: &[Type],
cls: &mut [RegClass],
i: uint,
off: uint,
packed: bool) {
let mut field_off = off;
for ty in tys.iter() {
if !packed {
field_off = align(field_off, *ty);
}
classify(*ty, cls, i, field_off);
field_off += ty_size(*ty);
}
}
fn classify(ty: Type,
cls: &mut [RegClass], ix: uint,
off: uint) {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign != 0u {
let mut i = off / 8u;
let e = (off + t_size + 7u) / 8u;
while i < e {
unify(cls, ix + i, Memory);
i += 1u;
}
return;
}
match ty.kind() {
Integer |
Pointer => {
unify(cls, ix + off / 8u, Int);
}
Float => {
if off % 8u == 4u {
unify(cls, ix + off / 8u, SSEFv);
} else {
unify(cls, ix + off / 8u, SSEFs);
}
}
Double => {
unify(cls, ix + off / 8u, SSEDs);
}
Struct => {
classify_struct(ty.field_types().as_slice(), cls, ix, off, ty.is_packed());
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut i = 0u;
while i < len {
classify(elt, cls, ix, off + i * eltsz);
i += 1u;
}
}
_ => panic!("classify: unhandled type")
}
}
fn fixup(ty: Type, cls: &mut [RegClass]) {
let mut i = 0u;
let ty_kind = ty.kind();
let e = cls.len();
if cls.len() > 2u && (ty_kind == Struct || ty_kind == Array) {
if cls[i].is_sse() {
i += 1u;
while i < e {
if cls[i] != SSEUp {
all_mem(cls);
return;
}
i += 1u;
}
} else {
all_mem(cls);
return
}
} else {
while i < e {
if cls[i] == Memory {
all_mem(cls);
return;
}
if cls[i] == X87Up {
// for darwin
// cls[i] = SSEDs;
all_mem(cls);
return;
}
if cls[i] == SSEUp {
cls[i] = SSEDv;
} else if cls[i].is_sse() {
i += 1;
while i != e && cls[i] == SSEUp { i += 1u; }
} else if cls[i] == X87 {
i += 1;
while i != e && cls[i] == X87Up { i += 1u; }
} else {
i += 1;
}
}
}
}
let words = (ty_size(ty) + 7) / 8;
let mut cls = Vec::from_elem(words, NoClass);
if words > 4 {
all_mem(cls.as_mut_slice());
return cls;
}
classify(ty, cls.as_mut_slice(), 0, 0);
fixup(ty, cls.as_mut_slice());
return cls;
}
fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type {
fn llvec_len(cls: &[RegClass]) -> uint {
let mut len = 1u;
for c in cls.iter() {
if *c != SSEUp {
break;
}
len += 1u;
}
return len;
}
let mut tys = Vec::new();
let mut i = 0u;
let e = cls.len();
while i < e {
match cls[i] {
Int => {
tys.push(Type::i64(ccx));
}
SSEFv => {
let vec_len = llvec_len(cls[i + 1u..]);
let vec_ty = Type::vector(&Type::f32(ccx), (vec_len * 2u) as u64);
tys.push(vec_ty);
i += vec_len;
continue;
}
SSEFs => {
tys.push(Type::f32(ccx));
}
SSEDs => {
tys.push(Type::f64(ccx));
}
_ => panic!("llregtype: unhandled class")
}
i += 1u;
}
return Type::struct_(ccx, tys.as_slice(), false);
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
fn x86_64_ty<F>(ccx: &CrateContext,
ty: Type,
is_mem_cls: F,
ind_attr: Attribute)
-> ArgType where
F: FnOnce(&[RegClass]) -> bool,
{
if !ty.is_reg_ty() {
let cls = classify_ty(ty);
if is_mem_cls(cls.as_slice()) {
ArgType::indirect(ty, Some(ind_attr))
} else {
ArgType::direct(ty,
Some(llreg_ty(ccx, cls.as_slice())),
None,
None)
}
} else {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
ArgType::direct(ty, None, None, attr)
}
}
let mut arg_tys = Vec::new();
for t in atys.iter() {
let ty = x86_64_ty(ccx, *t, |cls| cls.is_pass_byval(), ByValAttribute);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
x86_64_ty(ccx, rty, |cls| cls.is_ret_bysret(), StructRetAttribute)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
}
| 27.618182 | 91 | 0.413712 |
9ce3ea694b98152dc04db3edc4a82a600594228d | 19,318 | use std::iter::{self, successors};
use either::Either;
use ide_db::{ty_filter::TryEnum, RootDatabase};
use syntax::{
ast::{
self,
edit::{AstNodeEdit, IndentLevel},
make,
},
AstNode,
};
use crate::{
utils::{does_pat_match_variant, unwrap_trivial_block},
AssistContext, AssistId, AssistKind, Assists,
};
// Assist: replace_if_let_with_match
//
// Replaces an `if let` expression with a `match` expression.
//
// ```
// enum Action { Move { distance: u32 }, Stop }
//
// fn handle(action: Action) {
// $0if let Action::Move { distance } = action {
// foo(distance)
// } else {
// bar()
// }
// }
// ```
// ->
// ```
// enum Action { Move { distance: u32 }, Stop }
//
// fn handle(action: Action) {
// match action {
// Action::Move { distance } => foo(distance),
// _ => bar(),
// }
// }
// ```
pub(crate) fn replace_if_let_with_match(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
let if_expr: ast::IfExpr = ctx.find_node_at_offset()?;
let mut else_block = None;
let if_exprs = successors(Some(if_expr.clone()), |expr| match expr.else_branch()? {
ast::ElseBranch::IfExpr(expr) => Some(expr),
ast::ElseBranch::Block(block) => {
else_block = Some(block);
None
}
});
let scrutinee_to_be_expr = if_expr.condition()?.expr()?;
let mut pat_seen = false;
let mut cond_bodies = Vec::new();
for if_expr in if_exprs {
let cond = if_expr.condition()?;
let expr = cond.expr()?;
let cond = match cond.pat() {
Some(pat) => {
if scrutinee_to_be_expr.syntax().text() != expr.syntax().text() {
// Only if all condition expressions are equal we can merge them into a match
return None;
}
pat_seen = true;
Either::Left(pat)
}
None => Either::Right(expr),
};
let body = if_expr.then_branch()?;
cond_bodies.push((cond, body));
}
if !pat_seen {
// Don't offer turning an if (chain) without patterns into a match
return None;
}
let target = if_expr.syntax().text_range();
acc.add(
AssistId("replace_if_let_with_match", AssistKind::RefactorRewrite),
"Replace with match",
target,
move |edit| {
let match_expr = {
let else_arm = make_else_arm(else_block, &cond_bodies, ctx);
let make_match_arm = |(pat, body): (_, ast::BlockExpr)| {
let body = body.reset_indent().indent(IndentLevel(1));
match pat {
Either::Left(pat) => {
make::match_arm(iter::once(pat), None, unwrap_trivial_block(body))
}
Either::Right(expr) => make::match_arm(
iter::once(make::wildcard_pat().into()),
Some(expr),
unwrap_trivial_block(body),
),
}
};
let arms = cond_bodies.into_iter().map(make_match_arm).chain(iter::once(else_arm));
let match_expr = make::expr_match(scrutinee_to_be_expr, make::match_arm_list(arms));
match_expr.indent(IndentLevel::from_node(if_expr.syntax()))
};
let has_preceding_if_expr =
if_expr.syntax().parent().map_or(false, |it| ast::IfExpr::can_cast(it.kind()));
let expr = if has_preceding_if_expr {
// make sure we replace the `else if let ...` with a block so we don't end up with `else expr`
make::block_expr(None, Some(match_expr)).into()
} else {
match_expr
};
edit.replace_ast::<ast::Expr>(if_expr.into(), expr);
},
)
}
fn make_else_arm(
else_block: Option<ast::BlockExpr>,
cond_bodies: &Vec<(Either<ast::Pat, ast::Expr>, ast::BlockExpr)>,
ctx: &AssistContext,
) -> ast::MatchArm {
if let Some(else_block) = else_block {
let pattern = if let [(Either::Left(pat), _)] = &**cond_bodies {
ctx.sema
.type_of_pat(&pat)
.and_then(|ty| TryEnum::from_ty(&ctx.sema, &ty))
.zip(Some(pat))
} else {
None
};
let pattern = match pattern {
Some((it, pat)) => {
if does_pat_match_variant(&pat, &it.sad_pattern()) {
it.happy_pattern()
} else {
it.sad_pattern()
}
}
None => make::wildcard_pat().into(),
};
make::match_arm(iter::once(pattern), None, unwrap_trivial_block(else_block))
} else {
make::match_arm(iter::once(make::wildcard_pat().into()), None, make::expr_unit().into())
}
}
// Assist: replace_match_with_if_let
//
// Replaces a `match` expression that has exactly two arms, no guards, and a
// wildcard-like pattern with an `if let` expression.
//
// ```
// enum Action { Move { distance: u32 }, Stop }
//
// fn handle(action: Action) {
// $0match action {
// Action::Move { distance } => foo(distance),
// _ => bar(),
// }
// }
// ```
// ->
// ```
// enum Action { Move { distance: u32 }, Stop }
//
// fn handle(action: Action) {
// if let Action::Move { distance } = action {
// foo(distance)
// } else {
// bar()
// }
// }
// ```
pub(crate) fn replace_match_with_if_let(acc: &mut Assists, ctx: &AssistContext) -> Option<()> {
let match_expr: ast::MatchExpr = ctx.find_node_at_offset()?;
let mut arms = match_expr.match_arm_list()?.arms();
let (first_arm, second_arm) = (arms.next()?, arms.next()?);
if arms.next().is_some() || first_arm.guard().is_some() || second_arm.guard().is_some() {
return None;
}
let (if_let_pat, then_expr, else_expr) = pick_pattern_and_expr_order(
&ctx.sema,
first_arm.pat()?,
second_arm.pat()?,
first_arm.expr()?,
second_arm.expr()?,
)?;
let scrutinee = match_expr.expr()?;
let target = match_expr.syntax().text_range();
acc.add(
AssistId("replace_match_with_if_let", AssistKind::RefactorRewrite),
"Replace with if let",
target,
move |edit| {
let condition = make::condition(scrutinee, Some(if_let_pat));
let then_block = match then_expr.reset_indent() {
ast::Expr::BlockExpr(block) => block,
expr => make::block_expr(iter::empty(), Some(expr)),
};
let else_expr = match else_expr {
ast::Expr::BlockExpr(block) if block.is_empty() => None,
ast::Expr::TupleExpr(tuple) if tuple.fields().next().is_none() => None,
expr => Some(expr),
};
let if_let_expr = make::expr_if(
condition,
then_block,
else_expr
.map(|expr| match expr {
ast::Expr::BlockExpr(block) => block,
expr => (make::block_expr(iter::empty(), Some(expr))),
})
.map(ast::ElseBranch::Block),
)
.indent(IndentLevel::from_node(match_expr.syntax()));
edit.replace_ast::<ast::Expr>(match_expr.into(), if_let_expr);
},
)
}
/// Pick the pattern for the if let condition and return the expressions for the `then` body and `else` body in that order.
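///
/// For example (illustrative), given the arms `Some(x) => a` and `None => b`,
/// this picks `Some(x)` as the `if let` pattern with `a` as the `then` body and
/// `b` as the `else` body, preferring the arm that binds a name over the "sad"
/// variant's arm.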
fn pick_pattern_and_expr_order(
sema: &hir::Semantics<RootDatabase>,
pat: ast::Pat,
pat2: ast::Pat,
expr: ast::Expr,
expr2: ast::Expr,
) -> Option<(ast::Pat, ast::Expr, ast::Expr)> {
let res = match (pat, pat2) {
(ast::Pat::WildcardPat(_), _) => return None,
(pat, sad_pat) if is_sad_pat(sema, &sad_pat) => (pat, expr, expr2),
(sad_pat, pat) if is_sad_pat(sema, &sad_pat) => (pat, expr2, expr),
(pat, pat2) => match (binds_name(&pat), binds_name(&pat2)) {
(true, true) => return None,
(true, false) => (pat, expr, expr2),
(false, true) => (pat2, expr2, expr),
(false, false) => (pat, expr, expr2),
},
};
Some(res)
}
fn binds_name(pat: &ast::Pat) -> bool {
let binds_name_v = |pat| binds_name(&pat);
match pat {
ast::Pat::IdentPat(_) => true,
ast::Pat::MacroPat(_) => true,
ast::Pat::OrPat(pat) => pat.pats().any(binds_name_v),
ast::Pat::SlicePat(pat) => pat.pats().any(binds_name_v),
ast::Pat::TuplePat(it) => it.fields().any(binds_name_v),
ast::Pat::TupleStructPat(it) => it.fields().any(binds_name_v),
ast::Pat::RecordPat(it) => it
.record_pat_field_list()
.map_or(false, |rpfl| rpfl.fields().flat_map(|rpf| rpf.pat()).any(binds_name_v)),
ast::Pat::RefPat(pat) => pat.pat().map_or(false, binds_name_v),
ast::Pat::BoxPat(pat) => pat.pat().map_or(false, binds_name_v),
ast::Pat::ParenPat(pat) => pat.pat().map_or(false, binds_name_v),
_ => false,
}
}
fn is_sad_pat(sema: &hir::Semantics<RootDatabase>, pat: &ast::Pat) -> bool {
sema.type_of_pat(pat)
.and_then(|ty| TryEnum::from_ty(sema, &ty))
.map_or(false, |it| does_pat_match_variant(pat, &it.sad_pattern()))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::{check_assist, check_assist_not_applicable, check_assist_target};
#[test]
fn test_if_let_with_match_unapplicable_for_simple_ifs() {
check_assist_not_applicable(
replace_if_let_with_match,
r#"
fn main() {
if $0true {} else if false {} else {}
}
"#,
)
}
#[test]
fn test_if_let_with_match_no_else() {
check_assist(
replace_if_let_with_match,
r#"
impl VariantData {
pub fn foo(&self) {
if $0let VariantData::Struct(..) = *self {
self.foo();
}
}
}
"#,
r#"
impl VariantData {
pub fn foo(&self) {
match *self {
VariantData::Struct(..) => {
self.foo();
}
_ => (),
}
}
}
"#,
)
}
#[test]
fn test_if_let_with_match_basic() {
check_assist(
replace_if_let_with_match,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
if $0let VariantData::Struct(..) = *self {
true
} else if let VariantData::Tuple(..) = *self {
false
} else if cond() {
true
} else {
bar(
123
)
}
}
}
"#,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
match *self {
VariantData::Struct(..) => true,
VariantData::Tuple(..) => false,
_ if cond() => true,
_ => {
bar(
123
)
}
}
}
}
"#,
)
}
#[test]
fn test_if_let_with_match_on_tail_if_let() {
check_assist(
replace_if_let_with_match,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
if let VariantData::Struct(..) = *self {
true
} else if let$0 VariantData::Tuple(..) = *self {
false
} else {
false
}
}
}
"#,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
if let VariantData::Struct(..) = *self {
true
} else {
match *self {
VariantData::Tuple(..) => false,
_ => false,
}
}
}
}
"#,
)
}
#[test]
fn special_case_option() {
check_assist(
replace_if_let_with_match,
r#"
//- minicore: option
fn foo(x: Option<i32>) {
$0if let Some(x) = x {
println!("{}", x)
} else {
println!("none")
}
}
"#,
r#"
fn foo(x: Option<i32>) {
match x {
Some(x) => println!("{}", x),
None => println!("none"),
}
}
"#,
);
}
#[test]
fn special_case_inverted_option() {
check_assist(
replace_if_let_with_match,
r#"
//- minicore: option
fn foo(x: Option<i32>) {
$0if let None = x {
println!("none")
} else {
println!("some")
}
}
"#,
r#"
fn foo(x: Option<i32>) {
match x {
None => println!("none"),
Some(_) => println!("some"),
}
}
"#,
);
}
#[test]
fn special_case_result() {
check_assist(
replace_if_let_with_match,
r#"
//- minicore: result
fn foo(x: Result<i32, ()>) {
$0if let Ok(x) = x {
println!("{}", x)
} else {
println!("none")
}
}
"#,
r#"
fn foo(x: Result<i32, ()>) {
match x {
Ok(x) => println!("{}", x),
Err(_) => println!("none"),
}
}
"#,
);
}
#[test]
fn special_case_inverted_result() {
check_assist(
replace_if_let_with_match,
r#"
//- minicore: result
fn foo(x: Result<i32, ()>) {
$0if let Err(x) = x {
println!("{}", x)
} else {
println!("ok")
}
}
"#,
r#"
fn foo(x: Result<i32, ()>) {
match x {
Err(x) => println!("{}", x),
Ok(_) => println!("ok"),
}
}
"#,
);
}
#[test]
fn nested_indent() {
check_assist(
replace_if_let_with_match,
r#"
fn main() {
if true {
$0if let Ok(rel_path) = path.strip_prefix(root_path) {
let rel_path = RelativePathBuf::from_path(rel_path).ok()?;
Some((*id, rel_path))
} else {
None
}
}
}
"#,
r#"
fn main() {
if true {
match path.strip_prefix(root_path) {
Ok(rel_path) => {
let rel_path = RelativePathBuf::from_path(rel_path).ok()?;
Some((*id, rel_path))
}
_ => None,
}
}
}
"#,
)
}
#[test]
fn test_replace_match_with_if_let_unwraps_simple_expressions() {
check_assist(
replace_match_with_if_let,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
$0match *self {
VariantData::Struct(..) => true,
_ => false,
}
}
} "#,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
if let VariantData::Struct(..) = *self {
true
} else {
false
}
}
} "#,
)
}
#[test]
fn test_replace_match_with_if_let_doesnt_unwrap_multiline_expressions() {
check_assist(
replace_match_with_if_let,
r#"
fn foo() {
$0match a {
VariantData::Struct(..) => {
bar(
123
)
}
_ => false,
}
} "#,
r#"
fn foo() {
if let VariantData::Struct(..) = a {
bar(
123
)
} else {
false
}
} "#,
)
}
#[test]
fn replace_match_with_if_let_target() {
check_assist_target(
replace_match_with_if_let,
r#"
impl VariantData {
pub fn is_struct(&self) -> bool {
$0match *self {
VariantData::Struct(..) => true,
_ => false,
}
}
} "#,
r#"match *self {
VariantData::Struct(..) => true,
_ => false,
}"#,
);
}
#[test]
fn special_case_option_match_to_if_let() {
check_assist(
replace_match_with_if_let,
r#"
//- minicore: option
fn foo(x: Option<i32>) {
$0match x {
Some(x) => println!("{}", x),
None => println!("none"),
}
}
"#,
r#"
fn foo(x: Option<i32>) {
if let Some(x) = x {
println!("{}", x)
} else {
println!("none")
}
}
"#,
);
}
#[test]
fn special_case_result_match_to_if_let() {
check_assist(
replace_match_with_if_let,
r#"
//- minicore: result
fn foo(x: Result<i32, ()>) {
$0match x {
Ok(x) => println!("{}", x),
Err(_) => println!("none"),
}
}
"#,
r#"
fn foo(x: Result<i32, ()>) {
if let Ok(x) = x {
println!("{}", x)
} else {
println!("none")
}
}
"#,
);
}
#[test]
fn nested_indent_match_to_if_let() {
check_assist(
replace_match_with_if_let,
r#"
fn main() {
if true {
$0match path.strip_prefix(root_path) {
Ok(rel_path) => {
let rel_path = RelativePathBuf::from_path(rel_path).ok()?;
Some((*id, rel_path))
}
_ => None,
}
}
}
"#,
r#"
fn main() {
if true {
if let Ok(rel_path) = path.strip_prefix(root_path) {
let rel_path = RelativePathBuf::from_path(rel_path).ok()?;
Some((*id, rel_path))
} else {
None
}
}
}
"#,
)
}
#[test]
fn replace_match_with_if_let_empty_wildcard_expr() {
check_assist(
replace_match_with_if_let,
r#"
fn main() {
$0match path.strip_prefix(root_path) {
Ok(rel_path) => println!("{}", rel_path),
_ => (),
}
}
"#,
r#"
fn main() {
if let Ok(rel_path) = path.strip_prefix(root_path) {
println!("{}", rel_path)
}
}
"#,
)
}
#[test]
fn replace_match_with_if_let_exhaustive() {
check_assist(
replace_match_with_if_let,
r#"
fn print_source(def_source: ModuleSource) {
match def_so$0urce {
ModuleSource::SourceFile(..) => { println!("source file"); }
ModuleSource::Module(..) => { println!("module"); }
}
}
"#,
r#"
fn print_source(def_source: ModuleSource) {
if let ModuleSource::SourceFile(..) = def_source { println!("source file"); } else { println!("module"); }
}
"#,
)
}
#[test]
fn replace_match_with_if_let_prefer_name_bind() {
check_assist(
replace_match_with_if_let,
r#"
fn foo() {
match $0Foo(0) {
Foo(_) => (),
Bar(bar) => println!("bar {}", bar),
}
}
"#,
r#"
fn foo() {
if let Bar(bar) = Foo(0) {
println!("bar {}", bar)
}
}
"#,
);
check_assist(
replace_match_with_if_let,
r#"
fn foo() {
match $0Foo(0) {
Bar(bar) => println!("bar {}", bar),
Foo(_) => (),
}
}
"#,
r#"
fn foo() {
if let Bar(bar) = Foo(0) {
println!("bar {}", bar)
}
}
"#,
);
}
#[test]
fn replace_match_with_if_let_rejects_double_name_bindings() {
check_assist_not_applicable(
replace_match_with_if_let,
r#"
fn foo() {
match $0Foo(0) {
Foo(foo) => println!("bar {}", foo),
Bar(bar) => println!("bar {}", bar),
}
}
"#,
);
}
}
| 24.766667 | 123 | 0.486541 |
38c676a458ddd5b188a04ec7d1aabb08c9d16362 | 213 | use aoc::Result;
pub const YEAR: u32 = 2019;
pub const DAY: u32 = 19;
pub fn part_one(input: &str) -> Result<usize> {
Ok(input.len())
}
pub fn part_two(input: &str) -> Result<usize> {
Ok(input.len())
}
| 16.384615 | 47 | 0.615023 |
224600c0b032ba8cfa1a41c12d739a63f7393104 | 10,562 | #![feature(test)]
extern crate test;
use anyhow::Error;
use spack::loaders::swc::SwcLoader;
use std::{
collections::HashMap,
env,
fs::{create_dir_all, read_dir},
io::{self},
path::{Path, PathBuf},
sync::Arc,
};
use swc::{config::SourceMapsConfig, resolver::NodeResolver};
use swc_atoms::js_word;
use swc_bundler::{BundleKind, Bundler, Config, ModuleRecord};
use swc_common::{FileName, Span, GLOBALS};
use swc_ecma_ast::{
Bool, Expr, ExprOrSuper, Ident, KeyValueProp, Lit, MemberExpr, MetaPropExpr, PropName, Str,
};
use swc_ecma_parser::JscTarget;
use swc_ecma_transforms::fixer;
use swc_ecma_visit::FoldWith;
use test::{
test_main, DynTestFn, Options, ShouldPanic::No, TestDesc, TestDescAndFn, TestName, TestType,
};
use testing::NormalizedOutput;
use walkdir::WalkDir;
fn add_test<F: FnOnce() + Send + 'static>(
tests: &mut Vec<TestDescAndFn>,
name: String,
ignore: bool,
f: F,
) {
tests.push(TestDescAndFn {
desc: TestDesc {
test_type: TestType::UnitTest,
name: TestName::DynTestName(name.replace("-", "_").replace("/", "::")),
ignore,
should_panic: No,
allow_fail: false,
},
testfn: DynTestFn(Box::new(f)),
});
}
fn reference_tests(tests: &mut Vec<TestDescAndFn>, errors: bool) -> Result<(), io::Error> {
let root = {
let mut root = Path::new(env!("CARGO_MANIFEST_DIR")).to_path_buf();
root.push("tests");
root.push(if errors { "error" } else { "pass" });
root
};
eprintln!("Loading tests from {}", root.display());
let dir = root;
for entry in WalkDir::new(&dir).into_iter() {
let entry = entry?;
if !entry.path().join("input").exists() {
continue;
}
let ignore = entry
.path()
.file_name()
.unwrap()
.to_string_lossy()
.starts_with(".");
let dir_name = entry
.path()
.strip_prefix(&dir)
.expect("failed to strip prefix")
.to_str()
.unwrap()
.to_string();
let _ = create_dir_all(entry.path().join("output"));
let entries = read_dir(entry.path().join("input"))?
            .filter(|e| match e {
                Ok(e) => e
                    .path()
                    .file_name()
                    .unwrap()
                    .to_string_lossy()
                    .starts_with("entry"),
                _ => false,
            })
.map(|e| -> Result<_, io::Error> {
let e = e?;
Ok((
e.file_name().to_string_lossy().to_string(),
FileName::Real(e.path()),
))
})
.collect::<Result<HashMap<_, _>, _>>()?;
let name = format!(
"fixture::{}::{}",
if errors { "error" } else { "pass" },
dir_name
);
let ignore = ignore
|| !name.contains(
&env::var("TEST")
.ok()
.unwrap_or("".into())
.replace("::", "/")
.replace("_", "-"),
);
add_test(tests, name, ignore, move || {
eprintln!("\n\n========== Running reference test {}\n", dir_name);
testing::run_test2(false, |cm, handler| {
let compiler = Arc::new(swc::Compiler::new(cm.clone(), Arc::new(handler)));
GLOBALS.set(compiler.globals(), || {
let loader = SwcLoader::new(
compiler.clone(),
swc::config::Options {
swcrc: true,
..Default::default()
},
);
let bundler = Bundler::new(
compiler.globals(),
cm.clone(),
&loader,
NodeResolver::default(),
Config {
require: true,
disable_inliner: true,
module: Default::default(),
external_modules: vec![
"assert",
"buffer",
"child_process",
"console",
"cluster",
"crypto",
"dgram",
"dns",
"events",
"fs",
"http",
"http2",
"https",
"net",
"os",
"path",
"perf_hooks",
"process",
"querystring",
"readline",
"repl",
"stream",
"string_decoder",
"timers",
"tls",
"tty",
"url",
"util",
"v8",
"vm",
"wasi",
"worker",
"zlib",
]
.into_iter()
.map(From::from)
.collect(),
},
Box::new(Hook),
);
let modules = bundler
.bundle(entries)
.map_err(|err| println!("{:?}", err))?;
println!("Bundled as {} modules", modules.len());
let mut error = false;
for bundled in modules {
let code = compiler
.print(
&bundled.module.fold_with(&mut fixer(None)),
None,
None,
JscTarget::Es2020,
SourceMapsConfig::Bool(false),
None,
false,
)
.expect("failed to print?")
.code;
let name = match bundled.kind {
BundleKind::Named { name } | BundleKind::Lib { name } => {
PathBuf::from(name)
}
BundleKind::Dynamic => format!("dynamic.{}.js", bundled.id).into(),
};
let output_path = entry
.path()
.join("output")
.join(name.file_name().unwrap())
.with_extension("js");
println!("Printing {}", output_path.display());
// {
// let status = Command::new("node")
// .arg(&output_path)
// .stdout(Stdio::inherit())
// .stderr(Stdio::inherit())
// .status()
// .unwrap();
// assert!(status.success());
// }
let s = NormalizedOutput::from(code);
match s.compare_to_file(&output_path) {
Ok(_) => {}
Err(err) => {
println!("Diff: {:?}", err);
error = true;
}
}
}
if error {
return Err(());
}
Ok(())
})
})
.expect("failed to process a module");
});
}
Ok(())
}
#[test]
fn pass() {
let _ = pretty_env_logger::try_init();
let args: Vec<_> = env::args().collect();
let mut tests = Vec::new();
reference_tests(&mut tests, false).unwrap();
test_main(&args, tests, Some(Options::new()));
}
#[test]
#[ignore]
fn errors() {
let _ = pretty_env_logger::try_init();
let args: Vec<_> = env::args().collect();
let mut tests = Vec::new();
reference_tests(&mut tests, true).unwrap();
test_main(&args, tests, Some(Options::new()));
}
struct Hook;
impl swc_bundler::Hook for Hook {
fn get_import_meta_props(
&self,
span: Span,
module_record: &ModuleRecord,
) -> Result<Vec<KeyValueProp>, Error> {
Ok(vec![
KeyValueProp {
key: PropName::Ident(Ident::new(js_word!("url"), span)),
value: Box::new(Expr::Lit(Lit::Str(Str {
span,
value: module_record.file_name.to_string().into(),
has_escape: false,
kind: Default::default(),
}))),
},
KeyValueProp {
key: PropName::Ident(Ident::new(js_word!("main"), span)),
value: Box::new(if module_record.is_entry {
Expr::Member(MemberExpr {
span,
obj: ExprOrSuper::Expr(Box::new(Expr::MetaProp(MetaPropExpr {
meta: Ident::new(js_word!("import"), span),
prop: Ident::new(js_word!("meta"), span),
}))),
prop: Box::new(Expr::Ident(Ident::new(js_word!("main"), span))),
computed: false,
})
} else {
Expr::Lit(Lit::Bool(Bool { span, value: false }))
}),
},
])
}
}
| 33.213836 | 96 | 0.362905 |
f5dae4368d256c29b9124bdac3c046139e764361 | 181 | // Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
pub mod constants;
pub mod events;
pub mod resources;
pub use constants::*;
pub use events::*;
pub use resources::*;
| 16.454545 | 38 | 0.707182 |
4a0df115d75a67f9887851c2fe37feebdc6daa3f | 8,139 | //! Utilities for enriching error handling with [`tracing`] diagnostic
//! information.
//!
//! # Overview
//!
//! [`tracing`] is a framework for instrumenting Rust programs to collect
//! scoped, structured, and async-aware diagnostics. This crate provides
//! integrations between [`tracing`] instrumentation and Rust error handling. It
//! enables enriching error types with diagnostic information from `tracing`
//! [span] contexts, formatting those contexts when errors are displayed, and
//! automatically generate `tracing` [events] when errors occur.
//!
//! The crate provides the following:
//!
//! * [`SpanTrace`], a captured trace of the current `tracing` [span] context
//!
//! * [`ErrorLayer`], a [subscriber layer] which enables capturing `SpanTrace`s
//!
//! **Note**: This crate is currently experimental.
//!
//! *Compiler support: [requires `rustc` 1.42+][msrv]*
//!
//! [msrv]: #supported-rust-versions
//!
//! ## Feature Flags
//!
//! - `traced-error` - Enables the [`TracedError`] type and related Traits
//! - [`InstrumentResult`] and [`InstrumentError`] extension traits, which
//! provide an [`in_current_span()`] method for bundling errors with a
//! [`SpanTrace`].
//! - [`ExtractSpanTrace`] extension trait, for extracting `SpanTrace`s from
//! behind `dyn Error` trait objects.
//!
//! ## Usage
//!
//! `tracing-error` provides the [`SpanTrace`] type, which captures the current
//! `tracing` span context when it is constructed and allows it to be displayed
//! at a later time.
//!
//! For example:
//!
//! ```rust
//! use std::{fmt, error::Error};
//! use tracing_error::SpanTrace;
//!
//! #[derive(Debug)]
//! pub struct MyError {
//! context: SpanTrace,
//! // ...
//! }
//!
//! impl fmt::Display for MyError {
//! fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
//! // ... format other parts of the error ...
//!
//! self.context.fmt(f)?;
//!
//! // ... format other error context information, cause chain, etc ...
//! # Ok(())
//! }
//! }
//!
//! impl Error for MyError {}
//!
//! impl MyError {
//! pub fn new() -> Self {
//! Self {
//! context: SpanTrace::capture(),
//! // ... other error information ...
//! }
//! }
//! }
//! ```
//!
//! This crate also provides [`TracedError`], for attaching a [`SpanTrace`] to
//! an existing error. The easiest way to wrap errors in `TracedError` is to
//! either use the [`InstrumentResult`] and [`InstrumentError`] traits or the
//! `From`/`Into` traits.
//!
//! ```rust
//! # use std::error::Error;
//! use tracing_error::prelude::*;
//!
//! # fn fake_main() -> Result<(), Box<dyn Error>> {
//! std::fs::read_to_string("myfile.txt").in_current_span()?;
//! # Ok(())
//! # }
//! ```
//!
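//! The `From`/`Into` route looks like this (a sketch; requires the
//! `traced-error` feature):
//!
//! ```rust
//! # use std::io;
//! use tracing_error::TracedError;
//!
//! fn read_config() -> Result<String, TracedError<io::Error>> {
//!     std::fs::read_to_string("myfile.txt").map_err(Into::into)
//! }
//! ```
//!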
//! Once an error has been wrapped with a [`TracedError`], the [`SpanTrace`]
//! can be extracted in one of three ways: via [`TracedError`]'s `Display` or
//! `Debug` implementations, or via the [`ExtractSpanTrace`] trait.
//!
//! For example, here is how one might print the errors but specialize the
//! printing when the error is a placeholder for a wrapping [`SpanTrace`]:
//!
//! ```rust
//! use std::error::Error;
//! use tracing_error::ExtractSpanTrace as _;
//!
//! fn print_extracted_spantraces(error: &(dyn Error + 'static)) {
//! let mut error = Some(error);
//! let mut ind = 0;
//!
//! eprintln!("Error:");
//!
//! while let Some(err) = error {
//! if let Some(spantrace) = err.span_trace() {
//! eprintln!("found a spantrace:\n{}", spantrace);
//! } else {
//! eprintln!("{:>4}: {}", ind, err);
//! }
//!
//! error = err.source();
//! ind += 1;
//! }
//! }
//!
//! ```
//!
//! Whereas here, we can still display the content of the `SpanTraces` without
//! any special casing by simply printing all errors in our error chain.
//!
//! ```rust
//! use std::error::Error;
//!
//! fn print_naive_spantraces(error: &(dyn Error + 'static)) {
//! let mut error = Some(error);
//! let mut ind = 0;
//!
//! eprintln!("Error:");
//!
//! while let Some(err) = error {
//! eprintln!("{:>4}: {}", ind, err);
//! error = err.source();
//! ind += 1;
//! }
//! }
//! ```
//!
//! Applications that wish to use `tracing-error`-enabled errors should
//! construct an [`ErrorLayer`] and add it to their [`Subscriber`] in order to
//! enable capturing [`SpanTrace`]s. For example:
//!
//! ```rust
//! use tracing_error::ErrorLayer;
//! use tracing_subscriber::prelude::*;
//!
//! fn main() {
//! let subscriber = tracing_subscriber::Registry::default()
//! // any number of other subscriber layers may be added before or
//! // after the `ErrorLayer`...
//! .with(ErrorLayer::default());
//!
//! // set the subscriber as the default for the application
//!     tracing::subscriber::set_global_default(subscriber)
//!         .expect("failed to set the global default subscriber");
//! }
//! ```
//!
//! [`SpanTrace`]: struct.SpanTrace.html
//! [`ErrorLayer`]: struct.ErrorLayer.html
//! [`TracedError`]: struct.TracedError.html
//! [`InstrumentResult`]: trait.InstrumentResult.html
//! [`InstrumentError`]: trait.InstrumentError.html
//! [`ExtractSpanTrace`]: trait.ExtractSpanTrace.html
//! [`in_current_span()`]: trait.InstrumentResult.html#tymethod.in_current_span
//! [span]: https://docs.rs/tracing/latest/tracing/span/index.html
//! [events]: https://docs.rs/tracing/latest/tracing/struct.Event.html
//! [`Subscriber`]: https://docs.rs/tracing/latest/tracing/trait.Subscriber.html
//! [subscriber layer]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/layer/trait.Layer.html
//! [`tracing`]: https://docs.rs/tracing
//! [`std::error::Error`]: https://doc.rust-lang.org/stable/std/error/trait.Error.html
//!
//! ## Supported Rust Versions
//!
//! Tracing is built against the latest stable release. The minimum supported
//! version is 1.42. The current Tracing version is not guaranteed to build on
//! Rust versions earlier than the minimum supported version.
//!
//! Tracing follows the same compiler support policies as the rest of the Tokio
//! project. The current stable Rust compiler and the three most recent minor
//! versions before it will always be supported. For example, if the current
//! stable compiler version is 1.45, the minimum supported version will not be
//! increased past 1.42, three minor versions prior. Increasing the minimum
//! supported compiler version is not considered a semver breaking change as
//! long as doing so complies with this policy.
//!
#![cfg_attr(docsrs, feature(doc_cfg), deny(broken_intra_doc_links))]
#![doc(html_root_url = "https://docs.rs/tracing-error/0.1.2")]
#![doc(
html_logo_url = "https://raw.githubusercontent.com/tokio-rs/tracing/master/assets/logo-type.png",
issue_tracker_base_url = "https://github.com/tokio-rs/tracing/issues/"
)]
#![warn(
missing_debug_implementations,
missing_docs,
rust_2018_idioms,
unreachable_pub,
bad_style,
const_err,
dead_code,
improper_ctypes,
non_shorthand_field_patterns,
no_mangle_generic_items,
overflowing_literals,
path_statements,
patterns_in_fns_without_body,
private_in_public,
unconditional_recursion,
unused,
unused_allocation,
unused_comparisons,
unused_parens,
while_true
)]
mod backtrace;
#[cfg(feature = "traced-error")]
mod error;
mod layer;
pub use self::backtrace::{SpanTrace, SpanTraceStatus};
#[cfg(feature = "traced-error")]
pub use self::error::{ExtractSpanTrace, InstrumentError, InstrumentResult, TracedError};
pub use self::layer::ErrorLayer;
#[cfg(feature = "traced-error")]
#[cfg_attr(docsrs, doc(cfg(feature = "traced-error")))]
pub mod prelude {
//! The `tracing-error` prelude.
//!
    //! This brings into scope the `InstrumentError`, `InstrumentResult`, and `ExtractSpanTrace`
//! extension traits. These traits allow attaching `SpanTrace`s to errors and
//! subsequently retrieving them from `dyn Error` trait objects.
pub use crate::{ExtractSpanTrace as _, InstrumentError as _, InstrumentResult as _};
}
| 34.341772 | 107 | 0.646394 |
fe3fb448f6ffcf93b877d879b4201b6abb7da5e0 | 661 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:index out of bounds: the len is 1024 but the index is 1024
fn main() {
    let v: Vec<usize> = (0..1024).collect();
    // this should trip a bounds check: index one past the end
    println!("{}", v[v.len()]);
}
| 38.882353 | 73 | 0.706505 |
2fca2014317d5928e9c0e395e2420534e3f800b1 | 3,966 | // Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
#![deny(warnings)]
// Enable all clippy lints except for many of the pedantic ones. It's a shame this needs to be copied and pasted across crates, but there doesn't appear to be a way to include inner attributes from a common source.
#![deny(
clippy::all,
clippy::default_trait_access,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::needless_continue,
clippy::unseparated_literal_suffix,
// TODO: Falsely triggers for async/await:
// see https://github.com/rust-lang/rust-clippy/issues/5360
// clippy::used_underscore_binding
)]
// It is often more clear to show that nothing is being moved.
#![allow(clippy::match_ref_pats)]
// Subjective style.
#![allow(
clippy::len_without_is_empty,
clippy::redundant_field_names,
clippy::too_many_arguments
)]
// Default isn't as big a deal as people seem to think it is.
#![allow(clippy::new_without_default, clippy::new_ret_no_self)]
// Arc<Mutex> can be more clear than needing to grok Orderings:
#![allow(clippy::mutex_atomic)]
///
/// Macro to allow debug logging to a file, bypassing the standard logging systems.
/// This is useful for one-off debugging: several developers have found themselves
/// repeatedly writing ad-hoc versions of it when working in the Rust code
/// (particularly when working on logging), so it exists here as a shared macro.
///
/// This should not be used for actual production logging; use the log crate's macros
/// (info!, debug!, trace!) for that.
///
#[macro_export]
macro_rules! debug_log {
($path:expr, $($arg:tt)+) => {
{
use ::std::io::Write;
let mut f = ::std::fs::OpenOptions::new().create(true).append(true).open($path).unwrap();
writeln!(f, $($arg)+).unwrap()
}
};
}
pub mod logger;
pub type Logger = logger::PantsLogger;
use num_enum::TryFromPrimitive;
// This is a hard-coding of constants in the standard logging python package.
#[derive(Debug, Eq, PartialEq, TryFromPrimitive, Clone, Copy)]
#[repr(u64)]
pub enum PythonLogLevel {
NotSet = 0,
// Trace doesn't exist in a Python world, so set it to "a bit lower than Debug".
Trace = 5,
Debug = 10,
Info = 20,
Warn = 30,
Error_ = 40,
Critical = 50,
}
impl From<log::Level> for PythonLogLevel {
fn from(level: log::Level) -> Self {
match level {
log::Level::Error => PythonLogLevel::Error_,
log::Level::Warn => PythonLogLevel::Warn,
log::Level::Info => PythonLogLevel::Info,
log::Level::Debug => PythonLogLevel::Debug,
log::Level::Trace => PythonLogLevel::Trace,
}
}
}
impl From<PythonLogLevel> for log::LevelFilter {
fn from(level: PythonLogLevel) -> Self {
match level {
PythonLogLevel::NotSet => log::LevelFilter::Off,
PythonLogLevel::Trace => log::LevelFilter::Trace,
PythonLogLevel::Debug => log::LevelFilter::Debug,
PythonLogLevel::Info => log::LevelFilter::Info,
PythonLogLevel::Warn => log::LevelFilter::Warn,
PythonLogLevel::Error_ => log::LevelFilter::Error,
// Rust doesn't have a Critical, so treat them like Errors.
PythonLogLevel::Critical => log::LevelFilter::Error,
}
}
}
impl From<PythonLogLevel> for log::Level {
fn from(level: PythonLogLevel) -> Self {
match level {
PythonLogLevel::NotSet => {
panic!("PythonLogLevel::NotSet doesn't have a translation to Level")
}
PythonLogLevel::Trace => log::Level::Trace,
PythonLogLevel::Debug => log::Level::Debug,
PythonLogLevel::Info => log::Level::Info,
PythonLogLevel::Warn => log::Level::Warn,
PythonLogLevel::Error_ => log::Level::Error,
// Rust doesn't have a Critical, so treat them like Errors.
PythonLogLevel::Critical => log::Level::Error,
}
}
}
mod pants_packages {
include!(concat!(env!("OUT_DIR"), "/packages.rs"));
}
| 33.897436 | 214 | 0.680282 |
e29f22b5c6d11ddc20d7f3d8eb4ce20437674ada | 35,589 | use std::cell::RefCell;
use std::collections::hash_map::{Entry, HashMap};
use std::collections::{BTreeMap, HashSet};
use std::path::{Path, PathBuf};
use std::slice;
use glob::glob;
use log::debug;
use url::Url;
use crate::core::features::Features;
use crate::core::profiles::Profiles;
use crate::core::registry::PackageRegistry;
use crate::core::{Dependency, PackageId, PackageIdSpec};
use crate::core::{EitherManifest, Package, SourceId, VirtualManifest};
use crate::ops;
use crate::sources::PathSource;
use crate::util::errors::{CargoResult, CargoResultExt, ManifestError};
use crate::util::paths;
use crate::util::toml::read_manifest;
use crate::util::{Config, Filesystem};
/// The core abstraction in Cargo for working with a workspace of crates.
///
/// A workspace is often created very early on and then threaded through all
/// other functions. It's typically through this object that the current
/// package is loaded and/or learned about.
#[derive(Debug)]
pub struct Workspace<'cfg> {
config: &'cfg Config,
// This path is a path to where the current cargo subcommand was invoked
// from. That is the `--manifest-path` argument to Cargo, and
// points to the "main crate" that we're going to worry about.
current_manifest: PathBuf,
// A list of packages found in this workspace. Always includes at least the
// package mentioned by `current_manifest`.
packages: Packages<'cfg>,
// If this workspace includes more than one crate, this points to the root
// of the workspace. This is `None` in the case that `[workspace]` is
// missing, `package.workspace` is missing, and no `Cargo.toml` above
// `current_manifest` was found on the filesystem with `[workspace]`.
root_manifest: Option<PathBuf>,
// Shared target directory for all the packages of this workspace.
// `None` if the default path of `root/target` should be used.
target_dir: Option<Filesystem>,
// List of members in this workspace with a listing of all their manifest
// paths. The packages themselves can be looked up through the `packages`
// set above.
members: Vec<PathBuf>,
member_ids: HashSet<PackageId>,
// The subset of `members` that are used by the
// `build`, `check`, `test`, and `bench` subcommands
// when no package is selected with `--package` / `-p` and `--workspace`
// is not used.
//
// This is set by the `default-members` config
// in the `[workspace]` section.
// When unset, this is the same as `members` for virtual workspaces
// (`--workspace` is implied)
// or only the root package for non-virtual workspaces.
default_members: Vec<PathBuf>,
// `true` if this is a temporary workspace created for the purposes of the
// `cargo install` or `cargo package` commands.
is_ephemeral: bool,
// `true` if this workspace should enforce optional dependencies even when
// not needed; false if this workspace should only enforce dependencies
// needed by the current configuration (such as in cargo install). In some
// cases `false` also results in the non-enforcement of dev-dependencies.
require_optional_deps: bool,
// A cache of loaded packages for particular paths which is disjoint from
// `packages` up above, used in the `load` method down below.
loaded_packages: RefCell<HashMap<PathBuf, Package>>,
// If `true`, then the resolver will ignore any existing `Cargo.lock`
// file. This is set for `cargo install` without `--locked`.
ignore_lock: bool,
}
// Separate structure for tracking loaded packages (to avoid loading anything
// twice), and this is separate to help appease the borrow checker.
#[derive(Debug)]
struct Packages<'cfg> {
config: &'cfg Config,
packages: HashMap<PathBuf, MaybePackage>,
}
#[derive(Debug)]
enum MaybePackage {
Package(Package),
Virtual(VirtualManifest),
}
/// Configuration of a workspace in a manifest.
#[derive(Debug, Clone)]
pub enum WorkspaceConfig {
/// Indicates that `[workspace]` was present and the members were
/// optionally specified as well.
Root(WorkspaceRootConfig),
/// Indicates that `[workspace]` was present and the `root` field is the
/// optional value of `package.workspace`, if present.
Member { root: Option<String> },
}
/// Intermediate configuration of a workspace root in a manifest.
///
/// Knows the Workspace Root path, as well as `members` and `exclude` lists of path patterns, which
/// together tell if some path is recognized as a member by this root or not.
#[derive(Debug, Clone)]
pub struct WorkspaceRootConfig {
root_dir: PathBuf,
members: Option<Vec<String>>,
default_members: Option<Vec<String>>,
exclude: Vec<String>,
}
/// An iterator over the member packages of a workspace, returned by
/// `Workspace::members`
pub struct Members<'a, 'cfg> {
ws: &'a Workspace<'cfg>,
iter: slice::Iter<'a, PathBuf>,
}
impl<'cfg> Workspace<'cfg> {
/// Creates a new workspace given the target manifest pointed to by
/// `manifest_path`.
///
/// This function will construct the entire workspace by determining the
/// root and all member packages. It will then validate the workspace
/// before returning it, so `Ok` is only returned for valid workspaces.
pub fn new(manifest_path: &Path, config: &'cfg Config) -> CargoResult<Workspace<'cfg>> {
let mut ws = Workspace::new_default(manifest_path.to_path_buf(), config);
ws.target_dir = config.target_dir()?;
ws.root_manifest = ws.find_root(manifest_path)?;
ws.find_members()?;
ws.validate()?;
Ok(ws)
}
fn new_default(current_manifest: PathBuf, config: &'cfg Config) -> Workspace<'cfg> {
Workspace {
config,
current_manifest,
packages: Packages {
config,
packages: HashMap::new(),
},
root_manifest: None,
target_dir: None,
members: Vec::new(),
member_ids: HashSet::new(),
default_members: Vec::new(),
is_ephemeral: false,
require_optional_deps: true,
loaded_packages: RefCell::new(HashMap::new()),
ignore_lock: false,
}
}
pub fn new_virtual(
root_path: PathBuf,
current_manifest: PathBuf,
manifest: VirtualManifest,
config: &'cfg Config,
) -> CargoResult<Workspace<'cfg>> {
let mut ws = Workspace::new_default(current_manifest, config);
ws.root_manifest = Some(root_path.join("Cargo.toml"));
ws.target_dir = config.target_dir()?;
ws.packages
.packages
.insert(root_path, MaybePackage::Virtual(manifest));
ws.find_members()?;
// TODO: validation does not work because it walks up the directory
// tree looking for the root which is a fake file that doesn't exist.
Ok(ws)
}
/// Creates a "temporary workspace" from one package which only contains
/// that package.
///
/// This constructor will not touch the filesystem and only creates an
/// in-memory workspace. That is, all configuration is ignored, it's just
/// intended for that one package.
///
/// This is currently only used in niche situations like `cargo install` or
/// `cargo package`.
pub fn ephemeral(
package: Package,
config: &'cfg Config,
target_dir: Option<Filesystem>,
require_optional_deps: bool,
) -> CargoResult<Workspace<'cfg>> {
let mut ws = Workspace::new_default(package.manifest_path().to_path_buf(), config);
ws.is_ephemeral = true;
ws.require_optional_deps = require_optional_deps;
let key = ws.current_manifest.parent().unwrap();
let id = package.package_id();
let package = MaybePackage::Package(package);
ws.packages.packages.insert(key.to_path_buf(), package);
ws.target_dir = if let Some(dir) = target_dir {
Some(dir)
} else {
ws.config.target_dir()?
};
ws.members.push(ws.current_manifest.clone());
ws.member_ids.insert(id);
ws.default_members.push(ws.current_manifest.clone());
Ok(ws)
}
/// Returns the current package of this workspace.
///
    /// Note that this returns an error if the current manifest is actually a
    /// "virtual Cargo.toml"; the error indicates that an actual package in
    /// the workspace should be targeted instead.
pub fn current(&self) -> CargoResult<&Package> {
let pkg = self.current_opt().ok_or_else(|| {
anyhow::format_err!(
"manifest path `{}` is a virtual manifest, but this \
command requires running against an actual package in \
this workspace",
self.current_manifest.display()
)
})?;
Ok(pkg)
}
pub fn current_opt(&self) -> Option<&Package> {
match *self.packages.get(&self.current_manifest) {
MaybePackage::Package(ref p) => Some(p),
MaybePackage::Virtual(..) => None,
}
}
pub fn is_virtual(&self) -> bool {
match *self.packages.get(&self.current_manifest) {
MaybePackage::Package(..) => false,
MaybePackage::Virtual(..) => true,
}
}
/// Returns the `Config` this workspace is associated with.
pub fn config(&self) -> &'cfg Config {
self.config
}
pub fn profiles(&self) -> &Profiles {
match self.root_maybe() {
MaybePackage::Package(p) => p.manifest().profiles(),
MaybePackage::Virtual(vm) => vm.profiles(),
}
}
/// Returns the root path of this workspace.
///
/// That is, this returns the path of the directory containing the
/// `Cargo.toml` which is the root of this workspace.
pub fn root(&self) -> &Path {
match self.root_manifest {
Some(ref p) => p,
None => &self.current_manifest,
}
.parent()
.unwrap()
}
/// Returns the root Package or VirtualManifest.
fn root_maybe(&self) -> &MaybePackage {
let root = self
.root_manifest
.as_ref()
.unwrap_or(&self.current_manifest);
self.packages.get(root)
}
pub fn target_dir(&self) -> Filesystem {
self.target_dir
.clone()
.unwrap_or_else(|| Filesystem::new(self.root().join("target")))
}
/// Returns the root `[replace]` section of this workspace.
///
/// This may be from a virtual crate or an actual crate.
pub fn root_replace(&self) -> &[(PackageIdSpec, Dependency)] {
match self.root_maybe() {
MaybePackage::Package(p) => p.manifest().replace(),
MaybePackage::Virtual(vm) => vm.replace(),
}
}
/// Returns the root `[patch]` section of this workspace.
///
/// This may be from a virtual crate or an actual crate.
pub fn root_patch(&self) -> &HashMap<Url, Vec<Dependency>> {
match self.root_maybe() {
MaybePackage::Package(p) => p.manifest().patch(),
MaybePackage::Virtual(vm) => vm.patch(),
}
}
/// Returns an iterator over all packages in this workspace
pub fn members<'a>(&'a self) -> Members<'a, 'cfg> {
Members {
ws: self,
iter: self.members.iter(),
}
}
/// Returns an iterator over default packages in this workspace
pub fn default_members<'a>(&'a self) -> Members<'a, 'cfg> {
Members {
ws: self,
iter: self.default_members.iter(),
}
}
/// Returns true if the package is a member of the workspace.
pub fn is_member(&self, pkg: &Package) -> bool {
self.member_ids.contains(&pkg.package_id())
}
pub fn is_ephemeral(&self) -> bool {
self.is_ephemeral
}
pub fn require_optional_deps(&self) -> bool {
self.require_optional_deps
}
pub fn set_require_optional_deps(
&mut self,
require_optional_deps: bool,
) -> &mut Workspace<'cfg> {
self.require_optional_deps = require_optional_deps;
self
}
pub fn ignore_lock(&self) -> bool {
self.ignore_lock
}
pub fn set_ignore_lock(&mut self, ignore_lock: bool) -> &mut Workspace<'cfg> {
self.ignore_lock = ignore_lock;
self
}
/// Finds the root of a workspace for the crate whose manifest is located
/// at `manifest_path`.
///
/// This will parse the `Cargo.toml` at `manifest_path` and then interpret
/// the workspace configuration, optionally walking up the filesystem
/// looking for other workspace roots.
///
/// Returns an error if `manifest_path` isn't actually a valid manifest or
/// if some other transient error happens.
fn find_root(&mut self, manifest_path: &Path) -> CargoResult<Option<PathBuf>> {
fn read_root_pointer(member_manifest: &Path, root_link: &str) -> CargoResult<PathBuf> {
let path = member_manifest
.parent()
.unwrap()
.join(root_link)
.join("Cargo.toml");
debug!("find_root - pointer {}", path.display());
Ok(paths::normalize_path(&path))
        }
{
let current = self.packages.load(manifest_path)?;
match *current.workspace_config() {
WorkspaceConfig::Root(_) => {
debug!("find_root - is root {}", manifest_path.display());
return Ok(Some(manifest_path.to_path_buf()));
}
WorkspaceConfig::Member {
root: Some(ref path_to_root),
} => return Ok(Some(read_root_pointer(manifest_path, path_to_root)?)),
WorkspaceConfig::Member { root: None } => {}
}
}
for path in paths::ancestors(manifest_path).skip(2) {
if path.ends_with("target/package") {
break;
}
let ances_manifest_path = path.join("Cargo.toml");
debug!("find_root - trying {}", ances_manifest_path.display());
if ances_manifest_path.exists() {
match *self.packages.load(&ances_manifest_path)?.workspace_config() {
WorkspaceConfig::Root(ref ances_root_config) => {
debug!("find_root - found a root checking exclusion");
if !ances_root_config.is_excluded(manifest_path) {
debug!("find_root - found!");
return Ok(Some(ances_manifest_path));
}
}
WorkspaceConfig::Member {
root: Some(ref path_to_root),
} => {
debug!("find_root - found pointer");
return Ok(Some(read_root_pointer(&ances_manifest_path, path_to_root)?));
}
WorkspaceConfig::Member { .. } => {}
}
}
// Don't walk across `CARGO_HOME` when we're looking for the
// workspace root. Sometimes a package will be organized with
// `CARGO_HOME` pointing inside of the workspace root or in the
// current package, but we don't want to mistakenly try to put
// crates.io crates into the workspace by accident.
if self.config.home() == path {
break;
}
}
Ok(None)
}
/// After the root of a workspace has been located, probes for all members
/// of a workspace.
///
/// If the `workspace.members` configuration is present, then this just
/// verifies that those are all valid packages to point to. Otherwise, this
/// will transitively follow all `path` dependencies looking for members of
/// the workspace.
fn find_members(&mut self) -> CargoResult<()> {
let root_manifest_path = match self.root_manifest {
Some(ref path) => path.clone(),
None => {
debug!("find_members - only me as a member");
self.members.push(self.current_manifest.clone());
self.default_members.push(self.current_manifest.clone());
if let Ok(pkg) = self.current() {
let id = pkg.package_id();
self.member_ids.insert(id);
}
return Ok(());
}
};
let members_paths;
let default_members_paths;
{
let root_package = self.packages.load(&root_manifest_path)?;
match *root_package.workspace_config() {
WorkspaceConfig::Root(ref root_config) => {
members_paths = root_config
.members_paths(root_config.members.as_ref().unwrap_or(&vec![]))?;
default_members_paths = if root_manifest_path == self.current_manifest {
if let Some(ref default) = root_config.default_members {
Some(root_config.members_paths(default)?)
} else {
None
}
} else {
None
};
}
_ => anyhow::bail!(
"root of a workspace inferred but wasn't a root: {}",
root_manifest_path.display()
),
}
}
for path in members_paths {
self.find_path_deps(&path.join("Cargo.toml"), &root_manifest_path, false)?;
}
if let Some(default) = default_members_paths {
for path in default {
let manifest_path = paths::normalize_path(&path.join("Cargo.toml"));
if !self.members.contains(&manifest_path) {
anyhow::bail!(
"package `{}` is listed in workspace’s default-members \
but is not a member.",
path.display()
)
}
self.default_members.push(manifest_path)
}
} else if self.is_virtual() {
self.default_members = self.members.clone()
} else {
self.default_members.push(self.current_manifest.clone())
}
self.find_path_deps(&root_manifest_path, &root_manifest_path, false)
}
fn find_path_deps(
&mut self,
manifest_path: &Path,
root_manifest: &Path,
is_path_dep: bool,
) -> CargoResult<()> {
let manifest_path = paths::normalize_path(manifest_path);
if self.members.contains(&manifest_path) {
return Ok(());
}
if is_path_dep
&& !manifest_path.parent().unwrap().starts_with(self.root())
&& self.find_root(&manifest_path)? != self.root_manifest
{
// If `manifest_path` is a path dependency outside of the workspace,
// don't add it, or any of its dependencies, as a members.
return Ok(());
}
if let WorkspaceConfig::Root(ref root_config) =
*self.packages.load(root_manifest)?.workspace_config()
{
if root_config.is_excluded(&manifest_path) {
return Ok(());
}
}
debug!("find_members - {}", manifest_path.display());
self.members.push(manifest_path.clone());
let candidates = {
let pkg = match *self.packages.load(&manifest_path)? {
MaybePackage::Package(ref p) => p,
MaybePackage::Virtual(_) => return Ok(()),
};
self.member_ids.insert(pkg.package_id());
pkg.dependencies()
.iter()
.map(|d| d.source_id())
.filter(|d| d.is_path())
.filter_map(|d| d.url().to_file_path().ok())
.map(|p| p.join("Cargo.toml"))
.collect::<Vec<_>>()
};
for candidate in candidates {
self.find_path_deps(&candidate, root_manifest, true)
.map_err(|err| ManifestError::new(err, manifest_path.clone()))?;
}
Ok(())
}
pub fn features(&self) -> &Features {
match self.root_maybe() {
MaybePackage::Package(p) => p.manifest().features(),
MaybePackage::Virtual(vm) => vm.features(),
}
}
/// Validates a workspace, ensuring that a number of invariants are upheld:
///
/// 1. A workspace only has one root.
/// 2. All workspace members agree on this one root as the root.
/// 3. The current crate is a member of this workspace.
fn validate(&mut self) -> CargoResult<()> {
// Validate config profiles only once per workspace.
let features = self.features();
let mut warnings = Vec::new();
self.config.profiles()?.validate(features, &mut warnings)?;
for warning in warnings {
self.config.shell().warn(&warning)?;
}
// The rest of the checks require a VirtualManifest or multiple members.
if self.root_manifest.is_none() {
return Ok(());
}
let mut roots = Vec::new();
{
let mut names = BTreeMap::new();
for member in self.members.iter() {
let package = self.packages.get(member);
match *package.workspace_config() {
WorkspaceConfig::Root(_) => {
roots.push(member.parent().unwrap().to_path_buf());
}
WorkspaceConfig::Member { .. } => {}
}
let name = match *package {
MaybePackage::Package(ref p) => p.name(),
MaybePackage::Virtual(_) => continue,
};
if let Some(prev) = names.insert(name, member) {
anyhow::bail!(
"two packages named `{}` in this workspace:\n\
- {}\n\
- {}",
name,
prev.display(),
member.display()
);
}
}
}
match roots.len() {
0 => anyhow::bail!(
"`package.workspace` configuration points to a crate \
which is not configured with [workspace]: \n\
configuration at: {}\n\
points to: {}",
self.current_manifest.display(),
self.root_manifest.as_ref().unwrap().display()
),
1 => {}
_ => {
anyhow::bail!(
"multiple workspace roots found in the same workspace:\n{}",
roots
.iter()
.map(|r| format!(" {}", r.display()))
.collect::<Vec<_>>()
.join("\n")
);
}
}
for member in self.members.clone() {
let root = self.find_root(&member)?;
if root == self.root_manifest {
continue;
}
match root {
Some(root) => {
anyhow::bail!(
"package `{}` is a member of the wrong workspace\n\
expected: {}\n\
actual: {}",
member.display(),
self.root_manifest.as_ref().unwrap().display(),
root.display()
);
}
None => {
anyhow::bail!(
"workspace member `{}` is not hierarchically below \
the workspace root `{}`",
member.display(),
self.root_manifest.as_ref().unwrap().display()
);
}
}
}
if !self.members.contains(&self.current_manifest) {
let root = self.root_manifest.as_ref().unwrap();
let root_dir = root.parent().unwrap();
let current_dir = self.current_manifest.parent().unwrap();
let root_pkg = self.packages.get(root);
// FIXME: Make this more generic by using a relative path resolver between member and
// root.
let members_msg = match current_dir.strip_prefix(root_dir) {
Ok(rel) => format!(
"this may be fixable by adding `{}` to the \
`workspace.members` array of the manifest \
located at: {}",
rel.display(),
root.display()
),
Err(_) => format!(
"this may be fixable by adding a member to \
the `workspace.members` array of the \
manifest located at: {}",
root.display()
),
};
let extra = match *root_pkg {
MaybePackage::Virtual(_) => members_msg,
MaybePackage::Package(ref p) => {
let has_members_list = match *p.manifest().workspace_config() {
WorkspaceConfig::Root(ref root_config) => root_config.has_members_list(),
WorkspaceConfig::Member { .. } => unreachable!(),
};
if !has_members_list {
format!(
"this may be fixable by ensuring that this \
crate is depended on by the workspace \
root: {}",
root.display()
)
} else {
members_msg
}
}
};
anyhow::bail!(
"current package believes it's in a workspace when it's not:\n\
current: {}\n\
workspace: {}\n\n{}\n\
Alternatively, to keep it out of the workspace, add the package \
to the `workspace.exclude` array, or add an empty `[workspace]` \
table to the package's manifest.",
self.current_manifest.display(),
root.display(),
extra
);
}
if let Some(ref root_manifest) = self.root_manifest {
for pkg in self
.members()
.filter(|p| p.manifest_path() != root_manifest)
{
let manifest = pkg.manifest();
let emit_warning = |what| -> CargoResult<()> {
let msg = format!(
"{} for the non root package will be ignored, \
specify {} at the workspace root:\n\
package: {}\n\
workspace: {}",
what,
what,
pkg.manifest_path().display(),
root_manifest.display(),
);
self.config.shell().warn(&msg)
};
if manifest.original().has_profiles() {
emit_warning("profiles")?;
}
if !manifest.replace().is_empty() {
emit_warning("replace")?;
}
if !manifest.patch().is_empty() {
emit_warning("patch")?;
}
}
}
Ok(())
}
pub fn load(&self, manifest_path: &Path) -> CargoResult<Package> {
match self.packages.maybe_get(manifest_path) {
Some(&MaybePackage::Package(ref p)) => return Ok(p.clone()),
Some(&MaybePackage::Virtual(_)) => anyhow::bail!("cannot load workspace root"),
None => {}
}
let mut loaded = self.loaded_packages.borrow_mut();
if let Some(p) = loaded.get(manifest_path).cloned() {
return Ok(p);
}
let source_id = SourceId::for_path(manifest_path.parent().unwrap())?;
let (package, _nested_paths) = ops::read_package(manifest_path, source_id, self.config)?;
loaded.insert(manifest_path.to_path_buf(), package.clone());
Ok(package)
}
/// Preload the provided registry with already loaded packages.
///
/// A workspace may load packages during construction/parsing/early phases
/// for various operations, and this preload step avoids doubly-loading and
/// parsing crates on the filesystem by inserting them all into the registry
/// with their in-memory formats.
pub fn preload(&self, registry: &mut PackageRegistry<'cfg>) {
// These can get weird as this generally represents a workspace during
// `cargo install`. Things like git repositories will actually have a
// `PathSource` with multiple entries in it, so the logic below is
// mostly just an optimization for normal `cargo build` in workspaces
// during development.
if self.is_ephemeral {
return;
}
for pkg in self.packages.packages.values() {
let pkg = match *pkg {
MaybePackage::Package(ref p) => p.clone(),
MaybePackage::Virtual(_) => continue,
};
let mut src = PathSource::new(
pkg.manifest_path(),
pkg.package_id().source_id(),
self.config,
);
src.preload_with(pkg);
registry.add_preloaded(Box::new(src));
}
}
pub fn emit_warnings(&self) -> CargoResult<()> {
for (path, maybe_pkg) in &self.packages.packages {
let warnings = match maybe_pkg {
MaybePackage::Package(pkg) => pkg.manifest().warnings().warnings(),
MaybePackage::Virtual(vm) => vm.warnings().warnings(),
};
let path = path.join("Cargo.toml");
for warning in warnings {
if warning.is_critical {
let err = anyhow::format_err!("{}", warning.message);
let cx =
anyhow::format_err!("failed to parse manifest at `{}`", path.display());
return Err(err.context(cx).into());
} else {
let msg = if self.root_manifest.is_none() {
warning.message.to_string()
} else {
// In a workspace, it can be confusing where a warning
// originated, so include the path.
format!("{}: {}", path.display(), warning.message)
};
self.config.shell().warn(msg)?
}
}
}
Ok(())
}
}
impl<'cfg> Packages<'cfg> {
fn get(&self, manifest_path: &Path) -> &MaybePackage {
self.maybe_get(manifest_path).unwrap()
}
fn maybe_get(&self, manifest_path: &Path) -> Option<&MaybePackage> {
self.packages.get(manifest_path.parent().unwrap())
}
fn load(&mut self, manifest_path: &Path) -> CargoResult<&MaybePackage> {
let key = manifest_path.parent().unwrap();
match self.packages.entry(key.to_path_buf()) {
Entry::Occupied(e) => Ok(e.into_mut()),
Entry::Vacant(v) => {
let source_id = SourceId::for_path(key)?;
let (manifest, _nested_paths) =
read_manifest(manifest_path, source_id, self.config)?;
Ok(v.insert(match manifest {
EitherManifest::Real(manifest) => {
MaybePackage::Package(Package::new(manifest, manifest_path))
}
EitherManifest::Virtual(vm) => MaybePackage::Virtual(vm),
}))
}
}
}
}
impl<'a, 'cfg> Iterator for Members<'a, 'cfg> {
type Item = &'a Package;
fn next(&mut self) -> Option<&'a Package> {
loop {
let next = self.iter.next().map(|path| self.ws.packages.get(path));
match next {
Some(&MaybePackage::Package(ref p)) => return Some(p),
Some(&MaybePackage::Virtual(_)) => {}
None => return None,
}
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let (_, upper) = self.iter.size_hint();
(0, upper)
}
}
impl MaybePackage {
fn workspace_config(&self) -> &WorkspaceConfig {
match *self {
MaybePackage::Package(ref p) => p.manifest().workspace_config(),
MaybePackage::Virtual(ref vm) => vm.workspace_config(),
}
}
}
impl WorkspaceRootConfig {
/// Creates a new Intermediate Workspace Root configuration.
pub fn new(
root_dir: &Path,
members: &Option<Vec<String>>,
default_members: &Option<Vec<String>>,
exclude: &Option<Vec<String>>,
) -> WorkspaceRootConfig {
WorkspaceRootConfig {
root_dir: root_dir.to_path_buf(),
members: members.clone(),
default_members: default_members.clone(),
exclude: exclude.clone().unwrap_or_default(),
}
}
    /// Checks the path against the `exclude` list.
///
/// This method does **not** consider the `members` list.
fn is_excluded(&self, manifest_path: &Path) -> bool {
let excluded = self
.exclude
.iter()
.any(|ex| manifest_path.starts_with(self.root_dir.join(ex)));
let explicit_member = match self.members {
Some(ref members) => members
.iter()
.any(|mem| manifest_path.starts_with(self.root_dir.join(mem))),
None => false,
};
!explicit_member && excluded
}
fn has_members_list(&self) -> bool {
self.members.is_some()
}
fn members_paths(&self, globs: &[String]) -> CargoResult<Vec<PathBuf>> {
let mut expanded_list = Vec::new();
for glob in globs {
let pathbuf = self.root_dir.join(glob);
let expanded_paths = Self::expand_member_path(&pathbuf)?;
// If glob does not find any valid paths, then put the original
// path in the expanded list to maintain backwards compatibility.
if expanded_paths.is_empty() {
expanded_list.push(pathbuf);
} else {
expanded_list.extend(expanded_paths);
}
}
Ok(expanded_list)
}
fn expand_member_path(path: &Path) -> CargoResult<Vec<PathBuf>> {
let path = match path.to_str() {
Some(p) => p,
None => return Ok(Vec::new()),
};
let res =
glob(path).chain_err(|| anyhow::format_err!("could not parse pattern `{}`", &path))?;
let res = res
.map(|p| {
p.chain_err(|| anyhow::format_err!("unable to match path to pattern `{}`", &path))
})
.collect::<Result<Vec<_>, _>>()?;
Ok(res)
}
}
| 37.344176 | 99 | 0.539099 |
48d8c8e50b7602a03ea9352bd78c24a18e17fb09 | 704 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused_must_use)]
// ignore-emscripten no threads support
// pretty-expanded FIXME #23616
use std::thread;
pub fn main() {
thread::spawn(move|| child("Hello".to_string()) ).join();
}
fn child(_s: String) {
}
| 28.16 | 68 | 0.71733 |
0e556790586feb250cb791236dde1f983e73d059 | 2,141 | //! # Highway
//!
//! The core logic of Casper' Highway consensus protocol.
//!
//! At the center of Highway are:
//! * the _protocol state_, a grow-only data structure which can be considered a directed acyclic
//! graph (DAG), and needs to be continually synchronized among the participating nodes,
//! * rules for the active participants — the _validators_ — to create and add new vertices, and
//! * a finality detector that provides a criterion to consider a block "finalized". Finalized
//! blocks are guaranteed to remain finalized as the DAG grows further, unless too many validators
//! are malicious.
//!
//! It's not a complete protocol. To implement permissioned consensus, several components must be
//! added:
//! * Networking, serialization and cryptographic primitives for signing and hashing.
//! * A _synchronizer_ that exchanges messages with other participating nodes to exchange their DAG
//! vertices and ensure that each vertex becomes eventually known to every node.
//! * Semantics for the consensus values, which can e.g. represent token transfers, or programs to
//! be executed in a virtual machine for a smart contract platform.
//! * Signing of finalized blocks, as a finality proof to third parties/clients.
//!
//! Note that consensus values should be small. If they represent a lot of data, e.g. lists of
//! complex transactions, they should not be passed into `highway_core` directly. Instead, the
//! consensus value should be the list's hash.
//!
//! Permissioned consensus protocols can also be used in a _permissionless_ Proof-of-Stake context,
//! or with some other governance system that can add and remove validators, by starting a new
//! protocol instance whenever the set of validators changes.
// This needs to come before the other modules, so the macros are available everywhere.
#[cfg(test)]
#[macro_use]
mod test_macros;
pub(crate) mod active_validator;
pub(crate) mod finality_detector;
pub(crate) mod highway;
pub(crate) mod state;
pub(crate) mod validators;
mod endorsement;
mod evidence;
#[cfg(test)]
pub(crate) mod highway_testing;
pub(crate) use state::{State, Weight};
| 45.553191 | 100 | 0.754787 |
f41c8c7eafe717a40242dedea7c4929bcf8568e2 | 20,221 | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::build::{
build_constant_value, build_type_annotation, build_variable_definitions, ValidationLevel,
};
use crate::constants::ARGUMENT_DEFINITION;
use crate::errors::{ValidationMessage, ValidationMessageWithData};
use crate::ir::{ConstantValue, VariableDefinition};
use crate::{associated_data_impl, build_directive};
use common::{
Diagnostic, DiagnosticsResult, FeatureFlag, Location, SourceLocationKey, WithLocation,
};
use errors::{par_try_map, try2};
use intern::string_key::{Intern, StringKey, StringKeyMap};
use lazy_static::lazy_static;
use schema::suggestion_list::GraphQLSuggestions;
use schema::{SDLSchema, Schema, Type, TypeReference};
use std::collections::HashMap;
lazy_static! {
static ref TYPE: StringKey = "type".intern();
static ref DEFAULT_VALUE: StringKey = "defaultValue".intern();
static ref PROVIDER: StringKey = "provider".intern();
pub static ref UNUSED_LOCAL_VARIABLE_DEPRECATED: StringKey =
"unusedLocalVariable_DEPRECATED".intern();
static ref DIRECTIVES: StringKey = "directives".intern();
}
pub type FragmentSignatures = StringKeyMap<FragmentSignature>;
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ProvidedVariableMetadata {
pub module_name: StringKey,
pub original_variable_name: StringKey,
}
associated_data_impl!(ProvidedVariableMetadata);
/// Describes the public API of a fragment, excluding its selections.
/// When translating ASTs to IR, fragment spread arguments must be
/// checked against the corresponding fragment definition. However,
/// this process can't use the IR fragment definition, since that
/// would depend on having checked its body! Since recursive fragments
/// are allowed, we break the cycle by first computing signatures
/// and using these to type check fragment spreads in selections.
#[derive(Debug, Eq, PartialEq)]
pub struct FragmentSignature {
pub name: WithLocation<StringKey>,
pub variable_definitions: Vec<VariableDefinition>,
pub type_condition: Type,
}
pub fn build_signatures(
schema: &SDLSchema,
definitions: &[graphql_syntax::ExecutableDefinition],
enable_provided_variables: &FeatureFlag,
) -> DiagnosticsResult<FragmentSignatures> {
let suggestions = GraphQLSuggestions::new(schema);
let mut seen_signatures: StringKeyMap<FragmentSignature> =
HashMap::with_capacity_and_hasher(definitions.len(), Default::default());
let signatures = par_try_map(definitions, |definition| match definition {
graphql_syntax::ExecutableDefinition::Fragment(fragment) => Ok(Some(
build_fragment_signature(schema, fragment, &suggestions, enable_provided_variables)?,
)),
graphql_syntax::ExecutableDefinition::Operation(_) => Ok(None),
})?;
let mut errors = Vec::new();
for signature in signatures.into_iter().flatten() {
let previous_signature = seen_signatures.get(&signature.name.item);
if let Some(previous_signature) = previous_signature {
errors.push(
Diagnostic::error(
ValidationMessage::DuplicateDefinition(signature.name.item),
previous_signature.name.location,
)
.annotate("also defined here", signature.name.location),
);
continue;
}
seen_signatures.insert(signature.name.item, signature);
}
if errors.is_empty() {
Ok(seen_signatures)
} else {
Err(errors)
}
}
fn build_fragment_signature(
schema: &SDLSchema,
fragment: &graphql_syntax::FragmentDefinition,
suggestions: &GraphQLSuggestions<'_>,
enable_provided_variables: &FeatureFlag,
) -> DiagnosticsResult<FragmentSignature> {
let type_name = fragment.type_condition.type_.value;
let type_condition = match schema.get_type(type_name) {
Some(type_condition) => match type_condition {
Type::Interface(..) | Type::Object(..) | Type::Union(..) => Ok(type_condition),
_ => Err(Diagnostic::error(
ValidationMessage::ExpectedCompositeType(type_condition),
fragment
.location
.with_span(fragment.type_condition.type_.span),
)
.into()),
},
None => Err(Diagnostic::error_with_data(
ValidationMessageWithData::UnknownType {
type_name,
suggestions: suggestions.composite_type_suggestions(type_name),
},
fragment
.location
.with_span(fragment.type_condition.type_.span),
)
.into()),
};
let argument_definition_directives = fragment
.directives
.iter()
.filter(|x| x.name.value == *ARGUMENT_DEFINITION)
.collect::<Vec<_>>();
if fragment.variable_definitions.is_some() && !argument_definition_directives.is_empty() {
return Err(Diagnostic::error(
ValidationMessage::VariableDefinitionsAndArgumentDirective(),
fragment
.location
.with_span(argument_definition_directives[0].span),
)
.annotate(
"variables are previously defined here",
fragment.location.with_span(
fragment
.variable_definitions
.as_ref()
.map(|list| list.span)
.unwrap(),
),
)
.into());
} else if argument_definition_directives.len() > 1 {
return Err(Diagnostic::error(
ValidationMessage::ExpectedOneArgumentDefinitionsDirective(),
fragment
.location
.with_span(argument_definition_directives[1].span),
)
.into());
}
let variable_definitions = fragment
.variable_definitions
.as_ref()
.map(|variable_definitions| {
build_variable_definitions(schema, &variable_definitions.items, fragment.location)
})
.or_else(|| {
argument_definition_directives.get(0).map(|x| {
build_fragment_variable_definitions(schema, fragment, x, enable_provided_variables)
})
})
.unwrap_or_else(|| Ok(Default::default()));
let (type_condition, variable_definitions) = try2(type_condition, variable_definitions)?;
Ok(FragmentSignature {
name: fragment
.name
.name_with_location(fragment.location.source_location()),
type_condition,
variable_definitions,
})
}
fn build_fragment_variable_definitions(
schema: &SDLSchema,
fragment: &graphql_syntax::FragmentDefinition,
directive: &graphql_syntax::Directive,
enable_provided_variables: &FeatureFlag,
) -> DiagnosticsResult<Vec<VariableDefinition>> {
if let Some(arguments) = &directive.arguments {
Ok(arguments
.items
.iter()
.map(|variable_arg| {
if let graphql_syntax::Value::Constant(graphql_syntax::ConstantValue::Object(
object,
)) = &variable_arg.value
{
let mut type_arg = None;
let mut default_arg = None;
let mut unused_local_variable_arg = None;
let mut provider_arg = None;
let mut directives_arg = None;
let mut extra_items = Vec::new();
for item in &object.items {
let name = item.name.value;
if name == *TYPE {
type_arg = Some(item);
} else if name == *DEFAULT_VALUE {
default_arg = Some(item);
} else if name == *UNUSED_LOCAL_VARIABLE_DEPRECATED {
unused_local_variable_arg = Some(item);
} else if name == *DIRECTIVES {
directives_arg = Some(item);
} else if name == *PROVIDER {
if !enable_provided_variables.is_enabled_for(fragment.name.value) {
return Err(vec![Diagnostic::error(
format!("Invalid usage of provided variable: this feature is gated and currently set to {}",
enable_provided_variables),
fragment.location.with_span(item.span),
)]);
}
provider_arg = Some(item);
} else {
extra_items.push(item);
}
}
// Check that no extraneous keys were supplied
if !extra_items.is_empty() {
return Err(extra_items
.iter()
.map(|item| {
Diagnostic::error(
ValidationMessage::InvalidArgumentDefinitionsKey(
item.name.value,
),
fragment.location.with_span(item.span),
)
})
.collect());
}
let variable_name = &variable_arg.name;
let mut directives = Vec::new();
// Convert variable type, validate that it's an input type
let type_ = get_argument_type(schema, fragment.location, type_arg, object)?;
if !type_.inner().is_input_type() {
return Err(Diagnostic::error(
ValidationMessage::ExpectedFragmentArgumentToHaveInputType(
schema.get_type_name(type_.inner()),
),
fragment.location.with_span(variable_arg.value.span()),
)
.into());
}
if let Some(unused_local_variable_arg) = unused_local_variable_arg {
if !matches!(
unused_local_variable_arg,
graphql_syntax::ConstantArgument {
value: graphql_syntax::ConstantValue::Boolean(
graphql_syntax::BooleanNode { value: true, .. }
),
..
}
) {
return Err(vec![Diagnostic::error(
ValidationMessage::InvalidUnusedFragmentVariableSuppressionArg,
fragment
.location
.with_span(unused_local_variable_arg.value.span()),
)]);
}
directives.push(crate::Directive {
name: WithLocation::new(
fragment.location.with_span(unused_local_variable_arg.span),
*UNUSED_LOCAL_VARIABLE_DEPRECATED,
),
arguments: Vec::new(),
data: None,
});
}
if let Some(provider_arg) = provider_arg {
let provider_module_name = provider_arg.value.get_string_literal().ok_or_else(|| {
vec![Diagnostic::error(
ValidationMessage::LiteralStringArgumentExpectedForDirective{arg_name: *PROVIDER, directive_name: *ARGUMENT_DEFINITION },
fragment
.location
.with_span(provider_arg.value.span()),
)]
})?;
if let Some(default_arg_) = default_arg {
return Err(vec![Diagnostic::error(
ValidationMessage::ProvidedVariableIncompatibleWithDefaultValue{argument_name: variable_name.value},
fragment
.location
.with_span(provider_arg.span),
).annotate("Default value declared here",
fragment
.location
.with_span(default_arg_.span))]);
}
directives.push(crate::Directive {
name: WithLocation::new(
fragment.location.with_span(provider_arg.span),
ProvidedVariableMetadata::directive_name(),
),
arguments: Vec::new(),
data: Some(Box::new(ProvidedVariableMetadata{
module_name: provider_module_name,
original_variable_name: variable_name.value
})),
});
}
if let Some(directives_arg) = directives_arg {
if let graphql_syntax::ConstantValue::List(items) = &directives_arg.value {
for item in &items.items {
if let graphql_syntax::ConstantValue::String(directive_string) = item {
let ast_directive = graphql_syntax::parse_directive(
directive_string.value.lookup(),
// We currently don't have the ability to pass offset locations
// to the parser call, so we first use a generated location and
// later override it with an approximation.
SourceLocationKey::generated(),
)
.map_err(|mut diagnostics| {
for diagnostic in &mut diagnostics {
diagnostic.override_location(fragment.location.with_span(directive_string.token.span));
}
diagnostics
})?;
let directive = build_directive(
schema,
&ast_directive,
graphql_syntax::DirectiveLocation::VariableDefinition,
// We currently don't have the ability to pass offset locations
// to the parser call, so we first use a generated location and
// later override it with an approximation.
Location::generated(),
)
.map_err(|mut diagnostics| {
for diagnostic in &mut diagnostics {
diagnostic.override_location(fragment.location.with_span(directive_string.token.span));
}
diagnostics
})?;
directives.push(directive);
} else {
return Err(vec![Diagnostic::error(
ValidationMessage::ArgumentDefinitionsDirectivesNotStringListLiteral,
fragment.location.with_span(item.span()),
)]);
}
}
} else {
return Err(vec![Diagnostic::error(
ValidationMessage::ArgumentDefinitionsDirectivesNotStringListLiteral,
fragment.location.with_span(directives_arg.value.span()),
)]);
}
}
let default_value =
get_default_value(schema, fragment.location, default_arg, &type_)?;
Ok(VariableDefinition {
name: variable_name
.name_with_location(fragment.location.source_location()),
type_,
directives,
default_value,
})
} else {
Err(Diagnostic::error(
ValidationMessage::ExpectedArgumentDefinitionToBeObject(),
fragment.location.with_span(variable_arg.value.span()),
)
.into())
}
})
.collect::<DiagnosticsResult<Vec<VariableDefinition>>>()?)
} else {
Ok(Default::default())
}
}
fn get_argument_type(
schema: &SDLSchema,
location: Location,
type_arg: Option<&graphql_syntax::ConstantArgument>,
object: &graphql_syntax::List<graphql_syntax::ConstantArgument>,
) -> DiagnosticsResult<TypeReference> {
let type_name_and_span = match type_arg {
Some(graphql_syntax::ConstantArgument {
value: graphql_syntax::ConstantValue::String(type_name_node),
span,
..
}) => Some((type_name_node.value, span)),
Some(graphql_syntax::ConstantArgument {
value: graphql_syntax::ConstantValue::Enum(type_name_node),
span,
..
}) => Some((type_name_node.value, span)),
_ => None,
};
if let Some((type_name, &span)) = type_name_and_span {
let type_ast = graphql_syntax::parse_type(type_name.lookup(), location.source_location())
.map_err(|diagnostics| {
diagnostics
.into_iter()
.map(|diagnostic| {
let message = diagnostic.message().to_string();
Diagnostic::error(
message,
// TODO: ideally, `parse_type()` would take in the offset
// location and report the error at the right location.
location.with_span(span),
)
})
.collect::<Vec<_>>()
})?;
let type_ = build_type_annotation(schema, &type_ast, location)?;
Ok(type_)
} else {
Err(Diagnostic::error(
ValidationMessage::ExpectedArgumentDefinitionLiteralType(),
location.with_span(type_arg.map_or(object.span, |x| x.span)),
)
.into())
}
}
fn get_default_value(
schema: &SDLSchema,
location: Location,
default_arg: Option<&graphql_syntax::ConstantArgument>,
type_: &TypeReference,
) -> DiagnosticsResult<Option<WithLocation<ConstantValue>>> {
default_arg
.map(|x| {
let constant_value_span = x.value.span();
build_constant_value(schema, &x.value, type_, location, ValidationLevel::Strict).map(
|constant_value| {
WithLocation::from_span(
location.source_location(),
constant_value_span,
constant_value,
)
},
)
})
.transpose()
}
| 44.83592 | 153 | 0.505564 |
1d74778392002e35df9354dd0eff386353b8ca03 | 3,468 | mod dir;
mod file;
pub mod sched;
pub mod stdio;
use std::future::Future;
use std::path::Path;
pub use wasi_cap_std_sync::{clocks_ctx, random_ctx};
use wasi_common::{Error, Table, WasiCtx, WasiFile};
pub use dir::Dir;
pub use file::File;
use crate::sched::sched_ctx;
pub struct WasiCtxBuilder(WasiCtx);
impl WasiCtxBuilder {
pub fn new() -> Self {
WasiCtxBuilder(WasiCtx::new(
random_ctx(),
clocks_ctx(),
sched_ctx(),
Table::new(),
))
}
pub fn env(mut self, var: &str, value: &str) -> Result<Self, wasi_common::StringArrayError> {
self.0.push_env(var, value)?;
Ok(self)
}
pub fn envs(mut self, env: &[(String, String)]) -> Result<Self, wasi_common::StringArrayError> {
for (k, v) in env {
self.0.push_env(k, v)?;
}
Ok(self)
}
pub fn inherit_env(mut self) -> Result<Self, wasi_common::StringArrayError> {
for (key, value) in std::env::vars() {
self.0.push_env(&key, &value)?;
}
Ok(self)
}
pub fn arg(mut self, arg: &str) -> Result<Self, wasi_common::StringArrayError> {
self.0.push_arg(arg)?;
Ok(self)
}
pub fn args(mut self, arg: &[String]) -> Result<Self, wasi_common::StringArrayError> {
for a in arg {
self.0.push_arg(&a)?;
}
Ok(self)
}
pub fn inherit_args(mut self) -> Result<Self, wasi_common::StringArrayError> {
for arg in std::env::args() {
self.0.push_arg(&arg)?;
}
Ok(self)
}
pub fn stdin(mut self, f: Box<dyn WasiFile>) -> Self {
self.0.set_stdin(f);
self
}
pub fn stdout(mut self, f: Box<dyn WasiFile>) -> Self {
self.0.set_stdout(f);
self
}
pub fn stderr(mut self, f: Box<dyn WasiFile>) -> Self {
self.0.set_stderr(f);
self
}
pub fn inherit_stdin(self) -> Self {
self.stdin(Box::new(crate::stdio::stdin()))
}
pub fn inherit_stdout(self) -> Self {
self.stdout(Box::new(crate::stdio::stdout()))
}
pub fn inherit_stderr(self) -> Self {
self.stderr(Box::new(crate::stdio::stderr()))
}
pub fn inherit_stdio(self) -> Self {
self.inherit_stdin().inherit_stdout().inherit_stderr()
}
pub fn preopened_dir(
mut self,
dir: cap_std::fs::Dir,
guest_path: impl AsRef<Path>,
) -> Result<Self, Error> {
let dir = Box::new(crate::dir::Dir::from_cap_std(dir));
self.0.push_preopened_dir(dir, guest_path)?;
Ok(self)
}
pub fn build(self) -> WasiCtx {
self.0
}
}
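// Illustrative usage sketch (added; not part of the original file): a host
// embedding assembling a `WasiCtx` with the builder above. The caller is
// assumed to have already opened `dir` via cap-std; only infallible builder
// steps plus `preopened_dir` are shown, so the error type is the builder's
// own `Error`.
#[allow(dead_code)]
fn example_build_ctx(dir: cap_std::fs::Dir) -> Result<WasiCtx, Error> {
    Ok(WasiCtxBuilder::new()
        .inherit_stdio()
        .preopened_dir(dir, "/")?
        .build())
}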
// Much of this crate is implemented in terms of `async` methods from the
// wasi-cap-std-sync crate. These methods may be async in signature; however,
// they are synchronous in implementation (always `Poll::Ready` on first poll)
// and perform blocking syscalls.
//
// This function takes this blocking code and executes it using a dummy executor
// to assert its immediate readiness. We tell tokio this is a blocking operation
// with the block_in_place function.
pub(crate) fn block_on_dummy_executor<'a, F, Fut, T>(f: F) -> Result<T, Error>
where
F: FnOnce() -> Fut + Send + 'a,
Fut: Future<Output = Result<T, Error>>,
T: Send + 'static,
{
tokio::task::block_in_place(move || {
wiggle::run_in_dummy_executor(f()).expect("wrapped operation should be synchronous")
})
}
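// Typical call-site shape (an added sketch; `inner_datasync` is a hypothetical
// stand-in for whichever wrapped wasi-cap-std-sync method is being delegated):
//
//     block_on_dummy_executor(|| self.0.inner_datasync())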
| 30.156522 | 100 | 0.592849 |
033d7d60c771648ac08d777fbd26b0c59ef98436 | 5,131 | // Copyright 2019 Karl Sundequist Blomdahl <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::slice;
use super::tensor::Tensor;
use super::Error;
use dg_cuda::cudnn::DataType;
use dg_utils::types::f16;
use dg_utils::json::{JsonKey, JsonToken, JsonStream};
use dg_utils::b85;
/// Load all tensors in the given buffer and return a map from
/// their name to description. If we fail to load any tensors
/// from the given buffer then an `Err` is returned.
///
/// # Arguments
///
/// * `reader` - the reader to load the tensors from
///
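/// The expected JSON shape (inferred from the parser below and its tests,
/// noted here for orientation) is an object of tensors, each carrying a
/// scale `"s"`, a data type `"t"` (`i1`, `i4`, `f2`, or `f4`), and
/// b85-encoded values `"v"`:
///
/// `{"tensor_name": {"s": "...", "t": "f2", "v": "..."}}`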
fn load_aux<R: Read>(reader: R) -> Result<HashMap<String, Tensor>, Error> {
let mut out: HashMap<String, Tensor> = HashMap::new();
for entry in JsonStream::new(reader) {
match (&entry.stack()[..], entry.token()) {
([], JsonToken::ObjectStart) => {},
([], JsonToken::ObjectEnd) => {},
([JsonKey::Object(name)], JsonToken::ObjectStart) => {
out.insert(name.clone(), Tensor::default());
},
([JsonKey::Object(_)], JsonToken::ObjectEnd) => {},
([JsonKey::Object(_)], JsonToken::StringPtr { ptr: _, len: _ }) => {},
([JsonKey::Object(name), JsonKey::Object(attribute)], JsonToken::StringPtr { ptr, len }) => {
let value = unsafe { slice::from_raw_parts(*ptr, *len) };
let tensor = out.get_mut(name).expect("could not get tensor");
if attribute == "s" {
if let Some(parsed_value) = b85::decode::<f32, f32>(&value) {
tensor.set_scale(parsed_value[0]);
} else {
return Err(Error::MalformedWeights);
}
} else if attribute == "t" {
let str_data_type = ::std::str::from_utf8(value).map_err(|_| Error::MalformedWeights)?;
tensor.set_data_type(match str_data_type {
"i1" => DataType::Int8,
"i4" => DataType::Int32,
"f2" => DataType::Half,
"f4" => DataType::Float,
_ => { return Err(Error::MalformedWeights) }
});
} else if attribute == "v" {
macro_rules! decode_as_and_set_host {
($dtype:ty) => {{
let array = b85::decode::<$dtype, $dtype>(&value).ok_or(Error::MalformedWeights);
if let Err(reason) = array.and_then(|h| tensor.set_host(h)) {
return Err(reason);
}
}};
}
match tensor.data_type() {
DataType::Int8 => decode_as_and_set_host!(i8),
DataType::Int32 => decode_as_and_set_host!(i32),
DataType::Half => decode_as_and_set_host!(f16),
DataType::Float => decode_as_and_set_host!(f32),
_ => unreachable!()
};
} else {
return Err(Error::MalformedWeights);
}
}
_ => { return Err(Error::MalformedWeights) }
}
}
// an empty result-set is an error
if out.is_empty() {
Err(Error::MissingWeights)
} else {
Ok(out)
}
}
/// Load all tensors in the given file and return a map from
/// their name to description. If we fail to load any tensors
/// from the given file then an `Err` is returned.
///
/// # Arguments
///
/// * `path` -
///
pub fn load(path: &Path) -> Result<HashMap<String, Tensor>, Error> {
if let Ok(file) = File::open(path) {
load_aux(file)
} else {
Err(Error::MissingWeights)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Cursor;
#[test]
fn empty_json() {
let out = load_aux(Cursor::new(""));
assert!(out.is_err());
}
#[test]
fn load_json() {
let out = load_aux(Cursor::new("{\"11v_value/linear_2/offset:0\": {\"s\": \"(^d>V\", \"t\": \"f2\", \"v\": \"(^d>V\"}}"));
assert!(out.is_ok());
// verify internal values
let out = out.unwrap();
assert_eq!(out.len(), 1, "{:?}", out.keys().map(|x| x.clone()).collect::<Vec<String>>());
assert_eq!(out["11v_value/linear_2/offset:0"].scale(), 0.13704996);
assert_eq!(out["11v_value/linear_2/offset:0"].size_in_bytes(), 4);
}
}
| 35.881119 | 130 | 0.529137 |
2181bde65ad9ee19ef9e8dde81c279576274ba35 | 37,353 | use crate::{
cli::{
check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
ProcessResult, SignerIndex,
},
cli_output::CliNonceAccount,
};
use clap::{App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{
input_parsers::*, input_validators::*, offline::BLOCKHASH_ARG, ArgConstant,
};
use solana_client::rpc_client::RpcClient;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
account::Account,
account_utils::StateMut,
hash::Hash,
message::Message,
nonce::{
self,
state::{Data, Versions},
State,
},
pubkey::Pubkey,
system_instruction::{
advance_nonce_account, authorize_nonce_account, create_nonce_account,
create_nonce_account_with_seed, withdraw_nonce_account, NonceError, SystemError,
},
system_program,
transaction::Transaction,
};
use std::sync::Arc;
use thiserror::Error;
#[derive(Debug, Error, PartialEq)]
pub enum CliNonceError {
#[error("invalid account owner")]
InvalidAccountOwner,
#[error("invalid account data")]
InvalidAccountData,
#[error("unexpected account data size")]
UnexpectedDataSize,
#[error("query hash does not match stored hash")]
InvalidHash,
#[error("query authority does not match account authority")]
InvalidAuthority,
#[error("invalid state for requested operation")]
InvalidStateForOperation,
#[error("client error: {0}")]
Client(String),
}
pub const NONCE_ARG: ArgConstant<'static> = ArgConstant {
name: "nonce",
long: "nonce",
help: "Provide the nonce account to use when creating a nonced \n\
transaction. Nonced transactions are useful when a transaction \n\
requires a lengthy signing process. Learn more about nonced \n\
transactions at https://docs.solana.com/offline-signing/durable-nonce",
};
pub const NONCE_AUTHORITY_ARG: ArgConstant<'static> = ArgConstant {
name: "nonce_authority",
long: "nonce-authority",
help: "Provide the nonce authority keypair to use when signing a nonced transaction",
};
pub trait NonceSubCommands {
fn nonce_subcommands(self) -> Self;
}
pub fn nonce_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(NONCE_ARG.name)
.long(NONCE_ARG.long)
.takes_value(true)
.value_name("PUBKEY")
.requires(BLOCKHASH_ARG.name)
.validator(is_valid_pubkey)
.help(NONCE_ARG.help)
}
pub fn nonce_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(NONCE_AUTHORITY_ARG.name)
.long(NONCE_AUTHORITY_ARG.long)
.takes_value(true)
.value_name("KEYPAIR")
.validator(is_valid_signer)
.help(NONCE_AUTHORITY_ARG.help)
}
impl NonceSubCommands for App<'_, '_> {
fn nonce_subcommands(self) -> Self {
self.subcommand(
SubCommand::with_name("authorize-nonce-account")
.about("Assign account authority to a new entity")
.arg(
pubkey!(Arg::with_name("nonce_account_pubkey")
.index(1)
.value_name("NONCE_ACCOUNT_ADDRESS")
.required(true),
"Address of the nonce account. "),
)
.arg(
pubkey!(Arg::with_name("new_authority")
.index(2)
.value_name("AUTHORITY_PUBKEY")
.required(true),
"Account to be granted authority of the nonce account. "),
)
.arg(nonce_authority_arg()),
)
.subcommand(
SubCommand::with_name("create-nonce-account")
.about("Create a nonce account")
.arg(
Arg::with_name("nonce_account_keypair")
.index(1)
.value_name("ACCOUNT_KEYPAIR")
.takes_value(true)
.required(true)
.validator(is_valid_signer)
.help("Keypair of the nonce account to fund"),
)
.arg(
Arg::with_name("amount")
.index(2)
.value_name("AMOUNT")
.takes_value(true)
.required(true)
.validator(is_amount)
.help("The amount to load the nonce account with, in SOL"),
)
.arg(
pubkey!(Arg::with_name(NONCE_AUTHORITY_ARG.name)
.long(NONCE_AUTHORITY_ARG.long)
.value_name("PUBKEY"),
"Assign noncing authority to another entity. "),
)
.arg(
Arg::with_name("seed")
.long("seed")
.value_name("STRING")
.takes_value(true)
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the NONCE_ACCOUNT pubkey")
),
)
.subcommand(
SubCommand::with_name("nonce")
.about("Get the current nonce value")
.alias("get-nonce")
.arg(
pubkey!(Arg::with_name("nonce_account_pubkey")
.index(1)
.value_name("NONCE_ACCOUNT_ADDRESS")
.required(true),
"Address of the nonce account to display. "),
),
)
.subcommand(
SubCommand::with_name("new-nonce")
.about("Generate a new nonce, rendering the existing nonce useless")
.arg(
pubkey!(Arg::with_name("nonce_account_pubkey")
.index(1)
.value_name("NONCE_ACCOUNT_ADDRESS")
.required(true),
"Address of the nonce account. "),
)
.arg(nonce_authority_arg()),
)
.subcommand(
SubCommand::with_name("nonce-account")
.about("Show the contents of a nonce account")
.alias("show-nonce-account")
.arg(
pubkey!(Arg::with_name("nonce_account_pubkey")
.index(1)
.value_name("NONCE_ACCOUNT_ADDRESS")
.required(true),
"Address of the nonce account to display. "),
)
.arg(
Arg::with_name("lamports")
.long("lamports")
.takes_value(false)
.help("Display balance in lamports instead of SOL"),
),
)
.subcommand(
SubCommand::with_name("withdraw-from-nonce-account")
.about("Withdraw SOL from the nonce account")
.arg(
pubkey!(Arg::with_name("nonce_account_pubkey")
.index(1)
.value_name("NONCE_ACCOUNT_ADDRESS")
.required(true),
"Nonce account to withdraw from. "),
)
.arg(
pubkey!(Arg::with_name("destination_account_pubkey")
.index(2)
.value_name("RECIPIENT_ADDRESS")
.required(true),
"The account to which the SOL should be transferred. "),
)
.arg(
Arg::with_name("amount")
.index(3)
.value_name("AMOUNT")
.takes_value(true)
.required(true)
.validator(is_amount)
.help("The amount to withdraw from the nonce account, in SOL"),
)
.arg(nonce_authority_arg()),
)
}
}
pub fn get_account(
rpc_client: &RpcClient,
nonce_pubkey: &Pubkey,
) -> Result<Account, CliNonceError> {
rpc_client
.get_account(nonce_pubkey)
.map_err(|e| CliNonceError::Client(format!("{}", e)))
.and_then(|a| match account_identity_ok(&a) {
Ok(()) => Ok(a),
Err(e) => Err(e),
})
}
pub fn account_identity_ok(account: &Account) -> Result<(), CliNonceError> {
if account.owner != system_program::id() {
Err(CliNonceError::InvalidAccountOwner)
} else if account.data.is_empty() {
Err(CliNonceError::UnexpectedDataSize)
} else {
Ok(())
}
}
pub fn state_from_account(account: &Account) -> Result<State, CliNonceError> {
account_identity_ok(account)?;
StateMut::<Versions>::state(account)
.map_err(|_| CliNonceError::InvalidAccountData)
.map(|v| v.convert_to_current())
}
pub fn data_from_account(account: &Account) -> Result<Data, CliNonceError> {
account_identity_ok(account)?;
state_from_account(account).and_then(|ref s| data_from_state(s).map(|d| d.clone()))
}
pub fn data_from_state(state: &State) -> Result<&Data, CliNonceError> {
match state {
State::Uninitialized => Err(CliNonceError::InvalidStateForOperation),
State::Initialized(data) => Ok(data),
}
}
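/// Illustrative helper (an added sketch, not part of the original CLI):
/// fetch the current durable-nonce blockhash for an account by composing the
/// helpers above.
#[allow(dead_code)]
pub fn fetch_nonce_blockhash(
    rpc_client: &RpcClient,
    nonce_pubkey: &Pubkey,
) -> Result<Hash, CliNonceError> {
    get_account(rpc_client, nonce_pubkey)
        .and_then(|account| data_from_account(&account))
        .map(|data| data.blockhash)
}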
pub fn parse_authorize_nonce_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
let new_authority = pubkey_of_signer(matches, "new_authority", wallet_manager)?.unwrap();
let (nonce_authority, nonce_authority_pubkey) =
signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
let payer_provided = None;
let signer_info = generate_unique_signers(
vec![payer_provided, nonce_authority],
matches,
default_signer_path,
wallet_manager,
)?;
Ok(CliCommandInfo {
command: CliCommand::AuthorizeNonceAccount {
nonce_account,
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
new_authority,
},
signers: signer_info.signers,
})
}
pub fn parse_nonce_create_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let (nonce_account, nonce_account_pubkey) =
signer_of(matches, "nonce_account_keypair", wallet_manager)?;
let seed = matches.value_of("seed").map(|s| s.to_string());
let lamports = lamports_of_sol(matches, "amount").unwrap();
let nonce_authority = pubkey_of_signer(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
let payer_provided = None;
let signer_info = generate_unique_signers(
vec![payer_provided, nonce_account],
matches,
default_signer_path,
wallet_manager,
)?;
Ok(CliCommandInfo {
command: CliCommand::CreateNonceAccount {
nonce_account: signer_info.index_of(nonce_account_pubkey).unwrap(),
seed,
nonce_authority,
lamports,
},
signers: signer_info.signers,
})
}
pub fn parse_get_nonce(
matches: &ArgMatches<'_>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account_pubkey =
pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
Ok(CliCommandInfo {
command: CliCommand::GetNonce(nonce_account_pubkey),
signers: vec![],
})
}
pub fn parse_new_nonce(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
let (nonce_authority, nonce_authority_pubkey) =
signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
let payer_provided = None;
let signer_info = generate_unique_signers(
vec![payer_provided, nonce_authority],
matches,
default_signer_path,
wallet_manager,
)?;
Ok(CliCommandInfo {
command: CliCommand::NewNonce {
nonce_account,
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
},
signers: signer_info.signers,
})
}
pub fn parse_show_nonce_account(
matches: &ArgMatches<'_>,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account_pubkey =
pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
let use_lamports_unit = matches.is_present("lamports");
Ok(CliCommandInfo {
command: CliCommand::ShowNonceAccount {
nonce_account_pubkey,
use_lamports_unit,
},
signers: vec![],
})
}
pub fn parse_withdraw_from_nonce_account(
matches: &ArgMatches<'_>,
default_signer_path: &str,
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
let destination_account_pubkey =
pubkey_of_signer(matches, "destination_account_pubkey", wallet_manager)?.unwrap();
let lamports = lamports_of_sol(matches, "amount").unwrap();
let (nonce_authority, nonce_authority_pubkey) =
signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
let payer_provided = None;
let signer_info = generate_unique_signers(
vec![payer_provided, nonce_authority],
matches,
default_signer_path,
wallet_manager,
)?;
Ok(CliCommandInfo {
command: CliCommand::WithdrawFromNonceAccount {
nonce_account,
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
destination_account_pubkey,
lamports,
},
signers: signer_info.signers,
})
}
/// Check if a nonce account is initialized with the given authority and hash
pub fn check_nonce_account(
nonce_account: &Account,
nonce_authority: &Pubkey,
nonce_hash: &Hash,
) -> Result<(), CliError> {
match state_from_account(nonce_account)? {
State::Initialized(ref data) => {
if &data.blockhash != nonce_hash {
Err(CliNonceError::InvalidHash.into())
} else if nonce_authority != &data.authority {
Err(CliNonceError::InvalidAuthority.into())
} else {
Ok(())
}
}
State::Uninitialized => Err(CliNonceError::InvalidStateForOperation.into()),
}
}
pub fn process_authorize_nonce_account(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: &Pubkey,
nonce_authority: SignerIndex,
new_authority: &Pubkey,
) -> ProcessResult {
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let nonce_authority = config.signers[nonce_authority];
let ix = authorize_nonce_account(nonce_account, &nonce_authority.pubkey(), new_authority);
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
check_account_for_fee(
rpc_client,
&config.signers[0].pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<NonceError>(result, &config)
}
pub fn process_create_nonce_account(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: SignerIndex,
seed: Option<String>,
nonce_authority: Option<Pubkey>,
lamports: u64,
) -> ProcessResult {
let nonce_account_pubkey = config.signers[nonce_account].pubkey();
let nonce_account_address = if let Some(ref seed) = seed {
Pubkey::create_with_seed(&nonce_account_pubkey, seed, &system_program::id())?
} else {
nonce_account_pubkey
};
check_unique_pubkeys(
(&config.signers[0].pubkey(), "cli keypair".to_string()),
(&nonce_account_address, "nonce_account".to_string()),
)?;
if let Ok(nonce_account) = get_account(rpc_client, &nonce_account_address) {
let err_msg = if state_from_account(&nonce_account).is_ok() {
format!("Nonce account {} already exists", nonce_account_address)
} else {
format!(
"Account {} already exists and is not a nonce account",
nonce_account_address
)
};
return Err(CliError::BadParameter(err_msg).into());
}
let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(State::size())?;
if lamports < minimum_balance {
return Err(CliError::BadParameter(format!(
"need at least {} lamports for nonce account to be rent exempt, provided lamports: {}",
minimum_balance, lamports
))
.into());
}
let nonce_authority = nonce_authority.unwrap_or_else(|| config.signers[0].pubkey());
let ixs = if let Some(seed) = seed {
create_nonce_account_with_seed(
&config.signers[0].pubkey(), // from
&nonce_account_address, // to
&nonce_account_pubkey, // base
&seed, // seed
&nonce_authority,
lamports,
)
} else {
create_nonce_account(
&config.signers[0].pubkey(),
&nonce_account_pubkey,
&nonce_authority,
lamports,
)
};
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let message = Message::new_with_payer(&ixs, Some(&config.signers[0].pubkey()));
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
check_account_for_fee(
rpc_client,
&config.signers[0].pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<SystemError>(result, &config)
}
pub fn process_get_nonce(rpc_client: &RpcClient, nonce_account_pubkey: &Pubkey) -> ProcessResult {
match get_account(rpc_client, nonce_account_pubkey).and_then(|ref a| state_from_account(a))? {
State::Uninitialized => Ok("Nonce account is uninitialized".to_string()),
State::Initialized(ref data) => Ok(format!("{:?}", data.blockhash)),
}
}
pub fn process_new_nonce(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: &Pubkey,
nonce_authority: SignerIndex,
) -> ProcessResult {
check_unique_pubkeys(
(&config.signers[0].pubkey(), "cli keypair".to_string()),
(&nonce_account, "nonce_account_pubkey".to_string()),
)?;
if rpc_client.get_account(&nonce_account).is_err() {
return Err(CliError::BadParameter(
"Unable to create new nonce, no nonce account found".to_string(),
)
.into());
}
let nonce_authority = config.signers[nonce_authority];
let ix = advance_nonce_account(&nonce_account, &nonce_authority.pubkey());
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
check_account_for_fee(
rpc_client,
&config.signers[0].pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client
.send_and_confirm_transaction_with_spinner(&mut tx, &[config.signers[0], nonce_authority]);
log_instruction_custom_error::<SystemError>(result, &config)
}
pub fn process_show_nonce_account(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account_pubkey: &Pubkey,
use_lamports_unit: bool,
) -> ProcessResult {
let nonce_account = get_account(rpc_client, nonce_account_pubkey)?;
let print_account = |data: Option<&nonce::state::Data>| {
let mut nonce_account = CliNonceAccount {
balance: nonce_account.lamports,
minimum_balance_for_rent_exemption: rpc_client
.get_minimum_balance_for_rent_exemption(State::size())?,
use_lamports_unit,
..CliNonceAccount::default()
};
if let Some(ref data) = data {
nonce_account.nonce = Some(data.blockhash.to_string());
nonce_account.lamports_per_signature = Some(data.fee_calculator.lamports_per_signature);
nonce_account.authority = Some(data.authority.to_string());
}
Ok(config.output_format.formatted_string(&nonce_account))
};
match state_from_account(&nonce_account)? {
State::Uninitialized => print_account(None),
State::Initialized(ref data) => print_account(Some(data)),
}
}
pub fn process_withdraw_from_nonce_account(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: &Pubkey,
nonce_authority: SignerIndex,
destination_account_pubkey: &Pubkey,
lamports: u64,
) -> ProcessResult {
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let nonce_authority = config.signers[nonce_authority];
let ix = withdraw_nonce_account(
nonce_account,
&nonce_authority.pubkey(),
destination_account_pubkey,
lamports,
);
let message = Message::new_with_payer(&[ix], Some(&config.signers[0].pubkey()));
let mut tx = Transaction::new_unsigned(message);
tx.try_sign(&config.signers, recent_blockhash)?;
check_account_for_fee(
rpc_client,
&config.signers[0].pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
log_instruction_custom_error::<NonceError>(result, &config)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::cli::{app, parse_command};
use solana_sdk::{
account::Account,
fee_calculator::FeeCalculator,
hash::hash,
nonce::{self, State},
signature::{read_keypair_file, write_keypair, Keypair, Signer},
system_program,
};
use tempfile::NamedTempFile;
fn make_tmp_file() -> (String, NamedTempFile) {
let tmp_file = NamedTempFile::new().unwrap();
(String::from(tmp_file.path().to_str().unwrap()), tmp_file)
}
#[test]
fn test_parse_command() {
let test_commands = app("test", "desc", "version");
let default_keypair = Keypair::new();
let (default_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap();
let (keypair_file, mut tmp_file) = make_tmp_file();
let nonce_account_keypair = Keypair::new();
write_keypair(&nonce_account_keypair, tmp_file.as_file_mut()).unwrap();
let nonce_account_pubkey = nonce_account_keypair.pubkey();
let nonce_account_string = nonce_account_pubkey.to_string();
let (authority_keypair_file, mut tmp_file2) = make_tmp_file();
let nonce_authority_keypair = Keypair::new();
write_keypair(&nonce_authority_keypair, tmp_file2.as_file_mut()).unwrap();
// Test AuthorizeNonceAccount Subcommand
let test_authorize_nonce_account = test_commands.clone().get_matches_from(vec![
"test",
"authorize-nonce-account",
&keypair_file,
&Pubkey::default().to_string(),
]);
assert_eq!(
parse_command(
&test_authorize_nonce_account,
&default_keypair_file,
&mut None
)
.unwrap(),
CliCommandInfo {
command: CliCommand::AuthorizeNonceAccount {
nonce_account: nonce_account_pubkey,
nonce_authority: 0,
new_authority: Pubkey::default(),
},
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
}
);
// Test AuthorizeNonceAccount Subcommand with authority
let test_authorize_nonce_account = test_commands.clone().get_matches_from(vec![
"test",
"authorize-nonce-account",
&keypair_file,
&Pubkey::default().to_string(),
"--nonce-authority",
&authority_keypair_file,
]);
assert_eq!(
parse_command(
&test_authorize_nonce_account,
&default_keypair_file,
&mut None
)
.unwrap(),
CliCommandInfo {
command: CliCommand::AuthorizeNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
nonce_authority: 1,
new_authority: Pubkey::default(),
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
read_keypair_file(&authority_keypair_file).unwrap().into()
],
}
);
// Test CreateNonceAccount SubCommand
let test_create_nonce_account = test_commands.clone().get_matches_from(vec![
"test",
"create-nonce-account",
&keypair_file,
"50",
]);
assert_eq!(
parse_command(&test_create_nonce_account, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateNonceAccount {
nonce_account: 1,
seed: None,
nonce_authority: None,
lamports: 50_000_000_000,
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
read_keypair_file(&keypair_file).unwrap().into()
],
}
);
// Test CreateNonceAccount SubCommand with authority
let test_create_nonce_account = test_commands.clone().get_matches_from(vec![
"test",
"create-nonce-account",
&keypair_file,
"50",
"--nonce-authority",
&authority_keypair_file,
]);
assert_eq!(
parse_command(&test_create_nonce_account, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateNonceAccount {
nonce_account: 1,
seed: None,
nonce_authority: Some(nonce_authority_keypair.pubkey()),
lamports: 50_000_000_000,
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
read_keypair_file(&keypair_file).unwrap().into()
],
}
);
// Test GetNonce Subcommand
let test_get_nonce = test_commands.clone().get_matches_from(vec![
"test",
"get-nonce",
&nonce_account_string,
]);
assert_eq!(
parse_command(&test_get_nonce, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::GetNonce(nonce_account_keypair.pubkey()),
signers: vec![],
}
);
// Test NewNonce SubCommand
let test_new_nonce =
test_commands
.clone()
.get_matches_from(vec!["test", "new-nonce", &keypair_file]);
let nonce_account = read_keypair_file(&keypair_file).unwrap();
assert_eq!(
parse_command(&test_new_nonce, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::NewNonce {
nonce_account: nonce_account.pubkey(),
nonce_authority: 0,
},
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
}
);
// Test NewNonce SubCommand with authority
let test_new_nonce = test_commands.clone().get_matches_from(vec![
"test",
"new-nonce",
&keypair_file,
"--nonce-authority",
&authority_keypair_file,
]);
let nonce_account = read_keypair_file(&keypair_file).unwrap();
assert_eq!(
parse_command(&test_new_nonce, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::NewNonce {
nonce_account: nonce_account.pubkey(),
nonce_authority: 1,
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
read_keypair_file(&authority_keypair_file).unwrap().into()
],
}
);
// Test ShowNonceAccount Subcommand
let test_show_nonce_account = test_commands.clone().get_matches_from(vec![
"test",
"nonce-account",
&nonce_account_string,
]);
assert_eq!(
parse_command(&test_show_nonce_account, &default_keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::ShowNonceAccount {
nonce_account_pubkey: nonce_account_keypair.pubkey(),
use_lamports_unit: false,
},
signers: vec![],
}
);
// Test WithdrawFromNonceAccount Subcommand
let test_withdraw_from_nonce_account = test_commands.clone().get_matches_from(vec![
"test",
"withdraw-from-nonce-account",
&keypair_file,
&nonce_account_string,
"42",
]);
assert_eq!(
parse_command(
&test_withdraw_from_nonce_account,
&default_keypair_file,
&mut None
)
.unwrap(),
CliCommandInfo {
command: CliCommand::WithdrawFromNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
nonce_authority: 0,
destination_account_pubkey: nonce_account_pubkey,
lamports: 42_000_000_000
},
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
}
);
// Test WithdrawFromNonceAccount Subcommand with authority
let test_withdraw_from_nonce_account = test_commands.clone().get_matches_from(vec![
"test",
"withdraw-from-nonce-account",
&keypair_file,
&nonce_account_string,
"42",
"--nonce-authority",
&authority_keypair_file,
]);
assert_eq!(
parse_command(
&test_withdraw_from_nonce_account,
&default_keypair_file,
&mut None
)
.unwrap(),
CliCommandInfo {
command: CliCommand::WithdrawFromNonceAccount {
nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
nonce_authority: 1,
destination_account_pubkey: nonce_account_pubkey,
lamports: 42_000_000_000
},
signers: vec![
read_keypair_file(&default_keypair_file).unwrap().into(),
read_keypair_file(&authority_keypair_file).unwrap().into()
],
}
);
}
#[test]
fn test_check_nonce_account() {
let blockhash = Hash::default();
let nonce_pubkey = Pubkey::new_rand();
let data = Versions::new_current(State::Initialized(nonce::state::Data {
authority: nonce_pubkey,
blockhash,
fee_calculator: FeeCalculator::default(),
}));
let valid = Account::new_data(1, &data, &system_program::ID);
assert!(check_nonce_account(&valid.unwrap(), &nonce_pubkey, &blockhash).is_ok());
let invalid_owner = Account::new_data(1, &data, &Pubkey::new(&[1u8; 32]));
assert_eq!(
check_nonce_account(&invalid_owner.unwrap(), &nonce_pubkey, &blockhash),
Err(CliNonceError::InvalidAccountOwner.into()),
);
let invalid_data = Account::new_data(1, &"invalid", &system_program::ID);
assert_eq!(
check_nonce_account(&invalid_data.unwrap(), &nonce_pubkey, &blockhash),
Err(CliNonceError::InvalidAccountData.into()),
);
let data = Versions::new_current(State::Initialized(nonce::state::Data {
authority: nonce_pubkey,
blockhash: hash(b"invalid"),
fee_calculator: FeeCalculator::default(),
}));
let invalid_hash = Account::new_data(1, &data, &system_program::ID);
assert_eq!(
check_nonce_account(&invalid_hash.unwrap(), &nonce_pubkey, &blockhash),
Err(CliNonceError::InvalidHash.into()),
);
let data = Versions::new_current(State::Initialized(nonce::state::Data {
authority: Pubkey::new_rand(),
blockhash,
fee_calculator: FeeCalculator::default(),
}));
let invalid_authority = Account::new_data(1, &data, &system_program::ID);
assert_eq!(
check_nonce_account(&invalid_authority.unwrap(), &nonce_pubkey, &blockhash),
Err(CliNonceError::InvalidAuthority.into()),
);
let data = Versions::new_current(State::Uninitialized);
let invalid_state = Account::new_data(1, &data, &system_program::ID);
assert_eq!(
check_nonce_account(&invalid_state.unwrap(), &nonce_pubkey, &blockhash),
Err(CliNonceError::InvalidStateForOperation.into()),
);
}
#[test]
fn test_account_identity_ok() {
let nonce_account = nonce::create_account(1).into_inner();
assert_eq!(account_identity_ok(&nonce_account), Ok(()));
let system_account = Account::new(1, 0, &system_program::id());
assert_eq!(
account_identity_ok(&system_account),
Err(CliNonceError::UnexpectedDataSize),
);
let other_program = Pubkey::new(&[1u8; 32]);
let other_account_no_data = Account::new(1, 0, &other_program);
assert_eq!(
account_identity_ok(&other_account_no_data),
Err(CliNonceError::InvalidAccountOwner),
);
}
#[test]
fn test_state_from_account() {
let mut nonce_account = nonce::create_account(1).into_inner();
assert_eq!(state_from_account(&nonce_account), Ok(State::Uninitialized));
let data = nonce::state::Data {
authority: Pubkey::new(&[1u8; 32]),
blockhash: Hash::new(&[42u8; 32]),
fee_calculator: FeeCalculator::new(42),
};
nonce_account
.set_state(&Versions::new_current(State::Initialized(data.clone())))
.unwrap();
assert_eq!(
state_from_account(&nonce_account),
Ok(State::Initialized(data))
);
let wrong_data_size_account = Account::new(1, 1, &system_program::id());
assert_eq!(
state_from_account(&wrong_data_size_account),
Err(CliNonceError::InvalidAccountData),
);
}
#[test]
fn test_data_from_helpers() {
let mut nonce_account = nonce::create_account(1).into_inner();
let state = state_from_account(&nonce_account).unwrap();
assert_eq!(
data_from_state(&state),
Err(CliNonceError::InvalidStateForOperation)
);
assert_eq!(
data_from_account(&nonce_account),
Err(CliNonceError::InvalidStateForOperation)
);
let data = nonce::state::Data {
authority: Pubkey::new(&[1u8; 32]),
blockhash: Hash::new(&[42u8; 32]),
fee_calculator: FeeCalculator::new(42),
};
nonce_account
.set_state(&Versions::new_current(State::Initialized(data.clone())))
.unwrap();
let state = state_from_account(&nonce_account).unwrap();
assert_eq!(data_from_state(&state), Ok(&data));
assert_eq!(data_from_account(&nonce_account), Ok(data));
}
}
| 36.620588 | 154 | 0.587342 |
9ca57101d98454b6777385bf441936ee833ba110 | 3,748 | pub mod auth;
pub mod crockford;
pub mod db;
pub mod graphql;
pub mod image;
pub mod repo;
pub mod rest;
pub mod settings;
pub mod warp_ext;
use crate::auth::{AuthProvider, ClaimsProvider};
use crate::repo::company::MongoCompanyRepo;
use crate::repo::device::MongoDeviceRepo;
use crate::repo::gas_reading::MongoGasReadingRepo;
use crate::repo::incident::MongoIncidentRepo;
use crate::repo::incident_stats::MongoIncidentStatsRepo;
use crate::repo::location_reading::MongoLocationReadingRepo;
use crate::repo::person::MongoPersonRepo;
use crate::repo::team::MongoTeamRepo;
use crate::repo::user_account::MongoUserAccountRepo;
use crate::settings::Settings;
use mongodb::Database;
use std::env;
use std::net::Ipv4Addr;
use warp::cors::Cors;
use warp::filters::BoxedFilter;
use warp::{Filter, Reply};
const DEFAULT_PORT: u16 = 3001;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
env_logger::init();
let settings = Settings::read();
let db = db::connect_and_prepare(&settings.db_uri).await?;
let graphql_deps = graphql_deps(db.clone(), &settings.private_key);
let rest_context = rest_context(db.clone());
let route = filter(graphql_deps, rest_context).with(log()).with(cors());
let port = get_port();
warp::serve(route).run((Ipv4Addr::UNSPECIFIED, port)).await;
Ok(())
}
fn graphql_deps(db: Database, private_key: &str) -> graphql::Deps {
graphql::Deps {
company_repo: MongoCompanyRepo::new(db.clone()).into(),
device_repo: MongoDeviceRepo::new(db.clone()).into(),
gas_reading_repo: MongoGasReadingRepo::new(db.clone()).into(),
incident_repo: MongoIncidentRepo::new(db.clone()).into(),
incident_stats_repo: MongoIncidentStatsRepo::new(db.clone()).into(),
location_reading_repo: MongoLocationReadingRepo::new(db.clone()).into(),
person_repo: MongoPersonRepo::new(db.clone()).into(),
team_repo: MongoTeamRepo::new(db.clone()).into(),
user_account_repo: MongoUserAccountRepo::new(db.clone()).into(),
auth_provider: AuthProvider {
user_account_repo: MongoUserAccountRepo::new(db).into(),
},
claims_provider: ClaimsProvider {
private_key: private_key.to_string(),
},
}
}
fn rest_context(db: Database) -> rest::Context {
rest::Context {
user_account_repo: MongoUserAccountRepo::new(db.clone()).into(),
db,
}
}
fn filter(graphql_deps: graphql::Deps, rest_context: rest::Context) -> BoxedFilter<(impl Reply,)> {
graphql::graphql_filter(graphql_deps)
.or(graphql::playground_filter())
.or(graphql_doc())
.or(rest::v1(rest_context))
.or(robots())
.boxed()
}
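// Note (added for orientation): `or` chains are tried in declaration order,
// so the GraphQL routes take precedence over the docs, REST, and robots
// routes.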
fn graphql_doc() -> BoxedFilter<(impl Reply,)> {
warp::path("doc").and(warp::fs::dir("doc/public")).boxed()
}
fn robots() -> BoxedFilter<(impl Reply,)> {
warp::path("robots.txt")
.map(|| "User-agent: *\nDisallow: /")
.boxed()
}
fn cors() -> Cors {
warp::cors()
.allow_any_origin()
.allow_headers(vec![
"Origin",
"Content-Type",
"Referer",
"Access-Control-Request-Method",
"Access-Control-Request-Headers",
"User-Agent",
"Sec-Fetch-Mode",
"Authorization",
])
.allow_methods(vec!["POST", "GET"])
.build()
}
fn log() -> warp::log::Log<impl Fn(warp::log::Info) + Copy> {
warp::log("api")
}
pub fn get_port() -> u16 {
// When running as an Azure Function use the supplied port, otherwise use the default.
match env::var("FUNCTIONS_CUSTOMHANDLER_PORT") {
Ok(port) => port.parse().expect("Custom Handler port is not a number"),
Err(_) => DEFAULT_PORT,
}
}
| 31.762712 | 99 | 0.643543 |
01a2b1782564aa75091989d258ef0de0bea0b02e | 4,461 | // Copyright 2022 Stefan Zobel
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! Scalar multiplication
use crate::matrix::*;
use std::ops::Mul;
// Unfortunately, we have to implement scalar multiplication where the
// RHS is a scalar from std manually for each scalar type we want to
// support. See
// https://rust-lang.github.io/rfcs/2451-re-rebalancing-coherence.html
// and especially:
// "impl<T> ForeignTrait<LocalTypeCrateA> for T" is not allowed,
// because it might conflict with another crate writing
// "impl<T> ForeignTrait<T> for LocalTypeCrateB" which is always
// allowed. See also https://www.jstuber.net/2019/04/17/scalar-multiplication-in-rust/
#[inline]
fn scalar_mul_i8<const ROWS: usize, const COLS: usize>(
scalar: i8,
a: &[[i8; COLS]; ROWS],
b: &mut [[i8; COLS]; ROWS],
) {
for row in 0..ROWS {
for col in 0..COLS {
b[row][col] = scalar * a[row][col];
}
}
}
impl<const ROWS: usize, const COLS: usize> Mul<SMatrix<i8, ROWS, COLS>> for i8 {
type Output = SMatrix<i8, ROWS, COLS>;
#[inline]
fn mul(self, rhs: SMatrix<i8, ROWS, COLS>) -> Self::Output {
let mut b = MF::<i8, ROWS, COLS>::new_stack();
scalar_mul_i8(self, rhs.array(), b.array_mut());
b
}
}
impl<const ROWS: usize, const COLS: usize> Mul<HMatrix<i8, ROWS, COLS>> for i8 {
type Output = HMatrix<i8, ROWS, COLS>;
#[inline]
fn mul(self, rhs: HMatrix<i8, ROWS, COLS>) -> Self::Output {
let mut b = MF::<i8, ROWS, COLS>::new_heap();
scalar_mul_i8(self, rhs.array(), b.array_mut());
b
}
}
impl<const ROWS: usize, const COLS: usize> Mul<&SMatrix<i8, ROWS, COLS>> for i8 {
type Output = SMatrix<i8, ROWS, COLS>;
#[inline]
fn mul(self, rhs: &SMatrix<i8, ROWS, COLS>) -> Self::Output {
let mut b = MF::<i8, ROWS, COLS>::new_stack();
scalar_mul_i8(self, rhs.array(), b.array_mut());
b
}
}
impl<const ROWS: usize, const COLS: usize> Mul<&HMatrix<i8, ROWS, COLS>> for i8 {
type Output = HMatrix<i8, ROWS, COLS>;
#[inline]
fn mul(self, rhs: &HMatrix<i8, ROWS, COLS>) -> Self::Output {
let mut b = MF::<i8, ROWS, COLS>::new_heap();
scalar_mul_i8(self, rhs.array(), b.array_mut());
b
}
}
impl<const ROWS: usize, const COLS: usize> Mul<&mut SMatrix<i8, ROWS, COLS>> for i8 {
type Output = SMatrix<i8, ROWS, COLS>;
#[inline]
fn mul(self, rhs: &mut SMatrix<i8, ROWS, COLS>) -> Self::Output {
let mut b = MF::<i8, ROWS, COLS>::new_stack();
scalar_mul_i8(self, rhs.array(), b.array_mut());
b
}
}
impl<const ROWS: usize, const COLS: usize> Mul<&mut HMatrix<i8, ROWS, COLS>> for i8 {
type Output = HMatrix<i8, ROWS, COLS>;
#[inline]
fn mul(self, rhs: &mut HMatrix<i8, ROWS, COLS>) -> Self::Output {
let mut b = MF::<i8, ROWS, COLS>::new_heap();
scalar_mul_i8(self, rhs.array(), b.array_mut());
b
}
}
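// Illustrative sketch (added; not part of the original file): because the
// orphan rule only forbids the fully generic `impl<T> Mul<SMatrix<T, ..>> for
// T`, the per-type boilerplate above can be stamped out with a macro for
// additional scalar types. Only the owned-SMatrix impl is shown; the other
// five impls would follow the same pattern. It must not be invoked for `i8`,
// which is already implemented above.
#[allow(unused_macros)]
macro_rules! impl_scalar_mul_lhs {
    ($t:ty) => {
        impl<const ROWS: usize, const COLS: usize> Mul<SMatrix<$t, ROWS, COLS>> for $t {
            type Output = SMatrix<$t, ROWS, COLS>;
            #[inline]
            fn mul(self, rhs: SMatrix<$t, ROWS, COLS>) -> Self::Output {
                let mut b = MF::<$t, ROWS, COLS>::new_stack();
                for row in 0..ROWS {
                    for col in 0..COLS {
                        b[row][col] = self * rhs[row][col];
                    }
                }
                b
            }
        }
    };
}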
#[cfg(test)]
mod scalar_mul_tests {
use super::*;
#[test]
fn test_1() {
let mut a = MF::<i8, 2, 2>::new_stack();
a[0][0] = 2i8;
a[1][1] = 4i8;
let b = 2i8 * a;
println!("{:?}", b);
let mut a = MF::<i8, 2, 2>::new_heap();
a[0][0] = 2i8;
a[1][1] = 4i8;
let b = 2i8 * a;
println!("{:?}", b);
}
#[test]
fn test_2() {
        let a = &mut MF::<i8, 2, 2>::new_stack();
a[0][0] = 2i8;
a[1][1] = 4i8;
let b = 2i8 * a;
println!("{:?}", b);
        let a = &mut MF::<i8, 2, 2>::new_heap();
a[0][0] = 2i8;
a[1][1] = 4i8;
let b = 2i8 * a;
println!("{:?}", b);
}
#[test]
fn test_3() {
let mut a = MF::<i8, 2, 2>::new_stack();
a[0][0] = 2i8;
a[1][1] = 4i8;
let b = &a;
let c = 2i8 * b;
println!("{:?}", c);
let mut a = MF::<i8, 2, 2>::new_heap();
a[0][0] = 2i8;
a[1][1] = 4i8;
let b = &a;
let c = 2i8 * b;
println!("{:?}", c);
}
}
| 30.979167 | 87 | 0.529926 |
d9546e02eea914bd165bed59b45f55170dcdd039 | 2,018 | //!
//! The if-conditional statement.
//!
use crate::yul::lexer::lexeme::Lexeme;
use crate::yul::lexer::Lexer;
use crate::yul::parser::statement::block::Block;
use crate::yul::parser::statement::expression::Expression;
///
/// The if-conditional statement.
///
#[derive(Debug, PartialEq, Clone)]
pub struct IfConditional {
/// The condition expression.
pub condition: Expression,
/// The conditional block.
pub block: Block,
}
impl IfConditional {
///
/// The element parser.
///
pub fn parse(lexer: &mut Lexer, initial: Option<Lexeme>) -> anyhow::Result<Self> {
let lexeme = crate::yul::parser::take_or_next(initial, lexer)?;
let condition = Expression::parse(lexer, Some(lexeme))?;
let block = Block::parse(lexer, None)?;
Ok(Self { condition, block })
}
}
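// For orientation (an added note, not derived from comments in the original
// file): `into_llvm` below lowers a Yul conditional such as
// `if lt(x, 10) { ... }` by zero-extending the condition value, comparing it
// against zero, and branching to `if_main`, with both paths converging on
// `if_join`.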
impl<D> compiler_llvm_context::WriteLLVM<D> for IfConditional
where
D: compiler_llvm_context::Dependency,
{
fn into_llvm(self, context: &mut compiler_llvm_context::Context<D>) -> anyhow::Result<()> {
let condition = self
.condition
.into_llvm(context)?
.expect("Always exists")
.to_llvm()
.into_int_value();
let condition = context.builder().build_int_z_extend_or_bit_cast(
condition,
context.field_type(),
"if_condition_extended",
);
let condition = context.builder().build_int_compare(
inkwell::IntPredicate::NE,
condition,
context.field_const(0),
"if_condition_compared",
);
let main_block = context.append_basic_block("if_main");
let join_block = context.append_basic_block("if_join");
context.build_conditional_branch(condition, main_block, join_block);
context.set_basic_block(main_block);
self.block.into_llvm(context)?;
context.build_unconditional_branch(join_block);
context.set_basic_block(join_block);
Ok(())
}
}
| 29.246377 | 95 | 0.627354 |
dde20b2d8c0c75561d28403c472a5d4e126cd965 | 9,621 | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::{
app::{strategies::base::AppStrategy, InternalSender, MessageInternal},
input::UserInputMessage,
view::{
strategies::{
base::{FlatlandParams, ScenicParams, ViewStrategyParams, ViewStrategyPtr},
flatland::FlatlandViewStrategy,
scenic::ScenicViewStrategy,
},
ViewKey,
},
};
use anyhow::{bail, Context as _, Error};
use async_trait::async_trait;
use euclid::size2;
use fidl::endpoints::{create_endpoints, create_proxy};
use fidl_fuchsia_ui_app::{ViewProviderRequest, ViewProviderRequestStream};
use fidl_fuchsia_ui_scenic::{ScenicProxy, SessionListenerRequest};
use fidl_fuchsia_ui_views::{ViewRef, ViewRefControl, ViewToken};
use fuchsia_async::{self as fasync};
use fuchsia_component::server::{ServiceFs, ServiceObjLocal};
use fuchsia_scenic::{Session, SessionPtr, ViewRefPair, ViewTokenPair};
use fuchsia_zircon::EventPair;
use futures::{channel::mpsc::UnboundedSender, TryFutureExt, TryStreamExt};
pub(crate) struct ScenicAppStrategy {
pub(crate) scenic: ScenicProxy,
}
impl ScenicAppStrategy {
fn setup_session(
&self,
view_key: ViewKey,
app_sender: &UnboundedSender<MessageInternal>,
) -> Result<SessionPtr, Error> {
let (session_listener, session_listener_request) = create_endpoints()?;
let (session_proxy, session_request) = create_proxy()?;
self.scenic.create_session(session_request, Some(session_listener))?;
let sender = app_sender.clone();
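        // Forward Scenic session events (metrics changes, view-property
        // changes, and input) to the app as internal messages; this task
        // lives as long as the session listener stream.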
fasync::Task::local(
session_listener_request
.into_stream()?
.map_ok(move |request| match request {
SessionListenerRequest::OnScenicEvent { events, .. } => {
for event in events {
match event {
fidl_fuchsia_ui_scenic::Event::Gfx(gfx_event) => match gfx_event {
fidl_fuchsia_ui_gfx::Event::Metrics(metrics_event) => {
sender
.unbounded_send(MessageInternal::MetricsChanged(
view_key,
size2(
metrics_event.metrics.scale_x,
metrics_event.metrics.scale_y,
),
))
.expect("MessageInternal::MetricsChanged");
}
fidl_fuchsia_ui_gfx::Event::ViewPropertiesChanged(
view_properties_event,
) => {
let bounding_box =
&view_properties_event.properties.bounding_box;
let new_size = size2(
bounding_box.max.x - bounding_box.min.x,
bounding_box.max.y - bounding_box.min.y,
);
sender
.unbounded_send(MessageInternal::SizeChanged(
view_key, new_size,
))
.expect("MessageInternal::SizeChanged");
}
_ => (),
},
fidl_fuchsia_ui_scenic::Event::Input(input_event) => {
sender
.unbounded_send(MessageInternal::UserInputMessage(
view_key,
UserInputMessage::ScenicInputEvent(input_event),
))
.expect("MessageInternal::ScenicInputEvent");
}
_ => (),
}
}
}
_ => (),
})
.try_collect::<()>()
.unwrap_or_else(|e| eprintln!("view listener error: {:?}", e)),
)
.detach();
Ok(Session::new(session_proxy))
}
fn create_scenic_view(
sender: &UnboundedSender<MessageInternal>,
token: EventPair,
control_ref: ViewRefControl,
view_ref: ViewRef,
) {
let view_token = ViewToken { value: token };
sender
.unbounded_send(MessageInternal::CreateView(ViewStrategyParams::Scenic(ScenicParams {
view_token,
control_ref,
view_ref,
})))
.expect("send");
}
}
#[async_trait(?Send)]
impl AppStrategy for ScenicAppStrategy {
async fn create_view_strategy(
&self,
key: ViewKey,
app_sender: UnboundedSender<MessageInternal>,
strategy_params: ViewStrategyParams,
) -> Result<ViewStrategyPtr, Error> {
let session = self.setup_session(key, &app_sender)?;
match strategy_params {
ViewStrategyParams::Scenic(strategy_params) => Ok(ScenicViewStrategy::new(
key,
&session,
strategy_params.view_token,
strategy_params.control_ref,
strategy_params.view_ref,
app_sender.clone(),
)
.await?),
ViewStrategyParams::Flatland(flatland_params) => {
Ok(FlatlandViewStrategy::new(key, flatland_params, app_sender.clone()).await?)
}
_ => bail!("Incorrect ViewStrategyParams passed to create_view_strategy for scenic"),
}
}
fn create_view_for_testing(
&self,
app_sender: &UnboundedSender<MessageInternal>,
) -> Result<(), Error> {
let token = ViewTokenPair::new().context("ViewTokenPair::new")?;
let ViewRefPair { control_ref, view_ref } =
ViewRefPair::new().context("ViewRefPair::new")?;
app_sender
.unbounded_send(MessageInternal::CreateView(ViewStrategyParams::Scenic(ScenicParams {
view_token: token.view_token,
control_ref,
view_ref,
})))
.expect("send");
Ok(())
}
fn supports_scenic(&self) -> bool {
return true;
}
fn start_services<'a, 'b>(
&self,
app_sender: UnboundedSender<MessageInternal>,
fs: &'a mut ServiceFs<ServiceObjLocal<'b, ()>>,
) -> Result<(), Error> {
let mut public = fs.dir("svc");
let sender = app_sender.clone();
let f = move |stream: ViewProviderRequestStream| {
let sender = sender.clone();
fasync::Task::local(
stream
.try_for_each(move |req| {
match req {
ViewProviderRequest::CreateView { token, .. } => {
// We do not get passed a view ref so create our own
let ViewRefPair { control_ref, view_ref } =
ViewRefPair::new().expect("unable to create view ref pair");
Self::create_scenic_view(&sender, token, control_ref, view_ref);
}
ViewProviderRequest::CreateViewWithViewRef {
token,
view_ref_control,
view_ref,
..
} => {
Self::create_scenic_view(
&sender,
token,
view_ref_control,
view_ref,
);
}
ViewProviderRequest::CreateView2 { args, .. } => {
sender
.unbounded_send(MessageInternal::CreateView(
ViewStrategyParams::Flatland(FlatlandParams {
args,
debug_name: Some("Carnelian View".to_string()),
}),
))
.expect("unbounded_send");
}
};
futures::future::ready(Ok(()))
})
.unwrap_or_else(|e| eprintln!("error running ViewProvider server: {:?}", e)),
)
.detach()
};
public.add_fidl_service(f);
Ok(())
}
fn get_scenic_proxy(&self) -> Option<&ScenicProxy> {
return Some(&self.scenic);
}
async fn post_setup(&mut self, _internal_sender: &InternalSender) -> Result<(), Error> {
Ok(())
}
}
| 41.649351 | 98 | 0.45619 |
7ac0b807e8c66cc8a9841610f0661e2c5f78b497 | 3,585 | //! Contains macros which together define a benchmark harness that can be used
//! in place of the standard benchmark harness. This allows the user to run
//! Criterion.rs benchmarks with `cargo bench`.
/// Macro used to define a benchmark group for the benchmark harness; see the
/// criterion_main! macro for more details.
///
/// This is used to define a benchmark group; a collection of related benchmarks
/// which share a common configuration. Accepts two forms which can be seen
/// below.
///
/// # Examples:
///
/// Complete form:
///
/// ```
/// # #[macro_use]
/// # extern crate criterion;
/// # use criterion::Criterion;
/// # fn bench_method1(c: &mut Criterion) {
/// # }
/// #
/// # fn bench_method2(c: &mut Criterion) {
/// # }
/// #
/// criterion_group!{
/// name = benches;
/// config = Criterion::default();
/// targets = bench_method1, bench_method2
/// }
/// #
/// # fn main() {}
/// ```
///
/// In this form, all of the options are clearly spelled out. This expands to
/// a function named benches, which uses the given config expression to create
/// an instance of the Criterion struct. This is then passed by mutable
/// reference to the targets.
///
/// Compact Form:
///
/// ```
/// # #[macro_use]
/// # extern crate criterion;
/// # use criterion::Criterion;
/// # fn bench_method1(c: &mut Criterion) {
/// # }
/// #
/// # fn bench_method2(c: &mut Criterion) {
/// # }
/// #
/// criterion_group!(benches, bench_method1, bench_method2);
/// #
/// # fn main() {}
/// ```
/// In this form, the first parameter is the name of the group and subsequent
/// parameters are the target methods. The Criterion struct will be created using
/// the `Criterion::default()` function. If you wish to customize the
/// configuration, use the complete form and provide your own configuration
/// function.
#[macro_export]
macro_rules! criterion_group {
(name = $name:ident; config = $config:expr; targets = $( $target:path ),+ $(,)*) => {
pub fn $name() {
let mut criterion: $crate::Criterion = $config
.configure_from_args();
$(
$target(&mut criterion);
)+
}
};
($name:ident, $( $target:path ),+ $(,)*) => {
criterion_group!{
name = $name;
config = $crate::Criterion::default();
targets = $( $target ),+
}
}
}
/// Macro which expands to a benchmark harness.
///
/// Currently, using Criterion.rs requires disabling the benchmark harness
/// generated automatically by rustc. This can be done like so:
///
/// ```toml
/// [[bench]]
/// name = "my_bench"
/// harness = false
/// ```
///
/// In this case, `my_bench` must be a rust file inside the 'benches' directory,
/// like so:
///
/// `benches/my_bench.rs`
///
/// Since we've disabled the default benchmark harness, we need to add our own:
///
/// ```ignore
/// #[macro_use]
/// extern crate criterion;
/// use criterion::Criterion;
/// fn bench_method1(c: &mut Criterion) {
/// }
///
/// fn bench_method2(c: &mut Criterion) {
/// }
///
/// criterion_group!(benches, bench_method1, bench_method2);
/// criterion_main!(benches);
/// ```
///
/// The `criterion_main` macro expands to a `main` function which runs all of the
/// benchmarks in the given groups.
///
#[macro_export]
macro_rules! criterion_main {
( $( $group:path ),+ $(,)* ) => {
fn main() {
$(
$group();
)+
$crate::Criterion::default()
.configure_from_args()
.final_summary();
}
}
}
| 27.576923 | 89 | 0.597768 |
7a3237162d8725ddf6fdf64041b0da57c34d4846 | 6,257 | // WARNING: This file was autogenerated by jni-bindgen. Any changes to this file may be lost!!!
#[cfg(any(feature = "all", feature = "android-view-animation-AlphaAnimation"))]
__jni_bindgen! {
/// public class [AlphaAnimation](https://developer.android.com/reference/android/view/animation/AlphaAnimation.html)
///
/// Required feature: android-view-animation-AlphaAnimation
public class AlphaAnimation ("android/view/animation/AlphaAnimation") extends crate::android::view::animation::Animation {
/// [AlphaAnimation](https://developer.android.com/reference/android/view/animation/AlphaAnimation.html#AlphaAnimation(android.content.Context,%20android.util.AttributeSet))
///
/// Required features: "android-content-Context", "android-util-AttributeSet"
#[cfg(any(feature = "all", all(feature = "android-content-Context", feature = "android-util-AttributeSet")))]
pub fn new_Context_AttributeSet<'env>(__jni_env: &'env __jni_bindgen::Env, arg0: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::android::content::Context>>, arg1: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::android::util::AttributeSet>>) -> __jni_bindgen::std::result::Result<__jni_bindgen::Local<'env, crate::android::view::animation::AlphaAnimation>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "android/view/animation/AlphaAnimation", java.flags == PUBLIC, .name == "<init>", .descriptor == "(Landroid/content/Context;Landroid/util/AttributeSet;)V"
unsafe {
let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into()), __jni_bindgen::AsJValue::as_jvalue(&arg1.into())];
let (__jni_class, __jni_method) = __jni_env.require_class_method("android/view/animation/AlphaAnimation\0", "<init>\0", "(Landroid/content/Context;Landroid/util/AttributeSet;)V\0");
__jni_env.new_object_a(__jni_class, __jni_method, __jni_args.as_ptr())
}
}
/// [AlphaAnimation](https://developer.android.com/reference/android/view/animation/AlphaAnimation.html#AlphaAnimation(float,%20float))
pub fn new_float_float<'env>(__jni_env: &'env __jni_bindgen::Env, arg0: f32, arg1: f32) -> __jni_bindgen::std::result::Result<__jni_bindgen::Local<'env, crate::android::view::animation::AlphaAnimation>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "android/view/animation/AlphaAnimation", java.flags == PUBLIC, .name == "<init>", .descriptor == "(FF)V"
unsafe {
let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0), __jni_bindgen::AsJValue::as_jvalue(&arg1)];
let (__jni_class, __jni_method) = __jni_env.require_class_method("android/view/animation/AlphaAnimation\0", "<init>\0", "(FF)V\0");
__jni_env.new_object_a(__jni_class, __jni_method, __jni_args.as_ptr())
}
}
// // Not emitting: Non-public method
// /// [applyTransformation](https://developer.android.com/reference/android/view/animation/AlphaAnimation.html#applyTransformation(float,%20android.view.animation.Transformation))
// ///
// /// Required features: "android-view-animation-Transformation"
// #[cfg(any(feature = "all", all(feature = "android-view-animation-Transformation")))]
// fn applyTransformation<'env>(&'env self, arg0: f32, arg1: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::android::view::animation::Transformation>>) -> __jni_bindgen::std::result::Result<(), __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// // class.path == "android/view/animation/AlphaAnimation", java.flags == PROTECTED, .name == "applyTransformation", .descriptor == "(FLandroid/view/animation/Transformation;)V"
// unsafe {
// let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0), __jni_bindgen::AsJValue::as_jvalue(&arg1.into())];
// let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
// let (__jni_class, __jni_method) = __jni_env.require_class_method("android/view/animation/AlphaAnimation\0", "applyTransformation\0", "(FLandroid/view/animation/Transformation;)V\0");
// __jni_env.call_void_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
// }
// }
/// [willChangeTransformationMatrix](https://developer.android.com/reference/android/view/animation/AlphaAnimation.html#willChangeTransformationMatrix())
pub fn willChangeTransformationMatrix<'env>(&'env self) -> __jni_bindgen::std::result::Result<bool, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "android/view/animation/AlphaAnimation", java.flags == PUBLIC, .name == "willChangeTransformationMatrix", .descriptor == "()Z"
unsafe {
let __jni_args = [];
let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
let (__jni_class, __jni_method) = __jni_env.require_class_method("android/view/animation/AlphaAnimation\0", "willChangeTransformationMatrix\0", "()Z\0");
__jni_env.call_boolean_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
}
}
/// [willChangeBounds](https://developer.android.com/reference/android/view/animation/AlphaAnimation.html#willChangeBounds())
pub fn willChangeBounds<'env>(&'env self) -> __jni_bindgen::std::result::Result<bool, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
// class.path == "android/view/animation/AlphaAnimation", java.flags == PUBLIC, .name == "willChangeBounds", .descriptor == "()Z"
unsafe {
let __jni_args = [];
let __jni_env = __jni_bindgen::Env::from_ptr(self.0.env);
let (__jni_class, __jni_method) = __jni_env.require_class_method("android/view/animation/AlphaAnimation\0", "willChangeBounds\0", "()Z\0");
__jni_env.call_boolean_method_a(self.0.object, __jni_method, __jni_args.as_ptr())
}
}
}
}
| 86.902778 | 503 | 0.675563 |
4bd63e620e9505c72d2cee1a21dff489121a7c43 | 643 | use std::io;
#[derive(Debug, Fail)]
pub enum BenchError {
#[fail(display = "Internal error: {}", _0)]
InternalError(&'static str),
#[fail(display = "ABI error: {}", _0)]
ABIError(&'static str),
#[fail(display = "Parse error: {}", _0)]
ParseError(String),
#[fail(display = "{}", _0)]
Io(#[cause] io::Error),
#[fail(display = "Unsupported")]
Unsupported,
}
impl From<io::Error> for BenchError {
fn from(e: io::Error) -> BenchError {
BenchError::Io(e)
}
}
impl From<&'static str> for BenchError {
fn from(e: &'static str) -> BenchError {
BenchError::InternalError(e)
}
}
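// A sketch of why the `From` impls above matter: they let the `?` operator
// and `.into()` convert both `io::Error` and `&'static str` into `BenchError`
// implicitly. The function below is hypothetical and only illustrates the
// conversions:
//
//     fn load_fixture(path: &str) -> Result<Vec<u8>, BenchError> {
//         let bytes = std::fs::read(path)?; // io::Error -> BenchError::Io
//         if bytes.is_empty() {
//             return Err("empty fixture".into()); // &str -> InternalError
//         }
//         Ok(bytes)
//     }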
| 22.964286 | 47 | 0.575428 |
d63b5af4683d84d3d2165b441af42871f50d950e | 226 | /// Possible errors that can be encountered when working with a keypair.
#[derive(Clone, Debug)]
pub enum KeysError {
InvalidPrivateKeyBytes,
SignatureFailed(String),
InvalidSignatureBytes,
InvalidSignature,
}
| 25.111111 | 72 | 0.747788 |
79aeb064fa8c492b4231b069310b8ad69c253662 | 7,122 | use hyper::header::HeaderValue;
use hyper::{header, Body, Response, StatusCode};
use ini::Ini;
use log::*;
use serde_json::{json, Map, Value};
use std::collections::HashMap;
use std::fmt::Debug;
use std::path::Path;
use std::process;
/*
* Constants and static variables
*/
pub const STUB_VTPM: bool = false;
pub const STUB_IMA: bool = true;
pub const TPM_DATA_PCR: usize = 16;
pub const IMA_PCR: usize = 10;
pub static RSA_PUBLICKEY_EXPORTABLE: &'static str = "rsa placeholder";
pub static TPM_TOOLS_PATH: &'static str = "/usr/local/bin/";
pub static IMA_ML_STUB: &'static str =
"../scripts/ima/ascii_runtime_measurements";
pub static IMA_ML: &'static str =
"/sys/kernel/security/ima/ascii_runtime_measurements";
pub static KEY: &'static str = "secret";
pub static WORK_DIR: &'static str = "/tmp";
// Secure mount of tpmfs (False is generally used for development environments)
pub static MOUNT_SECURE: bool = true;
/*
* Input: config file location (e.g. /etc/keylime.conf), [section] and key
* Return: Returns the matched key
*
* Example call:
 * let port = common::config_get("/etc/keylime.conf", "general", "cloudagent_port");
*/
pub fn config_get(conf_name: &str, section: &str, key: &str) -> String {
let conf = match Ini::load_from_file(conf_name) {
Ok(conf) => conf,
Err(_error) => {
error!("Error: unable to read config file: {} ", conf_name);
process::exit(1);
}
};
let section = match conf.section(Some(section.to_owned())) {
Some(section) => section,
None => {
error!(
"Cannot find section called {} within file {}",
section, conf_name
);
process::exit(1)
}
};
let value = match section.get(key) {
Some(value) => value,
None => {
error!("Cannot find key value {} within file {}", key, conf_name);
process::exit(1)
}
};
return value.clone();
}
/*
* Input: Response status code
* Response result status
* Json output content
*
 * Return: HTTP Response struct
 *
 * Converts the input into an HTTP Response struct with json output
 * formatting. Follows the original python-keylime echo_json_response()
 * output structure, but there are two differences between this response
 * json content and the original echo_json_response() response json content:
 * 1. The serde_json crate sorts keys in alphabetic order, which differs
 *    from the python version response structure.
 * 2. There is no whitespace in the json content, but the python version
 *    response content contains whitespace between keys.
*/
pub fn set_response_content(
code: i32,
status: &str,
results: Map<String, Value>,
response: &mut Response<Body>,
) -> Result<(), Box<i32>> {
    let integrated_result = json!({
"status": status,
"code": code,
"results": results,
});
    match serde_json::to_string(&integrated_result) {
Ok(s) => {
            // Dereferencing applies here because the new value needs to be
            // assigned through the mutable reference. Changing the headers
            // doesn't require a dereference because the returned header
            // reference is used to update the map in place, so no
            // dereference is needed in that case.
*response.body_mut() = s.into();
response.headers_mut().insert(
header::CONTENT_TYPE,
HeaderValue::from_static("application/json"),
);
Ok(())
}
Err(e) => {
error!("Failed to convert Json to string for Response body, error {}.", e);
Err(Box::new(-1))
}
}
}
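// A sketch of the body produced by the function above for
// `set_response_content(200, "Success", Map::new(), &mut response)`,
// showing the alphabetically sorted keys and lack of whitespace noted in
// the comment block:
//
//     {"code":200,"results":{},"status":"Success"}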
/*
* Input: URL string
*
 * Output: Map containing the request type and content in the request
 *
 * Converts an api request path to a map that contains the key and value,
 * which are the requested function and content given in pairs from the
 * original url. Same implementation as the original python version
 * get_restful_parameters() function.
*/
pub fn get_restful_parameters(urlstring: &str) -> HashMap<&str, &str> {
let mut parameters = HashMap::new();
let list: Vec<&str> = urlstring.split('/').collect();
    // error handling: a url without at least one path segment cannot be parsed
    if list.len() < 2 {
        return parameters;
    }
// capture the version number
let (_, right) = list[1].split_at(1);
parameters.insert("api_version", right);
    // starting from index 2, the third element, which is the first requested function
for x in 2..(list.len() - 1) {
parameters.insert(list[x], list[x + 1]);
}
parameters
}
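// A worked example of the mapping above: the url
// "127.0.0.1:1337/v2/keys/verify/pubkey" yields
// {"api_version": "2", "keys": "verify", "verify": "pubkey"}.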
/*
* Input: path directory to be changed owner to root
* Return: Result contains execution result
* - directory name for successful execution
* - -1 code for failure execution.
*
 * If the privilege requirement is met, change the owner of the path to root.
 * This function uses libc unsafely. A Result is returned indicating the
 * execution result.
*/
pub fn chownroot(path: String) -> Result<String, i32> {
unsafe {
// check privilege
if libc::geteuid() != 0 {
error!("Privilege level unable to change ownership to root for file: {}", path);
return Err(-1);
}
        // libc::chown expects a NUL-terminated C string, so the path must be
        // converted before the call (a raw Rust string is not NUL-terminated)
        let c_path = match std::ffi::CString::new(path.as_str()) {
            Ok(p) => p,
            Err(_) => {
                error!("Path {} contains an interior NUL byte.", path);
                return Err(-1);
            }
        };
        // change directory owner to root
        if libc::chown(c_path.as_ptr(), 0, 0) != 0 {
            error!("Failed to change file {} owner.", path);
            return Err(-1);
        }
info!("Changed file {} owner to root.", path);
Ok(path)
}
}
/*
* Input: error message
* Error (Option)
* Return: integrated error message string
*
 * An error message helper function to integrate an error message with error
 * information. Integrates the error message and error into a single error
 * message string. Error could be None. The message is returned as an Err<>
 * for error handling with Result<>.
*/
pub fn emsg<T, E>(message: &str, error: Option<T>) -> Result<E, Box<String>>
where
T: Debug,
{
match error {
Some(e) => Err(Box::new(format!("{} Error, {:?}.", message, e))),
None => Err(Box::new(message.to_string())),
}
}
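// A sketch of intended usage (the message and error value are illustrative):
//
//     return emsg("Failed to mount secure directory", Some(io_err));
//
// which evaluates to Err(Box::new("Failed to mount secure directory Error,
// <io_err debug output>.")). Passing None::<String> omits the error detail.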
// Unit Testing
#[cfg(test)]
mod tests {
use super::*;
    // Test the get_restful_parameters function with a given sample url
#[test]
fn test_get_restful_parameters() {
let mut map = HashMap::new();
map.insert("verify", "pubkey");
map.insert("api_version", "2");
        // Map content: {"api_version": "2", "verify": "pubkey"}
assert_eq!(
get_restful_parameters("127.0.0.1:1337/v2/verify/pubkey"),
map
);
}
#[test]
fn test_set_response_content() {
let mut my_res: Response<Body> = Response::new("nothing".into());
assert!(
set_response_content(0, "Ok", Map::new(), &mut my_res).is_ok()
);
}
#[test]
fn test_config_get_parameters_exist() {
let result = config_get("keylime.conf", "general", "cloudagent_port");
assert_eq!(result, "9002");
}
}
| 31.513274 | 92 | 0.617383 |
e58b89d62cce1e5fe7498b07c19ce949f2c17ba0 | 7,405 | use std::sync::Arc;
use criterion::criterion_group;
use criterion::criterion_main;
use criterion::BenchmarkId;
use criterion::Criterion;
use holo_hash::EntryHash;
use holo_hash::EntryHashes;
use holochain::conductor::handle::DevSettingsDelta;
use holochain::sweettest::*;
use holochain_conductor_api::conductor::ConductorConfig;
use holochain_conductor_api::AdminInterfaceConfig;
use holochain_conductor_api::InterfaceDriver;
use holochain_test_wasm_common::AnchorInput;
use holochain_test_wasm_common::ManyAnchorInput;
use holochain_wasm_test_utils::TestWasm;
use kitsune_p2p::KitsuneP2pConfig;
use tokio::runtime::Builder;
use tokio::runtime::Runtime;
// TODO: Produce a high data version of this bench.
// TODO: Add profile function to queries that need optimizing.
// TODO: Research indexing.
criterion_group!(benches, consistency);
criterion_main!(benches);
fn consistency(bench: &mut Criterion) {
observability::test_run().ok();
let mut group = bench.benchmark_group("consistency");
group.sample_size(
std::env::var_os("BENCH_SAMPLE_SIZE")
.and_then(|s| s.to_string_lossy().parse::<usize>().ok())
.unwrap_or(100),
);
let runtime = rt();
let (mut producer, mut consumer, others) = runtime.block_on(setup());
if let Some(n) = std::env::var_os("BENCH_NUM_OPS") {
let num_ops = n.to_string_lossy().parse::<usize>().unwrap();
runtime.block_on(async {
producer.fill(num_ops).await;
let mut cells = vec![&consumer.cell, &producer.cell];
cells.extend(others.cells.iter());
let num_tries = std::env::var_os("BENCH_NUM_WAITS")
.and_then(|s| s.to_string_lossy().parse::<usize>().ok())
.unwrap_or(100);
holochain::test_utils::consistency(
&cells,
num_tries,
std::time::Duration::from_millis(500),
)
.await;
// holochain_state::prelude::dump_tmp(consumer.cell.env());
});
}
let mut cells = vec![consumer.cell.clone(), producer.cell.clone()];
cells.extend(others.cells.clone());
runtime.spawn(async move {
producer.run().await;
producer.conductor.shutdown_and_wait().await;
});
group.bench_function(BenchmarkId::new("test", format!("test")), |b| {
b.iter(|| {
runtime.block_on(async { consumer.run(&cells[..]).await });
});
});
runtime.block_on(async move {
// The line below was added when migrating to rust edition 2021, per
// https://doc.rust-lang.org/edition-guide/rust-2021/disjoint-capture-in-closures.html#migration
let _ = &others;
consumer.conductor.shutdown_and_wait().await;
drop(consumer);
for c in others.conductors {
c.shutdown_and_wait().await;
drop(c);
}
});
runtime.shutdown_background();
}
struct Producer {
conductor: SweetConductor,
cell: SweetCell,
rx: tokio::sync::mpsc::Receiver<usize>,
}
struct Consumer {
conductor: SweetConductor,
cell: SweetCell,
last: usize,
tx: tokio::sync::mpsc::Sender<usize>,
}
struct Others {
conductors: Vec<SweetConductor>,
cells: Vec<SweetCell>,
}
impl Producer {
async fn run(&mut self) {
while let Some(mut i) = self.rx.recv().await {
i += 1;
let _: EntryHash = self
.conductor
.call(
&self.cell.zome("anchor"),
"anchor",
AnchorInput("alice".to_string(), i.to_string()),
)
.await;
}
}
#[tracing::instrument(skip(self))]
async fn fill(&mut self, num_ops: usize) {
let inputs: Vec<_> = (0..num_ops)
.map(|i| AnchorInput("alice_fill".to_string(), i.to_string()))
.collect();
let _: Vec<EntryHash> = self
.conductor
.call(
&self.cell.zome("anchor"),
"anchor_many",
ManyAnchorInput(inputs),
)
.await;
// holochain_state::prelude::dump_tmp(self.cell.env());
}
}
impl Consumer {
async fn run(&mut self, cells: &[SweetCell]) {
let start = std::time::Instant::now();
let mut num = self.last;
while num <= self.last {
let hashes: EntryHashes = self
.conductor
.call(
&self.cell.zome("anchor"),
"list_anchor_addresses",
"alice".to_string(),
)
.await;
num = hashes.0.len();
if start.elapsed().as_secs() > 1 {
for cell in cells {
holochain::test_utils::consistency(
&[cell],
1,
std::time::Duration::from_millis(10),
)
.await;
}
}
// dump_tmp(self.cell.env());
// dump_tmp(prod.env());
}
self.last = num;
dbg!(start.elapsed());
self.tx.send(num).await.unwrap();
}
}
async fn setup() -> (Producer, Consumer, Others) {
let (tx, rx) = tokio::sync::mpsc::channel(1);
let (dna, _) = SweetDnaFile::unique_from_test_wasms(vec![TestWasm::Anchor])
.await
.unwrap();
let config = || {
let mut tuning =
kitsune_p2p_types::config::tuning_params_struct::KitsuneP2pTuningParams::default();
tuning.gossip_strategy = "sharded-gossip".to_string();
let mut network = KitsuneP2pConfig::default();
network.transport_pool = vec![kitsune_p2p::TransportConfig::Quic {
bind_to: None,
override_host: None,
override_port: None,
}];
network.tuning_params = Arc::new(tuning);
ConductorConfig {
network: Some(network),
admin_interfaces: Some(vec![AdminInterfaceConfig {
driver: InterfaceDriver::Websocket { port: 0 },
}]),
..Default::default()
}
};
let configs = vec![config(), config(), config(), config(), config()];
let mut conductors = SweetConductorBatch::from_configs(configs.clone()).await;
for c in conductors.iter() {
c.update_dev_settings(DevSettingsDelta {
publish: Some(false),
..Default::default()
});
}
let apps = conductors.setup_app("app", &[dna]).await.unwrap();
let mut cells = apps
.into_inner()
.into_iter()
.map(|c| c.into_cells().into_iter().next().unwrap());
let alice = cells.next().unwrap();
let bobbo = cells.next().unwrap();
conductors.exchange_peer_info().await;
let mut conductors = conductors.into_inner().into_iter();
tx.send(0).await.unwrap();
(
Producer {
conductor: conductors.next().unwrap(),
cell: alice,
rx,
},
Consumer {
conductor: conductors.next().unwrap(),
cell: bobbo,
tx,
last: 0,
},
Others {
conductors: conductors.collect(),
cells: cells.collect(),
},
)
}
pub fn rt() -> Runtime {
Builder::new_multi_thread().enable_all().build().unwrap()
}
| 31.645299 | 104 | 0.558542 |
3393cfe5ee8d9fcdf8a875737fa9af62028f81b8 | 129,718 | use std::{
borrow::Cow,
cmp::Ordering,
collections::{BTreeMap, BTreeSet},
convert::{TryFrom, TryInto},
fmt::Display,
sync::Arc,
};
use hashbrown::{hash_map, HashMap};
use itertools::Itertools;
use snafu::{ResultExt, Snafu};
use crate::column::{self, cmp::Operator, Column, RowIDs, RowIDsOption};
use crate::schema;
use crate::schema::{AggregateType, LogicalDataType, ResultSchema};
use crate::value::{
AggregateVec, EncodedValues, OwnedValue, Scalar, Value, Values, ValuesIterator,
};
use arrow::{
array,
array::ArrayRef,
datatypes::{DataType, TimeUnit},
record_batch::RecordBatch,
};
use datafusion::{
logical_plan::Expr as DfExpr, logical_plan::Operator as DFOperator,
scalar::ScalarValue as DFScalarValue,
};
use internal_types::schema::{InfluxColumnType, Schema};
use internal_types::selection::Selection;
use std::num::NonZeroU64;
/// The name used for a timestamp column.
pub const TIME_COLUMN_NAME: &str = internal_types::schema::TIME_COLUMN_NAME;
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("arrow conversion error: {}", source))]
ArrowError { source: arrow::error::ArrowError },
#[snafu(display("schema conversion error: {}", source))]
SchemaError {
source: internal_types::schema::builder::Error,
},
#[snafu(display("unsupported operation: {}", msg))]
UnsupportedOperation { msg: String },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
/// A `RowGroup` is an immutable horizontal chunk of a single `Table`. By
/// definition it has the same schema as all the other row groups in the table.
/// All the columns within the `RowGroup` must have the same number of logical
/// rows.
pub struct RowGroup {
meta: MetaData,
columns: Vec<Column>,
all_columns_by_name: BTreeMap<String, usize>,
time_column: usize,
}
impl RowGroup {
pub fn new(rows: u32, columns: Vec<(String, ColumnType)>) -> Self {
let mut meta = MetaData {
rows,
..MetaData::default()
};
let mut all_columns = vec![];
let mut all_columns_by_name = BTreeMap::new();
let mut time_column = None;
for (name, ct) in columns {
match ct {
ColumnType::Tag(c) => {
assert_eq!(c.num_rows(), rows);
meta.add_column(
&name,
c.size(),
schema::ColumnType::Tag(name.clone()),
c.logical_datatype(),
c.column_range(),
c.cardinality(),
);
all_columns_by_name.insert(name.clone(), all_columns.len());
all_columns.push(c);
}
ColumnType::Field(c) => {
assert_eq!(c.num_rows(), rows);
meta.add_column(
&name,
c.size(),
schema::ColumnType::Field(name.clone()),
c.logical_datatype(),
c.column_range(),
c.cardinality(),
);
all_columns_by_name.insert(name.clone(), all_columns.len());
all_columns.push(c);
}
ColumnType::Time(c) => {
assert_eq!(c.num_rows(), rows);
meta.add_column(
&name,
c.size(),
schema::ColumnType::Timestamp(name.clone()),
c.logical_datatype(),
c.column_range(),
c.cardinality(),
);
all_columns_by_name.insert(name.clone(), all_columns.len());
time_column = Some(all_columns.len());
all_columns.push(c);
}
}
}
// Meta data should have same columns for types and ranges.
assert_eq!(meta.columns.keys().len(), all_columns.len());
Self {
meta,
columns: all_columns,
all_columns_by_name,
time_column: time_column.unwrap(),
}
}
/// The total estimated size in bytes of the row group in memory
pub fn size(&self) -> usize {
let base_size = std::mem::size_of::<Self>()
+ self
.all_columns_by_name
.keys()
.map(|key| key.len() + std::mem::size_of::<usize>())
.sum::<usize>();
base_size + self.meta.size()
}
/// Returns an iterator of (column_name, estimated_size) for all
/// columns in this row_group
pub fn column_sizes(&self) -> impl Iterator<Item = (&str, usize)> + '_ {
self.all_columns_by_name.iter().map(move |(name, idx)| {
let column = &self.columns[*idx];
(name.as_str(), column.size())
})
}
/// The total estimated size in bytes of all columns in the row group if
/// the values within were stored contiguously with no compression.
/// `include_nulls` controls whether to size NULL values according to the
/// base size of the data-type or to ignore them from the calculation.
pub fn size_raw(&self, include_nulls: bool) -> usize {
self.columns
.iter()
.map(|c| c.size_raw(include_nulls))
.sum::<usize>()
}
/// The number of rows in the `RowGroup` (all columns have the same number
/// of rows).
pub fn rows(&self) -> u32 {
self.meta.rows
}
// The row group's meta data.
pub fn metadata(&self) -> &MetaData {
&self.meta
}
// Returns a reference to a column from the column name.
//
// It is the caller's responsibility to ensure the column exists in the read
// group. Panics if the column doesn't exist.
fn column_by_name(&self, name: ColumnName<'_>) -> &Column {
&self.columns[*self.all_columns_by_name.get(name).unwrap()]
}
// Takes a `ColumnName`, looks up that column in the `RowGroup`, and
// returns a reference to that column's name owned by the `RowGroup` along
// with a reference to the column itself. The returned column name will have
// the lifetime of `self`, not the lifetime of the input.
fn column_name_and_column(&self, name: ColumnName<'_>) -> (&str, &Column) {
let (column_name, column_index) = self.all_columns_by_name.get_key_value(name).unwrap();
(column_name, &self.columns[*column_index])
}
// Returns a reference to the timestamp column.
fn time_column(&self) -> &Column {
&self.columns[self.time_column]
}
/// Efficiently determines if the row group _might_ satisfy all of the
/// provided binary expressions, when conjunctively applied.
///
/// `false` indicates that one or more of the expressions would not match
/// any rows in the row group.
pub fn could_satisfy_conjunctive_binary_expressions<'a>(
&self,
exprs: impl IntoIterator<Item = &'a BinaryExpr>,
) -> bool {
// if a single expression returns `false` then the whole operation
// returns `false` because the expressions are conjunctively applied.
exprs
.into_iter()
.all(|expr| self.meta.column_could_satisfy_binary_expr(expr))
}
/// Determines if the row group contains one or more rows that satisfy all
/// of the provided binary expressions, when conjunctively applied.
///
/// `satisfies_predicate` currently constructs a set of row ids for all
/// rows that satisfy the predicate, but does not materialise any
/// values. There are some optimisation opportunities here, but I don't
/// think they're at all worth it at the moment.
///
/// They could be:
/// * for predicates with single expression just find a matching value in
/// the column;
/// * in some cases perhaps work row by row rather than column by column.
pub fn satisfies_predicate(&self, predicate: &Predicate) -> bool {
if !self.could_satisfy_conjunctive_binary_expressions(predicate.iter()) {
return false;
}
// return false if there were no rows ids returned that satisfy the
// predicate.
!matches!(
self.row_ids_from_predicate(predicate),
RowIDsOption::None(_)
)
}
//
// Methods for reading the `RowGroup`
//
/// Returns a set of materialised column values that optionally satisfy a
/// predicate.
///
/// TODO(edd): this should probably return an Option and the caller can
/// filter None results.
pub fn read_filter(
&self,
columns: &[ColumnName<'_>],
predicates: &Predicate,
) -> ReadFilterResult<'_> {
let select_columns = self.meta.schema_for_column_names(&columns);
assert_eq!(select_columns.len(), columns.len());
let schema = ResultSchema {
select_columns,
..Default::default()
};
// apply predicates to determine candidate rows.
let row_ids = self.row_ids_from_predicate(predicates);
let col_data = self.materialise_rows(&schema, row_ids);
ReadFilterResult {
schema,
data: col_data,
}
}
fn materialise_rows(&self, schema: &ResultSchema, row_ids: RowIDsOption) -> Vec<Values<'_>> {
let mut col_data = Vec::with_capacity(schema.len());
match row_ids {
RowIDsOption::None(_) => col_data, // nothing to materialise
RowIDsOption::Some(row_ids) => {
// TODO(edd): causes an allocation. Implement a way to pass a
// pooled buffer to the croaring Bitmap API.
let row_ids = row_ids.to_vec();
for (ct, _) in &schema.select_columns {
let (_, col) = self.column_name_and_column(ct.as_str());
if let schema::ColumnType::Tag(_) = ct {
col_data.push(col.values_as_dictionary(row_ids.as_slice()));
} else {
col_data.push(col.values(row_ids.as_slice()));
}
}
col_data
}
RowIDsOption::All(_) => {
for (ct, _) in &schema.select_columns {
let (_, col) = self.column_name_and_column(ct.as_str());
if let schema::ColumnType::Tag(_) = ct {
col_data.push(col.all_values_as_dictionary());
} else {
col_data.push(col.all_values());
}
}
col_data
}
}
}
// Determines the set of row ids that satisfy the provided predicate.
fn row_ids_from_predicate(&self, predicate: &Predicate) -> RowIDsOption {
// TODO(edd): perf - potentially pool this so we can re-use it once rows
// have been materialised and it's no longer needed. Initialise a bitmap
        // RowIDs because it's likely that set operations will be necessary.
let mut result_row_ids = RowIDs::new_bitmap();
// TODO(edd): perf - pool the dst buffer so we can re-use it across
// subsequent calls to `row_ids_from_predicates`. Right now this buffer
// will be re-used across all columns in the `RowGroup` but not re-used
// for subsequent calls _to_ the `RowGroup`.
let mut dst = RowIDs::new_bitmap();
let mut predicate = Cow::Borrowed(predicate);
// If there is a time-range in the predicate (two time expressions),
// then execute an optimised version that will use a range based
// filter on the time column, effectively avoiding a scan and
// intersection.
if predicate.contains_time_range() {
predicate = predicate.to_owned(); // We need to modify the predicate
let time_range = predicate
.to_mut()
.remove_expr_by_column_name(TIME_COLUMN_NAME);
// removes time expression from predicate
let time_pred_row_ids = self.row_ids_from_time_range(&time_range, dst);
match time_pred_row_ids {
// No matching rows based on time range
RowIDsOption::None(_) => return time_pred_row_ids,
// all rows match - continue to apply other predicates
RowIDsOption::All(_dst) => {
dst = _dst; // hand buffer back
}
// some rows match - continue to apply predicates
RowIDsOption::Some(row_ids) => {
// fill the result row id set with the matching rows from
// the time column.
result_row_ids.union(&row_ids);
dst = row_ids // hand buffer back
}
}
}
for expr in predicate.iter() {
// N.B column should always exist because validation of predicates
// should happen at the `Table` level.
let (_, col) = self.column_name_and_column(expr.column());
// Explanation of how this buffer pattern works. The idea is that
// the buffer should be returned to the caller so it can be re-used
// on other columns. Each call to `row_ids_filter` returns the
// buffer back enabling it to be re-used.
match col.row_ids_filter(&expr.op, &expr.literal_as_value(), dst) {
// No rows will be returned for the `RowGroup` because this
// column does not match any rows.
RowIDsOption::None(_dst) => return RowIDsOption::None(_dst),
// Intersect the row ids found at this column with all those
// found on other column predicates.
RowIDsOption::Some(row_ids) => {
if result_row_ids.is_empty() {
result_row_ids.union(&row_ids)
}
result_row_ids.intersect(&row_ids);
dst = row_ids; // hand buffer back
}
// This is basically a no-op because all rows match the
// predicate on this column.
RowIDsOption::All(_dst) => {
dst = _dst; // hand buffer back
}
}
}
if result_row_ids.is_empty() {
// All rows matched all predicates because any predicates not
// matching any rows would have resulted in an early return.
return RowIDsOption::All(result_row_ids);
}
RowIDsOption::Some(result_row_ids)
}
// An optimised function for applying two comparison predicates to a time
// column at once.
fn row_ids_from_time_range(&self, time_range: &[BinaryExpr], dst: RowIDs) -> RowIDsOption {
assert_eq!(time_range.len(), 2);
self.time_column().row_ids_filter_range(
&(time_range[0].op, time_range[0].literal_as_value()), // min time
&(time_range[1].op, time_range[1].literal_as_value()), // max time
dst,
)
}
/// Materialises a collection of data in group columns and aggregate
/// columns, optionally filtered by the provided predicate.
///
/// Collectively, row-wise values in the group columns comprise a "group
/// key", and each value in the same row for the aggregate columns contains
/// aggregate values for those group keys.
///
/// Note: `read_aggregate` currently only supports "tag" columns.
/// Note: `read_aggregate` does not order results.
pub fn read_aggregate(
&self,
predicate: &Predicate,
group_columns: &[ColumnName<'_>],
aggregates: &[(ColumnName<'_>, AggregateType)],
) -> ReadAggregateResult<'_> {
let schema = ResultSchema {
select_columns: vec![],
group_columns: self.meta.schema_for_column_names(group_columns),
aggregate_columns: self.meta.schema_for_aggregate_column_names(aggregates),
};
let mut result = ReadAggregateResult {
schema,
..ReadAggregateResult::default()
};
// Pure column aggregates - no grouping.
if group_columns.is_empty() {
self.aggregate_columns(predicate, &mut result);
return result;
}
// All of the below assume grouping by columns.
// Handle case where there are no predicates and all the columns being
// grouped support constant-time expression of the row_ids belonging to
// each grouped value.
let all_group_cols_pre_computed = result.schema.group_column_names_iter().all(|name| {
self.column_by_name(name)
.properties()
.has_pre_computed_row_ids
});
if predicate.is_empty() && all_group_cols_pre_computed {
self.read_group_all_rows_all_rle(&mut result);
return result;
}
// There are predicates. The next stage is apply them and determine the
// intermediate set of row ids.
let row_ids = self.row_ids_from_predicate(predicate);
let filter_row_ids = match row_ids {
RowIDsOption::None(_) => {
return result;
} // no matching rows
RowIDsOption::Some(row_ids) => Some(row_ids.to_vec()),
RowIDsOption::All(_) => None,
};
let agg_cols_num = result.schema.aggregate_columns.len();
// materialise all *encoded* values for each column we are grouping on.
// These will not be the logical (typically string) values, but will be
// vectors of integers representing the physical values.
let groupby_encoded_ids: Vec<_> = result
.schema
.group_column_names_iter()
.map(|name| {
let col = self.column_by_name(name);
let mut encoded_values_buf =
EncodedValues::with_capacity_u32(col.num_rows() as usize);
// Do we want some rows for the column (predicate filtered some
// rows) or all of them (predicates filtered no rows).
match &filter_row_ids {
Some(row_ids) => {
encoded_values_buf = col.encoded_values(row_ids, encoded_values_buf);
}
None => {
// None here means "no partial set of row ids" meaning
// get all of them.
encoded_values_buf = col.all_encoded_values(encoded_values_buf);
}
}
encoded_values_buf.take_u32()
})
.collect();
// Materialise values in aggregate columns.
let mut aggregate_columns_data = Vec::with_capacity(agg_cols_num);
for (col_type, _, _) in &result.schema.aggregate_columns {
let col = self.column_by_name(col_type.as_str());
// TODO(edd): this materialises a column per aggregate. If there are
// multiple aggregates for the same column then this will
// over-allocate
// Do we want some rows for the column or all of them?
let column_values = match &filter_row_ids {
Some(row_ids) => col.values(row_ids),
None => {
// None here means "no partial set of row ids", i.e., get
// all of the row ids because they all satisfy the
// predicates.
col.all_values()
}
};
aggregate_columns_data.push(column_values);
}
// Perform the group by using a hashmap
self.read_group_with_hashing(&mut result, &groupby_encoded_ids, aggregate_columns_data);
result
}
    // read_group_with_hashing executes a read-group-aggregate operation on
    // the `RowGroup` using a hashmap to build up a collection of group keys
    // and aggregates.
    //
    // read_group_with_hashing accepts a set of conjunctive predicates.
fn read_group_with_hashing<'a>(
&'a self,
dst: &mut ReadAggregateResult<'a>,
groupby_encoded_ids: &[Vec<u32>],
aggregate_columns_data: Vec<Values<'a>>,
) {
// An optimised approach to building the hashmap of group keys using a
// single 128-bit integer as the group key. If grouping is on more than
        // four columns then a fallback to using a vector as a key will happen.
if dst.schema.group_columns.len() <= 4 {
self.read_group_hash_with_u128_key(dst, &groupby_encoded_ids, aggregate_columns_data);
return;
}
self.read_group_hash_with_vec_key(dst, &groupby_encoded_ids, aggregate_columns_data);
}
// This function is used with `read_group_hash` when the number of columns
// being grouped on requires the use of a `Vec<u32>` as the group key in the
// hash map.
fn read_group_hash_with_vec_key<'a>(
&'a self,
dst: &mut ReadAggregateResult<'a>,
groupby_encoded_ids: &[Vec<u32>],
aggregate_input_columns: Vec<Values<'a>>,
) {
let total_rows = groupby_encoded_ids[0].len();
assert!(groupby_encoded_ids.iter().all(|x| x.len() == total_rows));
// These vectors will hold the decoded values of each part of each
// group key. They are the output columns of the input columns used for
// the grouping operation.
let mut group_cols_out = vec![vec![]; groupby_encoded_ids.len()];
// Each of these vectors will be used to store each aggregate row-value
// for a specific aggregate result column.
let mut agg_cols_out = dst
.schema
.aggregate_columns
.iter()
.map(|(_, agg_type, data_type)| AggregateVec::from((agg_type, data_type)))
.collect::<Vec<_>>();
// Maps each group key to an ordinal offset on output columns. This
// offset is used to update aggregate values for each group key and to
// store the decoded representations of the group keys themselves in
// the associated output columns.
let mut group_keys: HashMap<Vec<u32>, usize> = HashMap::default();
// reference back to underlying group columns for fetching decoded group
// key values.
let input_group_columns = dst
.schema
.group_column_names_iter()
.map(|name| self.column_by_name(name))
.collect::<Vec<_>>();
// key_buf will be used as a temporary buffer for group keys represented
// as a `Vec<u32>`.
let mut key_buf = vec![0; dst.schema.group_columns.len()];
let mut next_ordinal_id = 0; // assign a position for each group key in output columns.
for row in 0..total_rows {
// update the group key buffer with the group key for this row
for (j, col_ids) in groupby_encoded_ids.iter().enumerate() {
key_buf[j] = col_ids[row];
}
match group_keys.raw_entry_mut().from_key(&key_buf) {
hash_map::RawEntryMut::Occupied(entry) => {
let ordinal_id = entry.get();
// Update each aggregate column at this ordinal offset
// with the values present in the input columns at the
// current row.
for (agg_col_i, aggregate_result) in agg_cols_out.iter_mut().enumerate() {
aggregate_result.update(
&aggregate_input_columns[agg_col_i],
row,
*ordinal_id,
)
}
}
// group key does not exist, so create it.
hash_map::RawEntryMut::Vacant(entry) => {
// Update each aggregate column at this ordinal offset
// with the values present in the input columns at the
// current row.
for (agg_col_i, aggregate_result) in agg_cols_out.iter_mut().enumerate() {
aggregate_result.update(
&aggregate_input_columns[agg_col_i],
row,
next_ordinal_id,
)
}
// Add decoded group key values to the output group columns.
for (group_col_i, group_key_col) in group_cols_out.iter_mut().enumerate() {
if group_key_col.len() >= next_ordinal_id {
group_key_col.resize(next_ordinal_id + 1, None);
}
let decoded_value = input_group_columns[group_col_i]
.decode_id(groupby_encoded_ids[group_col_i][row]);
group_key_col[next_ordinal_id] = match decoded_value {
Value::Null => None,
Value::String(s) => Some(s),
_ => panic!("currently unsupported group column"),
};
}
// update the hashmap with the encoded group key and the
// associated ordinal offset.
entry.insert(key_buf.clone(), next_ordinal_id);
next_ordinal_id += 1;
}
}
}
dst.group_key_cols = group_cols_out;
dst.aggregate_cols = agg_cols_out;
}
// This function is similar to `read_group_hash_with_vec_key` in that it
// calculates groups keys and aggregates for a read-group-aggregate
// operation using a hashmap.
//
// This function can be invoked when fewer than four columns are being
// grouped. In this case the key to the hashmap can be a `u128` integer,
// which is significantly more performant than using a `Vec<u32>`.
fn read_group_hash_with_u128_key<'a>(
&'a self,
dst: &mut ReadAggregateResult<'a>,
groupby_encoded_ids: &[Vec<u32>],
aggregate_input_columns: Vec<Values<'a>>,
) {
let total_rows = groupby_encoded_ids[0].len();
assert!(groupby_encoded_ids.iter().all(|x| x.len() == total_rows));
assert!(dst.schema.group_columns.len() <= 4);
// These vectors will hold the decoded values of each part of each
// group key. They are the output columns derived from the input
// grouping columns.
let mut group_cols_out: Vec<Vec<Option<ColumnName<'a>>>> = vec![];
group_cols_out.resize(groupby_encoded_ids.len(), vec![]);
// Each of these vectors will be used to store each aggregate row-value
// for a specific aggregate result column.
let mut agg_cols_out = dst
.schema
.aggregate_columns
.iter()
.map(|(_, agg_type, data_type)| AggregateVec::from((agg_type, data_type)))
.collect::<Vec<_>>();
// Maps each group key to an ordinal offset on output columns. This
// offset is used to update aggregate values for each group key and to
// store the decoded representations of the group keys themselves in
// the associated output columns.
let mut group_keys: HashMap<u128, usize> = HashMap::default();
// reference back to underlying group columns for fetching decoded group
// key values.
let input_group_columns = dst
.schema
.group_column_names_iter()
.map(|name| self.column_by_name(name))
.collect::<Vec<_>>();
let mut next_ordinal_id = 0; // assign a position for each group key in output columns.
for row in 0..total_rows {
// pack each column's encoded value for the row into a packed
// group key.
let mut group_key_packed = 0_u128;
for (i, col_ids) in groupby_encoded_ids.iter().enumerate() {
group_key_packed = pack_u32_in_u128(group_key_packed, col_ids[row], i);
}
match group_keys.raw_entry_mut().from_key(&group_key_packed) {
hash_map::RawEntryMut::Occupied(entry) => {
let ordinal_id = entry.get();
// Update each aggregate column at this ordinal offset
// with the values present in the input columns at the
// current row.
for (agg_col_i, aggregate_result) in agg_cols_out.iter_mut().enumerate() {
aggregate_result.update(
&aggregate_input_columns[agg_col_i],
row,
*ordinal_id,
)
}
}
hash_map::RawEntryMut::Vacant(entry) => {
// Update each aggregate column at this ordinal offset
// with the values present in the input columns at the
// current row.
for (agg_col_i, aggregate_result) in agg_cols_out.iter_mut().enumerate() {
aggregate_result.update(
&aggregate_input_columns[agg_col_i],
row,
next_ordinal_id,
)
}
// Add decoded group key values to the output group columns.
for (group_col_i, group_key_col) in group_cols_out.iter_mut().enumerate() {
if group_key_col.len() >= next_ordinal_id {
group_key_col.resize(next_ordinal_id + 1, None);
}
let decoded_value = input_group_columns[group_col_i]
.decode_id(groupby_encoded_ids[group_col_i][row]);
group_key_col[next_ordinal_id] = match decoded_value {
Value::Null => None,
Value::String(s) => Some(s),
_ => panic!("currently unsupported group column"),
};
}
// update the hashmap with the encoded group key and the
// associated ordinal offset.
entry.insert(group_key_packed, next_ordinal_id);
next_ordinal_id += 1;
}
}
}
dst.group_key_cols = group_cols_out;
dst.aggregate_cols = agg_cols_out;
}
// Optimised `read_group` method when there are no predicates and all the
// group columns are RLE-encoded.
//
// In this case all the grouping columns pre-computed bitsets for each
// distinct value.
fn read_group_all_rows_all_rle<'a>(&'a self, dst: &mut ReadAggregateResult<'a>) {
// References to the columns to be used as input for producing the
// output aggregates.
let input_group_columns = dst
.schema
.group_column_names_iter()
.map(|name| self.column_by_name(name))
.collect::<Vec<_>>();
// References to the columns to be used as input for producing the
// output aggregates. Also returns the required aggregate type.
let input_aggregate_columns = dst
.schema
.aggregate_columns
.iter()
.map(|(col_type, agg_type, _)| (self.column_by_name(col_type.as_str()), *agg_type))
.collect::<Vec<_>>();
let groupby_encoded_ids = dst
.schema
.group_column_names_iter()
.map(|col_type| {
self.column_by_name(col_type.as_str())
.grouped_row_ids()
.unwrap_left()
})
.collect::<Vec<_>>();
// These vectors will hold the decoded values of each part of each
// group key. They are the output columns derived from the input
// grouping columns.
let mut group_cols_out: Vec<Vec<Option<ColumnName<'a>>>> = vec![];
group_cols_out.resize(groupby_encoded_ids.len(), vec![]);
// Each of these vectors will be used to store each aggregate row-value
// for a specific aggregate result column.
let mut agg_cols_out = dst
.schema
.aggregate_columns
.iter()
.map(|(_, agg_type, data_type)| AggregateVec::from((agg_type, data_type)))
.collect::<Vec<_>>();
let mut output_rows = 0;
// multi_cartesian_product will create the cartesian product of all
// grouping-column values. This is likely going to be more group keys
// than there exists row-data for, so don't materialise them yet...
//
// For example, we have two columns like:
//
// [0, 1, 1, 2, 2, 3, 4] // column encodes the values as integers
// [3, 3, 3, 3, 4, 2, 1] // column encodes the values as integers
//
// The columns have these distinct values:
//
// [0, 1, 2, 3, 4] [1, 2, 3, 4]
//
// We will produce the following "group key" candidates:
//
// [0, 1], [0, 2], [0, 3], [0, 4] [1, 1], [1, 2], [1, 3], [1, 4]
// [2, 1], [2, 2], [2, 3], [2, 4] [3, 1], [3, 2], [3, 3], [3, 4]
// [4, 1], [4, 2], [4, 3], [4, 4]
//
// Based on the columns we can see that we only have data for the
// following group keys:
//
// [0, 3], [1, 3], [2, 3], [2, 4], [3, 2], [4, 1]
//
// We figure out which group keys have data and which don't in the loop
// below, by intersecting row_id bitsets for each encoded id, and
// checking for non-empty sets.
let candidate_group_keys = groupby_encoded_ids
.iter()
.map(|ids| (0..ids.len()))
.multi_cartesian_product();
// Let's figure out which of the candidate group keys are actually group
// keys with data.
'outer: for group_key_buf in candidate_group_keys {
let mut group_key_row_ids =
Cow::Borrowed(groupby_encoded_ids[0][group_key_buf[0]].unwrap_bitmap());
if group_key_row_ids.is_empty() {
continue;
}
for i in 1..group_key_buf.len() {
let other = groupby_encoded_ids[i][group_key_buf[i]].unwrap_bitmap();
if group_key_row_ids.and_cardinality(other) > 0 {
group_key_row_ids = Cow::Owned(group_key_row_ids.and(other));
} else {
continue 'outer;
}
}
// There exist rows for this group key combination. Materialise the
// group key and calculate the aggregates for this key using set
// of row IDs.
output_rows += 1;
// Add decoded group key values to the output group columns.
for (group_col_i, col) in group_cols_out.iter_mut().enumerate() {
let decoded_value =
input_group_columns[group_col_i].decode_id(group_key_buf[group_col_i] as u32);
col.push(match decoded_value {
Value::Null => None,
Value::String(s) => Some(s),
_ => panic!("currently unsupported group column"),
});
}
// Calculate an aggregate from each input aggregate column and
// set it at the relevant offset in the output column.
for (agg_col_i, (agg_col, typ)) in input_aggregate_columns.iter().enumerate() {
match typ {
AggregateType::Count => {
let agg = agg_col.count(&group_key_row_ids.to_vec()) as u64;
agg_cols_out[agg_col_i].push(Value::Scalar(Scalar::U64(agg)))
}
AggregateType::First => {}
AggregateType::Last => {}
AggregateType::Min => {
let agg = agg_col.min(&group_key_row_ids.to_vec());
agg_cols_out[agg_col_i].push(agg);
}
AggregateType::Max => {
let agg = agg_col.max(&group_key_row_ids.to_vec());
agg_cols_out[agg_col_i].push(agg);
}
AggregateType::Sum => {
let agg = agg_col.sum(&group_key_row_ids.to_vec());
agg_cols_out[agg_col_i].push(Value::Scalar(agg));
}
}
}
}
for col in &group_cols_out {
assert_eq!(col.len(), output_rows);
}
for col in &agg_cols_out {
assert_eq!(col.len(), output_rows);
}
dst.group_key_cols = group_cols_out;
dst.aggregate_cols = agg_cols_out;
}
// Optimised `read_group` method for cases where the columns being grouped
// are already totally ordered in the `RowGroup`.
//
// In this case the rows are already in "group key order" and the aggregates
// can be calculated by reading the rows in order.
fn _read_group_sorted_stream(
&self,
_predicates: &Predicate,
_group_column: ColumnName<'_>,
_aggregates: &[(ColumnName<'_>, AggregateType)],
) {
todo!()
}
// Applies aggregates on multiple columns with an optional predicate.
fn aggregate_columns<'a>(&'a self, predicate: &Predicate, dst: &mut ReadAggregateResult<'a>) {
let row_ids = match predicate.is_empty() {
true => {
// TODO(edd): PERF - teach each column encoding how to produce
// an aggregate for all its rows without needed
// to see the entire set of row ids. Currently
// column encodings aggregate based on the slice
// of row ids they see.
(0..self.rows()).into_iter().collect::<Vec<u32>>()
}
false => match self.row_ids_from_predicate(predicate) {
RowIDsOption::Some(row_ids) => row_ids.to_vec(),
RowIDsOption::None(_) => vec![],
RowIDsOption::All(_) => {
// see above comment.
(0..self.rows()).into_iter().collect::<Vec<u32>>()
}
},
};
dst.aggregate_cols = dst
.schema
.aggregate_columns
.iter()
.map(|(col_type, agg_type, data_type)| {
let col = self.column_by_name(col_type.as_str()); // input aggregate column
let mut agg_vec = AggregateVec::from((agg_type, data_type));
// produce single aggregate for the input column subject to a
// predicate filter.
match agg_type {
AggregateType::Count => {
let value = Value::Scalar(Scalar::U64(col.count(&row_ids) as u64));
agg_vec.push(value);
}
AggregateType::First => unimplemented!("First not yet implemented"),
AggregateType::Last => unimplemented!("Last not yet implemented"),
AggregateType::Min => agg_vec.push(col.min(&row_ids)),
AggregateType::Max => agg_vec.push(col.max(&row_ids)),
AggregateType::Sum => agg_vec.push(Value::Scalar(col.sum(&row_ids))),
}
agg_vec
})
.collect::<Vec<_>>();
}
/// Given the predicate (which may be empty), determine a set of rows
/// contained in this row group that satisfy it. Any column that contains a
/// non-null value at any of these row positions is then included in the
/// results, which are added to `dst`.
///
/// As an optimisation, the contents of `dst` are checked before execution
/// and any columns already existing in the set are not interrogated.
///
/// If you are familiar with InfluxDB, this is essentially an implementation
/// of `SHOW TAG KEYS`.
pub fn column_names(
&self,
predicate: &Predicate,
columns: Selection<'_>,
dst: &mut BTreeSet<String>,
) {
// Determine the set of columns in this row group that are not already
// present in `dst`, i.e., they haven't been identified in other row
// groups already.
let candidate_columns = self
.all_columns_by_name
.iter()
.filter_map(|(name, &id)| match dst.contains(name) {
// N.B there is bool::then() but it's currently unstable.
true => None,
false => match columns {
Selection::All => Some((name, &self.columns[id])),
Selection::Some(names) => {
if names.iter().any(|selection| name == selection) {
Some((name, &self.columns[id]))
} else {
None
}
}
},
})
.collect::<Vec<_>>();
match self.row_ids_from_predicate(predicate) {
RowIDsOption::None(_) => {} // nothing matches predicate
RowIDsOption::Some(row_ids) => {
let row_ids = row_ids.to_vec();
for (name, column) in candidate_columns {
if column.has_non_null_value(&row_ids) {
dst.insert(name.to_owned());
}
}
}
RowIDsOption::All(_) => {
for (name, column) in candidate_columns {
if column.has_any_non_null_value() {
dst.insert(name.to_owned());
}
}
}
}
}
/// Returns the distinct set of values for the selected columns, constrained
/// by an optional predicate.
pub fn column_values<'a>(
&'a self,
predicate: &Predicate,
columns: &[ColumnName<'_>],
mut dst: BTreeMap<String, BTreeSet<String>>,
) -> BTreeMap<String, BTreeSet<String>> {
// Build up candidate columns
let candidate_columns = self
.all_columns_by_name
.iter()
// Filter any columns that are not present in the `Selection`.
.filter_map(|(name, &id)| {
if columns.iter().any(|selection| name == selection) {
Some((name, &self.columns[id]))
} else {
None
}
})
// Further filter candidate columns by removing any columns that we
// can prove we already have all the distinct values for.
.filter(|(name, column)| {
match dst.get(*name) {
// process the column if we haven't got all the distinct
// values.
Some(values) => column.has_other_non_null_string_values(values),
// no existing values for this column - we will need to
// process it.
None => true,
}
})
.collect::<Vec<_>>();
let row_ids = self.row_ids_from_predicate(predicate);
for (name, column) in candidate_columns {
// If no rows match there is nothing to do, if some rows match then
// extract an iterator of those IDs. If all rows match then create
// an iterator of all rows without materialising them.
let row_itr: Box<dyn Iterator<Item = u32>> = match &row_ids {
RowIDsOption::None(_) => return dst,
RowIDsOption::Some(row_ids) => Box::new(row_ids.iter()),
RowIDsOption::All(_) => Box::new(0..self.rows()),
};
let results = dst.entry(name.clone()).or_default();
for v in column.distinct_values(row_itr).into_iter().flatten() {
if !results.contains(v) {
results.insert(v.to_owned());
}
}
}
dst
}
pub(crate) fn column_storage_statistics(&self) -> Vec<column::Statistics> {
self.columns.iter().map(|c| c.storage_stats()).collect()
}
}
impl std::fmt::Display for &RowGroup {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.metadata().fmt(f)?;
writeln!(f, "[DATA]")?;
for (name, idx) in &self.all_columns_by_name {
writeln!(
f,
"'{}' (pos {}): size: {} {}",
name,
idx,
self.size(),
self.columns[*idx]
)?;
}
Ok(())
}
}
/// Initialise a `RowGroup` from an Arrow RecordBatch.
///
/// Presently this requires the RecordBatch to contain meta-data that specifies
/// the semantic meaning of each column in terms of an Influx time-series
/// use-case, i.e., whether the column is a tag column, field column or a time
/// column.
impl From<RecordBatch> for RowGroup {
fn from(rb: RecordBatch) -> Self {
let rows = rb.num_rows();
// TODO proper error handling here if the input schema is bad
let schema: Schema = rb
.schema()
.try_into()
.expect("Valid time-series schema when creating row group");
let mut columns = vec![];
for (i, arrow_column) in rb.columns().iter().enumerate() {
let (lp_type, field) = schema.field(i);
let col_name = field.name();
match lp_type {
Some(InfluxColumnType::Tag) => {
let column_data = match arrow_column.data_type() {
DataType::Utf8 => Column::from(arrow::array::StringArray::from(
arrow_column.data().clone(),
)),
DataType::Dictionary(key, value)
if key.as_ref() == &DataType::Int32
&& value.as_ref() == &DataType::Utf8 =>
{
Column::from(
arrow::array::DictionaryArray::<arrow::datatypes::Int32Type>::from(
arrow_column.data().clone(),
),
)
}
_ => panic!("invalid tag column type"),
};
columns.push((col_name.to_owned(), ColumnType::Tag(column_data)));
}
Some(InfluxColumnType::Field(_)) => {
let column_data = match arrow_column.data_type() {
DataType::Int64 => Column::from(arrow::array::Int64Array::from(
arrow_column.data().clone(),
)),
DataType::Float64 => Column::from(arrow::array::Float64Array::from(
arrow_column.data().clone(),
)),
DataType::UInt64 => Column::from(arrow::array::UInt64Array::from(
arrow_column.data().clone(),
)),
DataType::Boolean => Column::from(arrow::array::BooleanArray::from(
arrow_column.data().clone(),
)),
DataType::Utf8 => Column::from(arrow::array::StringArray::from(
arrow_column.data().clone(),
)),
dt => unimplemented!(
"data type {:?} currently not supported for field columns",
dt
),
};
columns.push((col_name.to_owned(), ColumnType::Field(column_data)));
}
Some(InfluxColumnType::Timestamp) => {
assert_eq!(col_name, TIME_COLUMN_NAME);
let column_data =
Column::from(arrow::array::Int64Array::from(arrow_column.data().clone()));
columns.push((col_name.to_owned(), ColumnType::Time(column_data)));
}
_ => panic!("unknown column type"),
}
}
Self::new(rows as u32, columns)
}
}
// Packs an encoded value into a `u128` at `pos`, which must be `[0,4)`.
#[inline(always)]
fn pack_u32_in_u128(packed_value: u128, encoded_id: u32, pos: usize) -> u128 {
packed_value | (encoded_id as u128) << (32 * pos)
}
// Given a packed encoded group key, unpacks them into `n` individual `u32`
// group keys, and stores them in `dst`. It is the caller's responsibility to
// ensure n <= 4.
#[cfg(test)]
fn unpack_u128_group_key(group_key_packed: u128, n: usize, mut dst: Vec<u32>) -> Vec<u32> {
dst.resize(n, 0);
for (i, encoded_id) in dst.iter_mut().enumerate() {
*encoded_id = (group_key_packed >> (i * 32)) as u32;
}
dst
}
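// A quick worked example of the round trip above: packing the encoded ids
// [7, 2] gives
//     pack_u32_in_u128(pack_u32_in_u128(0, 7, 0), 2, 1) == 7 | (2 << 32)
// i.e. 0x0000_0002_0000_0007, and
//     unpack_u128_group_key(0x0000_0002_0000_0007, 2, vec![]) == vec![7, 2]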
#[derive(Clone, Default, Debug, PartialEq)]
pub struct Predicate(Vec<BinaryExpr>);
impl Predicate {
pub fn new(expr: Vec<BinaryExpr>) -> Self {
Self(expr)
}
/// Constructs a `Predicate` based on the provided collection of expressions
/// and explicit time bounds.
///
/// The `from` and `to` values will be converted into appropriate
/// expressions, which result in the `Predicate` expressing the following:
///
/// time >= from AND time < to
pub fn with_time_range(exprs: &[BinaryExpr], from: i64, to: i64) -> Self {
let mut time_exprs = vec![
BinaryExpr::from((TIME_COLUMN_NAME, ">=", from)),
BinaryExpr::from((TIME_COLUMN_NAME, "<", to)),
];
time_exprs.extend_from_slice(exprs);
Self(time_exprs)
}
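    // For example (values are illustrative), the following predicate
    // expresses: time >= 100 AND time < 200 AND region = "west":
    //
    //     Predicate::with_time_range(
    //         &[BinaryExpr::from(("region", "=", "west"))],
    //         100,
    //         200,
    //     );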
/// A `Predicate` is empty if it has no expressions.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
pub fn iter(&self) -> std::slice::Iter<'_, BinaryExpr> {
self.0.iter()
}
/// Returns a vector of all expressions on the predicate.
pub fn expressions(&self) -> &[BinaryExpr] {
&self.0
}
// Removes all expressions for specified column from the predicate and
// returns them.
//
// The current use-case for this to separate processing the time column on
// its own using an optimised filtering function (because the time column is
// very likely to have two expressions in the predicate).
fn remove_expr_by_column_name(&mut self, name: ColumnName<'_>) -> Vec<BinaryExpr> {
let mut exprs = vec![];
while let Some(i) = self.0.iter().position(|expr| expr.col == name) {
exprs.push(self.0.remove(i));
}
exprs
}
// Returns true if the Predicate contains two time expressions.
fn contains_time_range(&self) -> bool {
self.0
.iter()
.filter(|expr| expr.col == TIME_COLUMN_NAME)
.count()
== 2
}
}
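// A sketch of how the time-range helpers above interact (values are
// illustrative): given a predicate holding ("time" >= 100), ("time" < 200)
// and ("region" = "west"), `contains_time_range()` returns true, and
// `remove_expr_by_column_name(TIME_COLUMN_NAME)` drains the two time
// expressions from the predicate, leaving only the "region" expression.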
/// Supported literal values for expressions. These map to a sub-set of logical
/// datatypes supported by the `ReadBuffer`.
#[derive(Clone, Debug, PartialEq)]
pub enum Literal {
String(String),
Integer(i64),
Unsigned(u64),
Float(f64),
Boolean(bool),
}
impl<'a> TryFrom<&DFScalarValue> for Literal {
type Error = String;
fn try_from(value: &DFScalarValue) -> Result<Self, Self::Error> {
match value {
DFScalarValue::Boolean(v) => match v {
Some(v) => Ok(Self::Boolean(*v)),
None => Err("NULL literal not supported".to_owned()),
},
DFScalarValue::Float64(v) => match v {
Some(v) => Ok(Self::Float(*v)),
None => Err("NULL literal not supported".to_owned()),
},
DFScalarValue::Int64(v) => match v {
Some(v) => Ok(Self::Integer(*v)),
None => Err("NULL literal not supported".to_owned()),
},
DFScalarValue::UInt64(v) => match v {
Some(v) => Ok(Self::Unsigned(*v)),
None => Err("NULL literal not supported".to_owned()),
},
DFScalarValue::Utf8(v) => match v {
Some(v) => Ok(Self::String(v.clone())),
None => Err("NULL literal not supported".to_owned()),
},
DFScalarValue::TimestampNanosecond(v) => match v {
Some(v) => Ok(Self::Integer(*v)),
None => Err("NULL literal not supported".to_owned()),
},
_ => Err("scalar type not supported".to_owned()),
}
}
}
/// An expression that contains a column name on the left side, an operator, and
/// a literal value on the right side.
#[derive(Clone, Debug, PartialEq)]
pub struct BinaryExpr {
col: String,
op: Operator,
value: Literal,
}
impl BinaryExpr {
pub fn new(column_name: impl Into<String>, op: Operator, value: Literal) -> Self {
Self {
col: column_name.into(),
op,
value,
}
}
pub fn column(&self) -> ColumnName<'_> {
self.col.as_str()
}
pub fn op(&self) -> Operator {
self.op
}
pub fn literal(&self) -> &Literal {
&self.value
}
fn literal_as_value(&self) -> Value<'_> {
match self.literal() {
Literal::String(v) => Value::String(v),
Literal::Integer(v) => Value::Scalar(Scalar::I64(*v)),
Literal::Unsigned(v) => Value::Scalar(Scalar::U64(*v)),
Literal::Float(v) => Value::Scalar(Scalar::F64(*v)),
Literal::Boolean(v) => Value::Boolean(*v),
}
}
}
impl From<(&str, &str, &str)> for BinaryExpr {
fn from(expr: (&str, &str, &str)) -> Self {
Self::new(
expr.0,
Operator::try_from(expr.1).unwrap(),
Literal::String(expr.2.to_owned()),
)
}
}
// These From implementations are useful for expressing expressions easily in
// tests by allowing for example:
//
// BinaryExpr::from("region", ">=", "east")
// BinaryExpr::from("counter", "=", 321.3)
macro_rules! binary_expr_from_impls {
($(($type:ident, $variant:ident),)*) => {
$(
impl From<(&str, &str, $type)> for BinaryExpr {
fn from(expr: (&str, &str, $type)) -> Self {
Self::new(
expr.0,
Operator::try_from(expr.1).unwrap(),
Literal::$variant(expr.2.to_owned()),
)
}
}
)*
};
}
binary_expr_from_impls! {
(String, String),
(i64, Integer),
(f64, Float),
(u64, Unsigned),
(bool, Boolean),
}
impl TryFrom<&DfExpr> for BinaryExpr {
type Error = String;
fn try_from(df_expr: &DfExpr) -> Result<Self, Self::Error> {
match df_expr {
DfExpr::BinaryExpr { left, op, right } => {
match (&**left, &**right) {
(DfExpr::Column(name), DfExpr::Literal(scalar)) => Ok(Self::new(
name,
Operator::try_from(op)?,
Literal::try_from(scalar)?,
)),
(DfExpr::Literal(_), DfExpr::Column(_)) => {
// In this case we may have been given (literal, op, column).
// Swap left and right around and retry.
Self::try_from(&DfExpr::BinaryExpr {
left: right.clone(),
// since relation has been swapped we need the
// converse operator, e.g., `a < b` becomes `b > a`.
op: match op {
DFOperator::Eq => DFOperator::Eq,
DFOperator::NotEq => DFOperator::NotEq,
DFOperator::Lt => DFOperator::Gt,
DFOperator::LtEq => DFOperator::GtEq,
DFOperator::Gt => DFOperator::Lt,
DFOperator::GtEq => DFOperator::LtEq,
op => return Err(format!("unsupported DF operator {:?}", op)),
},
right: left.clone(),
})
}
(_, _) => {
return Err(format!(
"unsupported expression {:?} {:?} {:?}",
*left, op, *right
))
}
}
}
_ => return Err(format!("unsupported expression type {:?}", df_expr)),
}
}
}
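// For example, the DataFusion expression `100 < c` is normalised here to the
// equivalent `c > 100` before conversion, so both forms produce the same
// `BinaryExpr` (see the `to_binary_expr` test below):
//
//     100 < c   ==>  BinaryExpr::from(("c", ">", 100_i64))
//     100 >= c  ==>  BinaryExpr::from(("c", "<=", 100_i64))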
// A representation of a column name.
pub type ColumnName<'a> = &'a str;
/// The InfluxDB-specific semantic meaning of a column.
pub enum ColumnType {
Tag(Column),
Field(Column),
Time(Column),
}
impl ColumnType {
/// The total size in bytes of the column
pub fn size(&self) -> usize {
match &self {
Self::Tag(c) => c.size(),
Self::Field(c) => c.size(),
Self::Time(c) => c.size(),
}
}
/// The number of distinct values if known
pub fn distinct_count(&self) -> Option<NonZeroU64> {
match &self {
Self::Tag(c) => c.cardinality(),
Self::Field(c) => c.cardinality(),
Self::Time(c) => c.cardinality(),
}
}
/// Helper function to construct a `Tag` column from a slice of `&str`
pub fn create_tag(values: &[&str]) -> Self {
Self::Tag(Column::from(values))
}
/// Helper function to construct a `Time` column from a slice of `i64`
pub fn create_time(values: &[i64]) -> Self {
Self::Time(Column::from(values))
}
}
#[derive(Debug, Clone)]
pub struct ColumnMeta {
pub typ: crate::schema::ColumnType,
pub logical_data_type: LogicalDataType,
pub range: (OwnedValue, OwnedValue),
pub distinct_count: Option<NonZeroU64>,
}
impl ColumnMeta {
pub fn size(&self) -> usize {
std::mem::size_of::<Self>() + self.range.0.size() + self.range.1.size()
}
}
impl Display for &ColumnMeta {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Can't rely on ColumnType Display impl.
let semantic_type = match self.typ {
schema::ColumnType::Tag(_) => "TAG",
schema::ColumnType::Field(_) => "FIELD",
schema::ColumnType::Timestamp(_) => "TIMESTAMP",
schema::ColumnType::Other(_) => "IOX",
};
write!(
f,
"sem_type: {}, log_type: {}, range: ({}, {})",
semantic_type, self.logical_data_type, &self.range.0, &self.range.1
)
}
}
// column metadata is equivalent for two columns if their logical type and
// semantic type are equivalent.
impl PartialEq for ColumnMeta {
fn eq(&self, other: &Self) -> bool {
self.typ == other.typ && self.logical_data_type == other.logical_data_type
}
}
#[derive(Default, Debug)]
pub struct MetaData {
// The total size in bytes of all column data in the `RowGroup`.
pub columns_size: usize,
// The total number of rows in the `RowGroup`.
pub rows: u32,
// The distinct set of columns for this `RowGroup` (all of these columns
// will appear in all of the `Table`'s `RowGroup`s) and the range of values
// for each of those columns.
//
// This can be used to skip the table entirely if a logical predicate can't
// possibly match based on the range of values a column has.
pub columns: BTreeMap<String, ColumnMeta>,
pub column_names: Vec<String>,
}
impl std::fmt::Display for &MetaData {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(
f,
"[META] rows: {}, columns: {}",
self.rows,
self.columns.len()
)?;
for (name, meta) in &self.columns {
writeln!(f, "'{}': {}", name, meta)?;
}
Ok(())
}
}
impl MetaData {
/// Returns the estimated size in bytes of the meta data and all column data
/// associated with a `RowGroup`.
pub fn size(&self) -> usize {
let base_size = std::mem::size_of::<Self>();
(base_size
// account for contents of meta data
+ self
.columns
.iter()
.map(|(k, v)| k.len() + v.size())
.sum::<usize>())
+ self.columns_size
}
// Helper function to determine if the provided binary expression could be
// satisfied in the `RowGroup`. If this function returns `false` then no rows
// in the `RowGroup` could ever match the expression.
//
pub fn column_could_satisfy_binary_expr(&self, expr: &BinaryExpr) -> bool {
let (column_min, column_max) = match self.columns.get(expr.column()) {
Some(schema) => &schema.range,
None => return false, // column doesn't exist.
};
let (op, value) = (expr.op(), &expr.literal_as_value());
match op {
// If the column range covers the value then it could contain that
// value.
Operator::Equal => column_min <= value && column_max >= value,
// If every value in the column is equal to "value" then this will
// be false; otherwise the expression could be satisfied.
Operator::NotEqual => (column_min != column_max) || column_max != value,
// if the column max is larger than value then the column could
// contain the value.
Operator::GT => column_max > value,
// if the column max is at least as large as `value` then the column
// could contain the value.
Operator::GTE => column_max >= value,
// if the column min is smaller than value then the column could
// contain the value.
Operator::LT => column_min < value,
// if the column min is at least as small as value then the column
// could contain the value.
Operator::LTE => column_min <= value,
}
}
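// For example, suppose the metadata for a `region` column records the range
// ("east", "west"). Then:
//
//   region = "north"  -> true  ("east" <= "north" <= "west", so it may match)
//   region > "west"   -> false (no stored value can exceed the column max)
//   region < "east"   -> false (no stored value can undercut the column min)
//
// A `true` result only means the row group *may* contain matching rows; the
// candidate rows still need to be checked.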
pub fn add_column(
&mut self,
name: &str,
column_size: usize,
col_type: schema::ColumnType,
logical_data_type: LogicalDataType,
range: (OwnedValue, OwnedValue),
distinct_count: Option<NonZeroU64>,
) {
self.column_names.push(name.to_owned());
self.columns.insert(
name.to_owned(),
ColumnMeta {
typ: col_type,
logical_data_type,
range,
distinct_count,
},
);
self.columns_size += column_size;
}
// Extract schema information for a set of columns.
fn schema_for_column_names(
&self,
names: &[ColumnName<'_>],
) -> Vec<(crate::schema::ColumnType, LogicalDataType)> {
names
.iter()
.map(|&name| {
let schema = self.columns.get(name).unwrap();
(schema.typ.clone(), schema.logical_data_type)
})
.collect::<Vec<_>>()
}
// Extract the schema information for a set of aggregate columns
fn schema_for_aggregate_column_names(
&self,
columns: &[(ColumnName<'_>, AggregateType)],
) -> Vec<(crate::schema::ColumnType, AggregateType, LogicalDataType)> {
columns
.iter()
.map(|(name, agg_type)| {
let schema = self.columns.get(*name).unwrap();
// TODO(edd): this check happens because an aggregate does
// not have to have the same physical type as the logical
// type of the column it is aggregating on. An example of
// this is Count. I'm going to fix this by associated data
// types with the aggregate itself.
let physical_data_type = if let AggregateType::Count = agg_type {
LogicalDataType::Unsigned
} else {
schema.logical_data_type
};
(schema.typ.clone(), *agg_type, physical_data_type)
})
.collect::<Vec<_>>()
}
}
/// Encapsulates results from `RowGroup`s with a structure that makes them
/// easier to work with and display.
pub struct ReadFilterResult<'row_group> {
schema: ResultSchema,
data: Vec<Values<'row_group>>,
}
impl ReadFilterResult<'_> {
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
pub fn schema(&self) -> &ResultSchema {
&self.schema
}
}
impl TryFrom<ReadFilterResult<'_>> for RecordBatch {
type Error = Error;
fn try_from(result: ReadFilterResult<'_>) -> Result<Self, Self::Error> {
let schema = internal_types::schema::Schema::try_from(result.schema())
.map_err(|source| Error::SchemaError { source })?;
let columns: Vec<ArrayRef> = result
.data
.into_iter()
.enumerate()
.map(|(i, values)| {
// Note: here we are special-casing columns that have been
// specified as being represented by `TimestampNanosecondArray`
// according to the Arrow schema. Currently this is done so that
// when they're fed into a data-fusion query engine, it will
// emit a friendlier representation of them.
if let DataType::Timestamp(TimeUnit::Nanosecond, timestamp) =
schema.field(i).1.data_type()
{
return match values {
Values::I64(arr) => {
Ok(Arc::new(arrow::array::TimestampNanosecondArray::from_vec(
arr,
timestamp.clone(),
)) as arrow::array::ArrayRef)
}
Values::I64N(arr) => Ok(Arc::new(
arrow::array::TimestampNanosecondArray::from_opt_vec(
arr,
timestamp.clone(),
),
)
as arrow::array::ArrayRef),
t => UnsupportedOperation {
msg: format!("cannot convert {:?} to TimestampNanosecondArray", t),
}
.fail(),
};
}
Ok(arrow::array::ArrayRef::from(values))
})
.collect::<Result<Vec<_>, _>>()?;
let arrow_schema: arrow::datatypes::SchemaRef = schema.into();
// try_new only returns an error if the schema is invalid or the number
// of rows on columns differ. We have full control over both so there
// should never be an error to return...
Self::try_new(arrow_schema, columns).map_err(|source| Error::ArrowError { source })
}
}
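// A sketch of the intended usage: materialise filter results as an Arrow
// record batch for downstream consumers (error handling elided):
//
//     let results = row_group.read_filter(&cols, &predicate);
//     let batch = RecordBatch::try_from(results)?;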
impl std::fmt::Debug for &ReadFilterResult<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Display the header
Display::fmt(self.schema(), f)?;
writeln!(f)?;
// Display the rest of the values.
Display::fmt(&self, f)
}
}
impl Display for &ReadFilterResult<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if self.is_empty() {
return Ok(());
}
let expected_rows = self.data[0].len();
let mut rows = 0;
let mut iter_map = self
.data
.iter()
.map(|v| ValuesIterator::new(v))
.collect::<Vec<_>>();
let columns = iter_map.len();
while rows < expected_rows {
if rows > 0 {
writeln!(f)?;
}
for (i, data) in iter_map.iter_mut().enumerate() {
write!(f, "{}", data.next().unwrap())?;
if i < columns - 1 {
write!(f, ",")?;
}
}
rows += 1;
}
writeln!(f)
}
}
#[derive(Default, Clone)]
pub struct ReadAggregateResult<'row_group> {
// a schema describing the columns in the results and their types.
pub(crate) schema: ResultSchema,
// The collection of columns forming the group keys.
pub(crate) group_key_cols: Vec<Vec<Option<&'row_group str>>>,
// The collection of aggregate columns. Each value in each column is an
// aggregate associated with the group key built from values in the group
// columns and the same ordinal position.
pub(crate) aggregate_cols: Vec<AggregateVec>,
pub(crate) group_keys_sorted: bool,
}
impl<'row_group> ReadAggregateResult<'row_group> {
pub fn new(schema: ResultSchema) -> Self {
Self {
schema,
..Default::default()
}
}
/// A `ReadAggregateResult` is empty if there are no aggregate columns.
pub fn is_empty(&self) -> bool {
self.aggregate_cols.is_empty()
}
pub fn schema(&self) -> &ResultSchema {
&self.schema
}
/// The number of rows in the result.
pub fn rows(&self) -> usize {
if self.aggregate_cols.is_empty() {
return 0;
}
self.aggregate_cols[0].len()
}
// The number of distinct group keys in the result. Not the same as `rows()`
// because a `ReadAggregateResult` can have no group keys and have a single
// aggregate row.
pub fn cardinality(&self) -> usize {
if self.group_key_cols.is_empty() {
return 0;
}
self.group_key_cols[0].len()
}
// Is this result for a grouped aggregate?
pub fn is_grouped_aggregate(&self) -> bool {
!self.group_key_cols.is_empty()
}
// The number of grouping columns.
pub fn group_key_columns(&self) -> usize {
self.group_key_cols.len()
}
// Whether the rows in the results are sorted by group keys.
pub fn group_keys_sorted(&self) -> bool {
self.group_key_cols.is_empty() || self.group_keys_sorted
}
/// Merges `other` and self, returning a new set of results.
pub fn merge(
mut self,
mut other: ReadAggregateResult<'row_group>,
) -> ReadAggregateResult<'row_group> {
assert_eq!(self.schema(), other.schema());
if self.is_empty() {
return other;
} else if other.is_empty() {
return self;
}
// `read_aggregate` uses a variety of ways to generate results. It is
// not safe to assume any particular ordering, so we will sort self and
// other and do a merge.
if !self.group_keys_sorted() {
self.sort();
}
if !other.group_keys_sorted() {
other.sort();
}
let mut result = Self::new(self.schema.clone());
// Allocate output grouping columns
result
.group_key_cols
.resize(result.schema.group_columns.len(), vec![]);
// Allocate output aggregate columns
result.aggregate_cols = result
.schema
.aggregate_columns
.iter()
.map(|(_, agg_type, data_type)| AggregateVec::from((agg_type, data_type)))
.collect::<Vec<_>>();
let mut self_i = 0;
let mut other_i = 0;
while self_i < self.rows() || other_i < other.rows() {
if self_i == self.rows() {
// drained self, add the rest of other's group key columns
for (col_i, col) in result.group_key_cols.iter_mut().enumerate() {
col.extend(other.group_key_cols[col_i].iter().skip(other_i));
}
// add the rest of other's aggregate columns
//
// N.B - by checking the data type of the aggregate columns here
// we can do type checking on a column basis (once) rather than
// for each row. This allows us to extract an aggregate vec
// and an iterator of the same type to extend the aggregate vec.
for (col_i, (_, _, data_type)) in result.schema.aggregate_columns.iter().enumerate()
{
match data_type {
LogicalDataType::Integer => {
let arr = other.aggregate_cols.remove(0);
result.aggregate_cols[col_i]
.extend_with_i64(arr.take_as_i64().into_iter());
}
LogicalDataType::Unsigned => {
let arr = other.aggregate_cols.remove(0);
result.aggregate_cols[col_i]
.extend_with_u64(arr.take_as_u64().into_iter());
}
LogicalDataType::Float => {
let arr = other.aggregate_cols.remove(0);
result.aggregate_cols[col_i]
.extend_with_f64(arr.take_as_f64().into_iter());
}
LogicalDataType::String => {
let arr = other.aggregate_cols.remove(0);
result.aggregate_cols[col_i]
.extend_with_str(arr.take_as_str().into_iter());
}
LogicalDataType::Binary => {
let arr = other.aggregate_cols.remove(0);
result.aggregate_cols[col_i]
.extend_with_bytes(arr.take_as_bytes().into_iter());
}
LogicalDataType::Boolean => {
let arr = other.aggregate_cols.remove(0);
result.aggregate_cols[col_i]
.extend_with_bool(arr.take_as_bool().into_iter());
}
}
}
return result;
} else if other_i == other.rows() {
// drained other, add the rest of self's group key columns
for (col_i, col) in result.group_key_cols.iter_mut().enumerate() {
col.extend(self.group_key_cols[col_i].iter().skip(self_i));
}
// add the rest of self's aggregate columns
for (col_i, (_, _, data_type)) in result.schema.aggregate_columns.iter().enumerate()
{
match data_type {
LogicalDataType::Integer => {
let arr = self.aggregate_cols.remove(0);
result.aggregate_cols[col_i]
.extend_with_i64(arr.take_as_i64().into_iter());
}
LogicalDataType::Unsigned => {
let arr = self.aggregate_cols.remove(0);
result.aggregate_cols[col_i]
.extend_with_u64(arr.take_as_u64().into_iter());
}
LogicalDataType::Float => {
let arr = self.aggregate_cols.remove(0);
result.aggregate_cols[col_i]
.extend_with_f64(arr.take_as_f64().into_iter());
}
LogicalDataType::String => {
let arr = self.aggregate_cols.remove(0);
result.aggregate_cols[col_i]
.extend_with_str(arr.take_as_str().into_iter());
}
LogicalDataType::Binary => {
let arr = self.aggregate_cols.remove(0);
result.aggregate_cols[col_i]
.extend_with_bytes(arr.take_as_bytes().into_iter());
}
LogicalDataType::Boolean => {
let arr = self.aggregate_cols.remove(0);
result.aggregate_cols[col_i]
.extend_with_bool(arr.take_as_bool().into_iter());
}
}
}
return result;
}
// compare the next row in self and other and determine if there is
// a clear lexicographic order.
let mut ord = Ordering::Equal;
for i in 0..result.schema.group_columns.len() {
match self.group_key_cols[i][self_i].partial_cmp(&other.group_key_cols[i][other_i])
{
Some(o) => {
ord = o;
if !matches!(ord, Ordering::Equal) {
break;
}
}
None => continue,
}
}
match ord {
Ordering::Less => {
// move the next row for each of self's columns onto result.
for (col_i, col) in result.group_key_cols.iter_mut().enumerate() {
col.push(self.group_key_cols[col_i][self_i]);
}
for (col_i, col) in result.aggregate_cols.iter_mut().enumerate() {
col.push(self.aggregate_cols[col_i].value(self_i));
}
self_i += 1;
}
Ordering::Equal => {
// move the next row for each of self's columns onto result.
for (col_i, col) in result.group_key_cols.iter_mut().enumerate() {
col.push(self.group_key_cols[col_i][self_i]);
}
// merge all the aggregates for this group key.
for (col_i, col) in result.aggregate_cols.iter_mut().enumerate() {
let self_value = self.aggregate_cols[col_i].value(self_i);
let other_value = other.aggregate_cols[col_i].value(other_i);
let (_, agg_type, _) = &self.schema.aggregate_columns[col_i];
col.push(match agg_type {
AggregateType::Count => self_value + other_value,
AggregateType::Min => match self_value.partial_cmp(&other_value) {
Some(ord) => match ord {
Ordering::Less => self_value,
Ordering::Equal => self_value,
Ordering::Greater => other_value,
},
None => self_value,
},
AggregateType::Max => match self_value.partial_cmp(&other_value) {
Some(ord) => match ord {
Ordering::Less => other_value,
Ordering::Equal => other_value,
Ordering::Greater => self_value,
},
None => self_value,
},
AggregateType::Sum => self_value + other_value,
_ => unimplemented!("first/last not implemented"),
});
}
self_i += 1;
other_i += 1;
}
Ordering::Greater => {
// move the next row for each of other's columns onto result.
for (col_i, col) in result.group_key_cols.iter_mut().enumerate() {
col.push(other.group_key_cols[col_i][other_i]);
}
for (col_i, col) in result.aggregate_cols.iter_mut().enumerate() {
col.push(other.aggregate_cols[col_i].value(other_i));
}
other_i += 1;
}
}
}
result
}
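// For example, `merge` above combines matching group keys and appends the
// rest (a sketch; see the `read_aggregate_result_merge` test):
//
//   self:   ("east") -> sum=10
//   other:  ("east") -> sum=20, ("west") -> sum=5
//   merged: ("east") -> sum=30, ("west") -> sum=5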
// Executes a mutable sort of the results based on the lexicographic order
// of each group key columns.
//
// Given these group key columns:
//
// [foo [zoo [zoo
// foo bar zoo
// bar bar bar
// bar] bar] zoo]
//
// `sort` would result in them becoming:
//
// [bar [bar [bar
// bar bar zoo
// foo bar zoo
// foo] zoo] zoo]
//
// The same permutation is also applied to the aggregate columns.
//
pub fn sort(&mut self) {
if self.group_keys_sorted {
return;
}
// Create a vector of group keys, which allows us to determine a
// permutation by which we should sort all columns.
let mut group_keys = (0..self.rows())
.map(|i| GroupKey::new(&self.group_key_cols, i))
.collect::<Vec<_>>();
// sort the vector of group keys, which will give us a permutation
// that we can apply to all of the columns.
group_keys.sort_unstable_by(|a, b| {
let cols = a.len();
for i in 0..cols {
match a.columns[i][a.row_offset].partial_cmp(&b.columns[i][b.row_offset]) {
Some(ord) => {
if matches!(ord, Ordering::Equal) {
continue;
}
return ord;
}
None => continue,
}
}
std::cmp::Ordering::Equal
});
// Now create a permutation by looking at how the row_offsets have been
// ordered in the `group_keys` array.
let perm = permutation::Permutation::from_vec(
group_keys
.iter()
.map(|gk| gk.row_offset)
.collect::<Vec<_>>(),
);
assert_eq!(perm.len(), self.rows());
// Apply that permutation to all of the columns.
for col in self.group_key_cols.iter_mut() {
*col = perm.apply_slice(col.as_slice());
}
for col in self.aggregate_cols.iter_mut() {
col.sort_with_permutation(&perm);
}
self.group_keys_sorted = true;
}
}
// The `GroupKey` struct is a wrapper over a specific row of data in grouping
// columns.
//
// Rather than pivot the columns into a row-wise orientation to sort them, we
// can effectively sort a projection across them (`row_offset`) storing
// `GroupKey`s in a vector and sorting that.
struct GroupKey<'a> {
columns: &'a [Vec<Option<&'a str>>],
row_offset: usize,
}
impl<'a> GroupKey<'a> {
fn new(columns: &'a [Vec<Option<&'a str>>], offset: usize) -> Self {
Self {
columns,
row_offset: offset,
}
}
// The number of columns comprising the `GroupKey`.
fn len(&self) -> usize {
self.columns.len()
}
}
impl TryFrom<ReadAggregateResult<'_>> for RecordBatch {
type Error = Error;
fn try_from(mut result: ReadAggregateResult<'_>) -> Result<Self, Self::Error> {
let schema = internal_types::schema::Schema::try_from(result.schema())
.map_err(|source| Error::SchemaError { source })?;
let arrow_schema: arrow::datatypes::SchemaRef = schema.into();
// Add the group columns to the set of column data for the record batch.
let mut columns: Vec<Arc<dyn arrow::array::Array>> =
Vec::with_capacity(result.schema.len());
for (_, data_type) in &result.schema.group_columns {
match data_type {
LogicalDataType::String => {
columns.push(Arc::new(array::StringArray::from(
result.group_key_cols.remove(0), // move column out of result
)));
}
_ => panic!("only String currently supported as group column"),
}
}
for (_, _, data_type) in &result.schema.aggregate_columns {
match data_type {
LogicalDataType::Integer => {
columns.push(Arc::new(array::Int64Array::from(
result.aggregate_cols.remove(0).take_as_i64(),
)));
}
LogicalDataType::Unsigned => {
columns.push(Arc::new(array::UInt64Array::from(
result.aggregate_cols.remove(0).take_as_u64(),
)));
}
LogicalDataType::Float => {
columns.push(Arc::new(array::Float64Array::from(
result.aggregate_cols.remove(0).take_as_f64(),
)));
}
LogicalDataType::String => {
columns.push(Arc::new(array::StringArray::from(
result
.aggregate_cols
.remove(0)
.take_as_str()
.iter()
.map(|x| x.as_deref())
.collect::<Vec<_>>(),
)));
}
LogicalDataType::Binary => {
columns.push(Arc::new(array::BinaryArray::from(
result
.aggregate_cols
.remove(0)
.take_as_bytes()
.iter()
.map(|x| x.as_deref())
.collect::<Vec<_>>(),
)));
}
LogicalDataType::Boolean => {
columns.push(Arc::new(array::BooleanArray::from(
result.aggregate_cols.remove(0).take_as_bool(),
)));
}
}
}
// everything has been moved and copied into record batch.
assert!(result.group_key_cols.is_empty());
assert!(result.aggregate_cols.is_empty());
// try_new only returns an error if the schema is invalid or the number
// of rows on columns differ. We have full control over both so there
// should never be an error to return...
Self::try_new(arrow_schema, columns).context(ArrowError)
}
}
// `group_keys_sorted` does not contribute to a result's equality with another
impl PartialEq for ReadAggregateResult<'_> {
fn eq(&self, other: &Self) -> bool {
self.schema() == other.schema()
&& self.group_key_cols == other.group_key_cols
&& self.aggregate_cols == other.aggregate_cols
}
}
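// For example, a result that has been `sort()`ed still compares equal to an
// unsorted result holding the same rows in the same order, because the
// `group_keys_sorted` flag is bookkeeping rather than data.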
/// The Debug implementation emits both the schema and the column data for the
/// results.
impl std::fmt::Debug for ReadAggregateResult<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Display the schema
Display::fmt(&self.schema(), f)?;
// Display the rest of the values.
Display::fmt(&self, f)
}
}
/// The Display implementation emits all of the column data for the results, but
/// omits the schema.
impl Display for ReadAggregateResult<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if self.is_empty() {
return Ok(());
}
// There may or may not be group keys
let expected_rows = self.rows();
for row in 0..expected_rows {
if row > 0 {
writeln!(f)?;
}
// write row for group by columns
if self.is_grouped_aggregate() {
for col in &self.group_key_cols {
match col[row] {
Some(v) => write!(f, "{},", v)?,
None => write!(f, "NULL,")?,
}
}
}
// write row for aggregate columns
for (i, col) in self.aggregate_cols.iter().enumerate() {
col.write_value(row, f)?;
if i < self.aggregate_cols.len() - 1 {
write!(f, ",")?
}
}
}
writeln!(f)
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::schema;
// Helper function that creates a predicate from a single expression
fn col_pred(expr: BinaryExpr) -> Predicate {
Predicate::new(vec![expr])
}
#[test]
fn size() {
let mut columns = vec![];
let rc = ColumnType::Tag(Column::from(&[Some("west"), Some("west"), None, None][..]));
columns.push(("region".to_string(), rc));
let tc = ColumnType::Time(Column::from(&[100_i64, 200, 500, 600][..]));
columns.push(("time".to_string(), tc));
let row_group = RowGroup::new(4, columns);
let rg_size = row_group.size();
assert!(rg_size > 0);
let mut columns = vec![];
let track = ColumnType::Tag(Column::from(
&[Some("Thinking"), Some("of"), Some("a"), Some("place")][..],
));
columns.push(("track".to_string(), track));
let tc = ColumnType::Time(Column::from(&[100_i64, 200, 500, 600][..]));
columns.push(("time".to_string(), tc));
let row_group = RowGroup::new(4, columns);
assert!(row_group.size() > rg_size);
}
#[test]
fn row_ids_from_predicates() {
let mut columns = vec![];
let tc = ColumnType::Time(Column::from(&[100_i64, 200, 500, 600, 300, 300][..]));
columns.push(("time".to_string(), tc));
let rc = ColumnType::Tag(Column::from(
&["west", "west", "east", "west", "south", "north"][..],
));
columns.push(("region".to_string(), rc));
let row_group = RowGroup::new(6, columns);
// Closed partially covering "time range" predicate
let row_ids = row_group.row_ids_from_predicate(&Predicate::with_time_range(&[], 200, 600));
assert_eq!(row_ids.unwrap().to_vec(), vec![1, 2, 4, 5]);
// Fully covering "time range" predicate
let row_ids = row_group.row_ids_from_predicate(&Predicate::with_time_range(&[], 10, 601));
assert!(matches!(row_ids, RowIDsOption::All(_)));
// Open ended "time range" predicate
let row_ids = row_group.row_ids_from_predicate(&col_pred(BinaryExpr::from((
TIME_COLUMN_NAME,
">=",
300_i64,
))));
assert_eq!(row_ids.unwrap().to_vec(), vec![2, 3, 4, 5]);
// Closed partially covering "time range" predicate and other column
// predicate
let row_ids = row_group.row_ids_from_predicate(&Predicate::with_time_range(
&[BinaryExpr::from(("region", "=", "south"))],
200,
600,
));
assert_eq!(row_ids.unwrap().to_vec(), vec![4]);
// Fully covering "time range" predicate and other column predicate
let row_ids = row_group.row_ids_from_predicate(&Predicate::with_time_range(
&[BinaryExpr::from(("region", "=", "west"))],
10,
601,
));
assert_eq!(row_ids.unwrap().to_vec(), vec![0, 1, 3]);
// "time range" predicate and other column predicate that doesn't match
let row_ids = row_group.row_ids_from_predicate(&Predicate::with_time_range(
&[BinaryExpr::from(("region", "=", "nope"))],
200,
600,
));
assert!(matches!(row_ids, RowIDsOption::None(_)));
// Just a column predicate
let row_ids =
row_group.row_ids_from_predicate(&col_pred(BinaryExpr::from(("region", "=", "east"))));
assert_eq!(row_ids.unwrap().to_vec(), vec![2]);
// Predicate that matches all the rows
let row_ids =
row_group.row_ids_from_predicate(&col_pred(BinaryExpr::from(("region", "!=", "abba"))));
assert!(matches!(row_ids, RowIDsOption::All(_)));
// No predicates
let row_ids = row_group.row_ids_from_predicate(&Predicate::default());
assert!(matches!(row_ids, RowIDsOption::All(_)));
}
#[test]
fn read_filter() {
let mut columns = vec![];
let tc = ColumnType::Time(Column::from(&[1_i64, 2, 3, 4, 5, 6][..]));
columns.push(("time".to_string(), tc));
let rc = ColumnType::Tag(Column::from(
&["west", "west", "east", "west", "south", "north"][..],
));
columns.push(("region".to_string(), rc));
let mc = ColumnType::Tag(Column::from(
&["GET", "POST", "POST", "POST", "PUT", "GET"][..],
));
columns.push(("method".to_string(), mc));
let fc = ColumnType::Field(Column::from(&[100_u64, 101, 200, 203, 203, 10][..]));
columns.push(("count".to_string(), fc));
let row_group = RowGroup::new(6, columns);
let cases = vec![
(
vec!["count", "region", "time"],
Predicate::with_time_range(&[], 1, 6),
"count,region,time
100,west,1
101,west,2
200,east,3
203,west,4
203,south,5
",
),
(
vec!["time", "region", "method"],
Predicate::with_time_range(&[], -19, 2),
"time,region,method
1,west,GET
",
),
(
vec!["time"],
Predicate::with_time_range(&[], 0, 3),
"time
1
2
",
),
(
vec!["method"],
Predicate::with_time_range(&[], 0, 3),
"method
GET
POST
",
),
(
vec!["count", "method", "time"],
Predicate::with_time_range(&[BinaryExpr::from(("method", "=", "POST"))], 0, 6),
"count,method,time
101,POST,2
200,POST,3
203,POST,4
",
),
(
vec!["region", "time"],
Predicate::with_time_range(&[BinaryExpr::from(("method", "=", "POST"))], 0, 6),
"region,time
west,2
east,3
west,4
",
),
];
for (cols, predicates, expected) in cases {
let results = row_group.read_filter(&cols, &predicates);
assert_eq!(format!("{:?}", &results), expected);
}
// test no matching rows
let results = row_group.read_filter(
&["method", "region", "time"],
&Predicate::with_time_range(&[], -19, 1),
);
assert!(results.is_empty());
}
#[test]
fn read_filter_dictionaries() {
let mut columns = vec![];
let tc = ColumnType::Time(Column::from(&[1_i64, 2, 3, 4, 5, 6][..]));
columns.push(("time".to_string(), tc));
// Tag column that will be dictionary encoded when materialised
let rc = ColumnType::Tag(Column::from(
&["west", "west", "east", "west", "south", "north"][..],
));
columns.push(("region".to_string(), rc));
// Field column that will be stored as a string array when materialised
let mc = ColumnType::Field(Column::from(
&["GET", "POST", "POST", "POST", "PUT", "GET"][..],
));
columns.push(("method".to_string(), mc));
let row_group = RowGroup::new(6, columns);
let cases = vec![
(
vec!["method", "region", "time"],
Predicate::default(),
"method,region,time
GET,west,1
POST,west,2
POST,east,3
POST,west,4
PUT,south,5
GET,north,6
",
),
(
vec!["method", "region", "time"],
Predicate::with_time_range(&[], -1, 3),
"method,region,time
GET,west,1
POST,west,2
",
),
];
for (cols, predicates, expected) in cases {
let results = row_group.read_filter(&cols, &predicates);
assert_eq!(format!("{:?}", &results), expected);
}
}
#[test]
fn read_aggregate() {
let mut columns = vec![];
let tc = ColumnType::Time(Column::from(&[1_i64, 2, 3, 4, 5, 6][..]));
columns.push(("time".to_string(), tc));
let rc = ColumnType::Tag(Column::from(
&["west", "west", "east", "west", "south", "north"][..],
));
columns.push(("region".to_string(), rc));
let mc = ColumnType::Tag(Column::from(
&["GET", "POST", "POST", "POST", "PUT", "GET"][..],
));
columns.push(("method".to_string(), mc));
let ec = ColumnType::Tag(Column::from(
&[
Some("prod"),
Some("prod"),
Some("stag"),
Some("prod"),
None,
None,
][..],
));
columns.push(("env".to_string(), ec));
let c = ColumnType::Tag(Column::from(
&["Alpha", "Alpha", "Bravo", "Bravo", "Alpha", "Alpha"][..],
));
columns.push(("letters".to_string(), c));
let c = ColumnType::Tag(Column::from(
&["one", "two", "two", "two", "one", "three"][..],
));
columns.push(("numbers".to_string(), c));
let fc = ColumnType::Field(Column::from(&[100_u64, 101, 200, 203, 203, 10][..]));
columns.push(("counter".to_string(), fc));
let row_group = RowGroup::new(6, columns);
// test queries with no predicates and grouping on low cardinality
// columns
read_aggregate_all_rows_all_rle(&row_group);
// test row group queries that group on fewer than five columns.
read_aggregate_hash_u128_key(&row_group);
// test row group queries that use a vector-based group key.
read_aggregate_hash_vec_key(&row_group);
// test row group queries that only group on one column.
read_aggregate_single_groupby_column(&row_group);
}
// the read_group path where grouping is on fewer than five columns.
fn read_aggregate_hash_u128_key(row_group: &RowGroup) {
let cases = vec![
(
Predicate::with_time_range(&[], 0, 7), // covers all rows via explicit time range; no column exprs
vec!["region", "method"],
vec![("counter", AggregateType::Sum)],
"region,method,counter_sum
east,POST,200
north,GET,10
south,PUT,203
west,GET,100
west,POST,304
",
),
(
Predicate::with_time_range(&[], 2, 6), // time range restricts matching rows; no column exprs
vec!["env", "region"],
vec![
("counter", AggregateType::Sum),
("counter", AggregateType::Count),
],
"env,region,counter_sum,counter_count
NULL,south,203,1
prod,west,304,2
stag,east,200,1
",
),
(
Predicate::with_time_range(&[], -1, 10),
vec!["region", "env"],
vec![("method", AggregateType::Min)], // Yep, you can aggregate any column.
"region,env,method_min
east,stag,POST
north,NULL,GET
south,NULL,PUT
west,prod,GET
",
),
// This case is identical to above but has an explicit `region >
// "north"` predicate.
(
Predicate::with_time_range(&[BinaryExpr::from(("region", ">", "north"))], -1, 10),
vec!["region", "env"],
vec![("method", AggregateType::Min)], // Yep, you can aggregate any column.
"region,env,method_min
south,NULL,PUT
west,prod,GET
",
),
(
Predicate::with_time_range(&[], -1, 10),
vec!["region", "env", "method"],
vec![("time", AggregateType::Max)], // Yep, you can aggregate any column.
"region,env,method,time_max
east,stag,POST,3
north,NULL,GET,6
south,NULL,PUT,5
west,prod,GET,1
west,prod,POST,4
",
),
];
for (predicate, group_cols, aggs, expected) in cases {
let mut results = row_group.read_aggregate(&predicate, &group_cols, &aggs);
results.sort();
assert_eq!(format!("{:?}", &results), expected);
}
}
// the read_group path where grouping is on five or more columns. This will
// ensure that the `read_group_hash_with_vec_key` path is exercised.
fn read_aggregate_hash_vec_key(row_group: &RowGroup) {
let cases = vec![(
Predicate::with_time_range(&[], 0, 7), // covers all rows via explicit time range
vec!["region", "method", "env", "letters", "numbers"],
vec![("counter", AggregateType::Sum)],
"region,method,env,letters,numbers,counter_sum
east,POST,stag,Bravo,two,200
north,GET,NULL,Alpha,three,10
south,PUT,NULL,Alpha,one,203
west,GET,prod,Alpha,one,100
west,POST,prod,Alpha,two,101
west,POST,prod,Bravo,two,203
",
)];
for (predicate, group_cols, aggs, expected) in cases {
let mut results = row_group.read_aggregate(&predicate, &group_cols, &aggs);
results.sort();
assert_eq!(format!("{:?}", &results), expected);
}
}
// the read_group path where grouping is on a single column.
fn read_aggregate_single_groupby_column(row_group: &RowGroup) {
let cases = vec![(
Predicate::with_time_range(&[], 0, 7), // covers all rows via explicit time range
vec!["method"],
vec![("counter", AggregateType::Sum)],
"method,counter_sum
GET,110
POST,504
PUT,203
",
)];
for (predicate, group_cols, aggs, expected) in cases {
let mut results = row_group.read_aggregate(&predicate, &group_cols, &aggs);
results.sort();
assert_eq!(format!("{:?}", &results), expected);
}
}
fn read_aggregate_all_rows_all_rle(row_group: &RowGroup) {
let cases = vec![
(
Predicate::default(),
vec!["region", "method"],
vec![("counter", AggregateType::Sum)],
"region,method,counter_sum
east,POST,200
north,GET,10
south,PUT,203
west,GET,100
west,POST,304
",
),
(
Predicate::default(),
vec!["region", "method", "env"],
vec![("counter", AggregateType::Sum)],
"region,method,env,counter_sum
east,POST,stag,200
north,GET,NULL,10
south,PUT,NULL,203
west,GET,prod,100
west,POST,prod,304
",
),
(
Predicate::default(),
vec!["env"],
vec![("counter", AggregateType::Count)],
"env,counter_count
NULL,2
prod,3
stag,1
",
),
(
Predicate::default(),
vec!["region", "method"],
vec![
("counter", AggregateType::Sum),
("counter", AggregateType::Min),
("counter", AggregateType::Max),
],
"region,method,counter_sum,counter_min,counter_max
east,POST,200,200,200
north,GET,10,10,10
south,PUT,203,203,203
west,GET,100,100,100
west,POST,304,101,203
",
),
];
for (predicate, group_cols, aggs, expected) in cases {
let results = row_group.read_aggregate(&predicate, &group_cols, &aggs);
assert_eq!(format!("{:?}", &results), expected);
}
}
#[test]
fn row_aggregate_could_satisfy_predicate() {
let mut columns = vec![];
let tc = ColumnType::Time(Column::from(&[1_i64, 2, 3, 4, 5, 6][..]));
columns.push(("time".to_string(), tc));
let rc = ColumnType::Tag(Column::from(
&["west", "west", "east", "west", "south", "north"][..],
));
columns.push(("region".to_string(), rc));
let mc = ColumnType::Tag(Column::from(
&["GET", "GET", "GET", "GET", "GET", "GET"][..],
));
columns.push(("method".to_string(), mc));
let row_group = RowGroup::new(6, columns);
let cases = vec![
(("az", "=", "west"), false), // no az column
(("region", "=", "west"), true), // region column does contain "west"
(("region", "=", "over"), true), // region column might contain "over"
(("region", "=", "abc"), false), // region column can't contain "abc"
(("region", "=", "zoo"), false), // region column can't contain "zoo"
(("region", "!=", "hello"), true), // region column might not contain "hello"
(("method", "!=", "GET"), false), // method must only contain "GET"
(("region", ">", "abc"), true), // region column might contain something > "abc"
(("region", ">", "north"), true), // region column might contain something > "north"
(("region", ">", "west"), false), // region column can't contain something > "west"
(("region", ">=", "abc"), true), // region column might contain something ≥ "abc"
(("region", ">=", "east"), true), // region column might contain something ≥ "east"
(("region", ">=", "west"), true), // region column might contain something ≥ "west"
(("region", ">=", "zoo"), false), // region column can't contain something ≥ "zoo"
(("region", "<", "foo"), true), // region column might contain something < "foo"
(("region", "<", "north"), true), // region column might contain something < "north"
(("region", "<", "south"), true), // region column might contain something < "south"
(("region", "<", "east"), false), // region column can't contain something < "east"
(("region", "<", "abc"), false), // region column can't contain something < "abc"
(("region", "<=", "east"), true), // region column might contain something ≤ "east"
(("region", "<=", "north"), true), // region column might contain something ≤ "north"
(("region", "<=", "south"), true), // region column might contain something ≤ "south"
(("region", "<=", "abc"), false), // region column can't contain something ≤ "abc"
];
for ((col, op, value), exp) in cases {
let predicate = Predicate::new(vec![BinaryExpr::from((col, op, value))]);
assert_eq!(
row_group.could_satisfy_conjunctive_binary_expressions(predicate.iter()),
exp,
"{:?} failed",
predicate
);
}
}
#[test]
fn row_aggregate_satisfies_predicate() {
let mut columns = vec![];
let tc = ColumnType::Time(Column::from(&[1_i64, 2, 3, 4, 5, 6][..]));
columns.push(("time".to_string(), tc));
let rc = ColumnType::Tag(Column::from(
&["west", "west", "east", "west", "south", "north"][..],
));
columns.push(("region".to_string(), rc));
let mc = ColumnType::Tag(Column::from(
&["GET", "GET", "GET", "GET", "GET", "GET"][..],
));
columns.push(("method".to_string(), mc));
let row_group = RowGroup::new(6, columns);
let mut predicate = Predicate::default();
assert!(row_group.satisfies_predicate(&predicate));
predicate = Predicate::new(vec![BinaryExpr::from(("region", "=", "east"))]);
assert!(row_group.satisfies_predicate(&predicate));
// all expressions satisfied in data
predicate = Predicate::new(vec![
BinaryExpr::from(("region", "=", "east")),
BinaryExpr::from(("method", "!=", "POST")),
]);
assert!(row_group.satisfies_predicate(&predicate));
// all expressions satisfied in data by all rows
predicate = Predicate::new(vec![BinaryExpr::from(("method", "=", "GET"))]);
assert!(row_group.satisfies_predicate(&predicate));
// one expression satisfied in data but other ruled out via column pruning.
predicate = Predicate::new(vec![
BinaryExpr::from(("region", "=", "east")),
BinaryExpr::from(("method", ">", "GET")),
]);
assert!(!row_group.satisfies_predicate(&predicate));
// all expressions rules out via column pruning.
predicate = Predicate::new(vec![
BinaryExpr::from(("region", ">", "west")),
BinaryExpr::from(("method", ">", "GET")),
]);
assert!(!row_group.satisfies_predicate(&predicate));
// column does not exist
predicate = Predicate::new(vec![BinaryExpr::from(("track", "=", "Jeanette"))]);
assert!(!row_group.satisfies_predicate(&predicate));
// one column satisfies expression but other column does not exist
predicate = Predicate::new(vec![
BinaryExpr::from(("region", "=", "south")),
BinaryExpr::from(("track", "=", "Jeanette")),
]);
assert!(!row_group.satisfies_predicate(&predicate));
}
#[test]
fn pack_unpack_group_keys() {
let cases = vec![
vec![0, 0, 0, 0],
vec![1, 2, 3, 4],
vec![1, 3, 4, 2],
vec![0],
vec![0, 1],
vec![u32::MAX, u32::MAX, u32::MAX, u32::MAX],
vec![u32::MAX, u16::MAX as u32, u32::MAX, u16::MAX as u32],
vec![0, u16::MAX as u32, 0],
vec![0, u16::MAX as u32, 0, 0],
vec![0, 0, u32::MAX, 0],
];
for case in cases {
let mut packed_value = 0_u128;
for (i, &encoded_id) in case.iter().enumerate() {
packed_value = pack_u32_in_u128(packed_value, encoded_id, i);
}
assert_eq!(
unpack_u128_group_key(packed_value, case.len(), vec![]),
case
);
}
}
#[test]
fn read_aggregate_result_display() {
let mut result = ReadAggregateResult {
schema: ResultSchema {
select_columns: vec![],
group_columns: vec![
(
schema::ColumnType::Tag("region".to_owned()),
LogicalDataType::String,
),
(
schema::ColumnType::Tag("host".to_owned()),
LogicalDataType::String,
),
],
aggregate_columns: vec![
(
schema::ColumnType::Field("temp".to_owned()),
AggregateType::Sum,
LogicalDataType::Integer,
),
(
schema::ColumnType::Field("voltage".to_owned()),
AggregateType::Count,
LogicalDataType::Unsigned,
),
],
},
group_key_cols: vec![
vec![
Some("east"),
Some("east"),
Some("west"),
Some("west"),
Some("west"),
],
vec![
Some("host-a"),
Some("host-b"),
Some("host-a"),
Some("host-c"),
Some("host-d"),
],
],
aggregate_cols: vec![
AggregateVec::SumI64(vec![Some(10), Some(20), Some(25), Some(21), Some(11)]),
AggregateVec::Count(vec![Some(3), Some(4), Some(3), Some(1), Some(9)]),
],
group_keys_sorted: false,
};
// Debug implementation
assert_eq!(
format!("{:?}", &result),
"region,host,temp_sum,voltage_count
east,host-a,10,3
east,host-b,20,4
west,host-a,25,3
west,host-c,21,1
west,host-d,11,9
"
);
// Display implementation
assert_eq!(
format!("{}", &result),
"east,host-a,10,3
east,host-b,20,4
west,host-a,25,3
west,host-c,21,1
west,host-d,11,9
"
);
// results don't have to have group keys.
result.schema.group_columns = vec![];
result.group_key_cols = vec![];
// Debug implementation
assert_eq!(
format!("{:?}", &result),
"temp_sum,voltage_count
10,3
20,4
25,3
21,1
11,9
"
);
// Display implementation
assert_eq!(
format!("{}", &result),
"10,3
20,4
25,3
21,1
11,9
"
);
}
#[test]
fn read_aggregate_result_sort() {
let mut result = ReadAggregateResult {
schema: ResultSchema::default(), // schema not needed for sorting.
group_key_cols: vec![
vec![
Some("east"),
Some("west"),
Some("west"),
Some("east"),
Some("west"),
],
vec![
Some("host-a"),
Some("host-c"),
Some("host-a"),
Some("host-d"),
Some("host-b"),
],
],
aggregate_cols: vec![
AggregateVec::SumI64(vec![Some(10), Some(20), Some(25), Some(21), Some(11)]),
AggregateVec::Count(vec![Some(3), Some(4), Some(3), Some(1), Some(9)]),
],
group_keys_sorted: false,
};
result.sort();
// Debug implementation
assert_eq!(
format!("{}", &result),
"east,host-a,10,3
east,host-d,21,1
west,host-a,25,3
west,host-b,11,9
west,host-c,20,4
"
);
let mut result = ReadAggregateResult {
schema: ResultSchema::default(),
group_key_cols: vec![
vec![Some("west"), Some("east"), Some("north")],
vec![Some("host-c"), Some("host-c"), Some("host-c")],
vec![Some("pro"), Some("stag"), Some("dev")],
],
aggregate_cols: vec![
AggregateVec::SumI64(vec![Some(10), Some(20), Some(-5)]),
AggregateVec::Count(vec![Some(6), Some(8), Some(2)]),
],
..Default::default()
};
result.sort();
assert_eq!(
format!("{}", &result),
"east,host-c,stag,20,8
north,host-c,dev,-5,2
west,host-c,pro,10,6
"
);
}
#[test]
fn read_aggregate_result_merge() {
let schema = ResultSchema {
group_columns: vec![
(
schema::ColumnType::Tag("region".to_owned()),
LogicalDataType::String,
),
(
schema::ColumnType::Tag("host".to_owned()),
LogicalDataType::String,
),
],
aggregate_columns: vec![
(
schema::ColumnType::Field("temp".to_owned()),
AggregateType::Sum,
LogicalDataType::Integer,
),
(
schema::ColumnType::Field("voltage".to_owned()),
AggregateType::Count,
LogicalDataType::Unsigned,
),
],
..ResultSchema::default()
};
let mut result = ReadAggregateResult {
schema: schema.clone(),
..Default::default()
};
let other_result = ReadAggregateResult {
schema: schema.clone(),
group_key_cols: vec![
vec![Some("east"), Some("east")],
vec![Some("host-a"), Some("host-b")],
],
aggregate_cols: vec![
AggregateVec::SumI64(vec![Some(10), Some(20)]),
AggregateVec::Count(vec![Some(3), Some(4)]),
],
..Default::default()
};
// merging something into nothing results in having a copy of something.
result = result.merge(other_result.clone());
assert_eq!(result, other_result.clone());
// merging the something into the result again results in all the
// aggregates doubling.
result = result.merge(other_result.clone());
assert_eq!(
result,
ReadAggregateResult {
schema: schema.clone(),
group_key_cols: vec![
vec![Some("east"), Some("east")],
vec![Some("host-a"), Some("host-b")],
],
aggregate_cols: vec![
AggregateVec::SumI64(vec![Some(20), Some(40)]),
AggregateVec::Count(vec![Some(6), Some(8)]),
],
..Default::default()
}
);
// merging a result in with different group keys merges those group
// keys in.
let other_result = ReadAggregateResult {
schema: schema.clone(),
group_key_cols: vec![vec![Some("north")], vec![Some("host-a")]],
aggregate_cols: vec![
AggregateVec::SumI64(vec![Some(-5)]),
AggregateVec::Count(vec![Some(2)]),
],
..Default::default()
};
result = result.merge(other_result.clone());
assert_eq!(
result,
ReadAggregateResult {
schema: schema.clone(),
group_key_cols: vec![
vec![Some("east"), Some("east"), Some("north")],
vec![Some("host-a"), Some("host-b"), Some("host-a")],
],
aggregate_cols: vec![
AggregateVec::SumI64(vec![Some(20), Some(40), Some(-5)]),
AggregateVec::Count(vec![Some(6), Some(8), Some(2)]),
],
..Default::default()
}
);
// merging nothing in doesn't change the result.
let other_result = ReadAggregateResult {
schema: schema.clone(),
..Default::default()
};
result = result.merge(other_result.clone());
assert_eq!(
result,
ReadAggregateResult {
schema,
group_key_cols: vec![
vec![Some("east"), Some("east"), Some("north")],
vec![Some("host-a"), Some("host-b"), Some("host-a")],
],
aggregate_cols: vec![
AggregateVec::SumI64(vec![Some(20), Some(40), Some(-5)]),
AggregateVec::Count(vec![Some(6), Some(8), Some(2)]),
],
..Default::default()
}
);
}
#[test]
fn column_meta_equal() {
let col1 = ColumnMeta {
typ: schema::ColumnType::Tag("region".to_owned()),
logical_data_type: schema::LogicalDataType::String,
range: (
OwnedValue::String("east".to_owned()),
OwnedValue::String("west".to_owned()),
),
distinct_count: Some(NonZeroU64::new(233).unwrap()),
};
let col2 = ColumnMeta {
typ: schema::ColumnType::Tag("region".to_owned()),
logical_data_type: schema::LogicalDataType::String,
range: (
OwnedValue::String("north".to_owned()),
OwnedValue::String("west".to_owned()),
),
distinct_count: Some(NonZeroU64::new(233).unwrap()),
};
let col3 = ColumnMeta {
typ: schema::ColumnType::Tag("host".to_owned()),
logical_data_type: schema::LogicalDataType::String,
range: (
OwnedValue::String("east".to_owned()),
OwnedValue::String("west".to_owned()),
),
distinct_count: None,
};
assert_eq!(col1, col2);
assert_ne!(col1, col3);
assert_ne!(col2, col3);
}
#[test]
fn column_names() {
let mut columns = vec![];
let rc = ColumnType::Tag(Column::from(&[Some("west"), Some("west"), None, None][..]));
columns.push(("region".to_string(), rc));
let track = ColumnType::Tag(Column::from(
&[Some("Thinking"), Some("of"), Some("a"), Some("place")][..],
));
columns.push(("track".to_string(), track));
let temp = ColumnType::Field(Column::from(
&[Some("hot"), Some("cold"), Some("cold"), Some("warm")][..],
));
columns.push(("temp".to_string(), temp));
let tc = ColumnType::Time(Column::from(&[100_i64, 200, 500, 600][..]));
columns.push(("time".to_string(), tc));
let row_group = RowGroup::new(4, columns);
// No predicate - just find a value in each column that matches.
let mut dst = BTreeSet::new();
row_group.column_names(&Predicate::default(), Selection::All, &mut dst);
assert_eq!(
dst,
vec!["region", "temp", "time", "track"]
.into_iter()
.map(|s| s.to_owned())
.collect()
);
// A predicate, but no rows match. No columns should be returned.
let mut dst = BTreeSet::new();
row_group.column_names(
&Predicate::new(vec![BinaryExpr::from(("region", "=", "east"))]),
Selection::All,
&mut dst,
);
assert!(dst.is_empty());
// A predicate, that matches some rows. Columns with non-null values at
// those rows should be returned.
let mut dst = BTreeSet::new();
row_group.column_names(
&Predicate::new(vec![BinaryExpr::from(("track", "=", "place"))]),
Selection::All,
&mut dst,
);
// query matches one row.
//
// region, temp, track, time
// NULL , warm, place, 600
//
assert_eq!(
dst,
vec!["temp", "time", "track",]
.into_iter()
.map(|s| s.to_owned())
.collect()
);
// Reusing the same buffer keeps existing results even if they're not
// part of the result-set from the row group.
let mut columns = vec![];
let rc = ColumnType::Tag(Column::from(&[Some("prod")][..]));
columns.push(("env".to_string(), rc));
let tc = ColumnType::Time(Column::from(&[100_i64][..]));
let temp = ColumnType::Field(Column::from(&[Some("hot")][..]));
columns.push(("temp".to_string(), temp));
columns.push(("time".to_string(), tc));
let row_group = RowGroup::new(1, columns);
row_group.column_names(&Predicate::default(), Selection::All, &mut dst);
assert_eq!(
dst,
vec!["env", "temp", "time", "track"]
.into_iter()
.map(|s| s.to_owned())
.collect()
);
// just tag keys
dst.clear();
row_group.column_names(&Predicate::default(), Selection::Some(&["env"]), &mut dst);
assert_eq!(
dst.iter().cloned().collect::<Vec<_>>(),
vec!["env".to_owned()],
);
// just field keys
dst.clear();
row_group.column_names(&Predicate::default(), Selection::Some(&["temp"]), &mut dst);
assert_eq!(
dst.iter().cloned().collect::<Vec<_>>(),
vec!["temp".to_owned()],
);
}
fn to_map(arr: Vec<(&str, &[&str])>) -> BTreeMap<String, BTreeSet<String>> {
arr.iter()
.map(|(k, values)| {
(
k.to_string(),
values
.iter()
.map(|s| s.to_string())
.collect::<BTreeSet<_>>(),
)
})
.collect::<BTreeMap<_, _>>()
}
#[test]
fn column_values() {
// Build a row group.
let mut columns = vec![];
let tc = ColumnType::Time(Column::from(&[1_i64, 2, 3][..]));
columns.push(("time".to_string(), tc));
let rc = ColumnType::Tag(Column::from(&["west", "south", "north"][..]));
columns.push(("region".to_string(), rc));
let ec = ColumnType::Tag(Column::from(&["prod", "stag", "stag"][..]));
columns.push(("env".to_string(), ec));
let rg = RowGroup::new(3, columns);
let result = rg.column_values(&Predicate::default(), &["region"], BTreeMap::new());
assert_eq!(
result,
to_map(vec![("region", &["north", "west", "south"])])
);
let result = rg.column_values(&Predicate::default(), &["env", "region"], BTreeMap::new());
assert_eq!(
result,
to_map(vec![
("env", &["prod", "stag"]),
("region", &["north", "west", "south"])
])
);
let result = rg.column_values(
&Predicate::new(vec![BinaryExpr::from(("time", ">", 1_i64))]),
&["env", "region"],
BTreeMap::new(),
);
assert_eq!(
result,
to_map(vec![("env", &["stag"]), ("region", &["north", "south"])])
);
let mut dst = BTreeMap::new();
dst.insert(
"env".to_owned(),
vec!["stag".to_owned()].into_iter().collect::<BTreeSet<_>>(),
);
let result = rg.column_values(
&Predicate::new(vec![BinaryExpr::from(("time", ">", 1_i64))]),
&["env", "region"],
dst,
);
assert_eq!(
result,
to_map(vec![("env", &["stag"]), ("region", &["north", "south"])])
);
let result = rg.column_values(
&Predicate::new(vec![BinaryExpr::from(("time", ">", 4_i64))]),
&["env", "region"],
BTreeMap::new(),
);
assert_eq!(result, to_map(vec![]));
}
use datafusion::logical_plan::*;
use datafusion::scalar::ScalarValue;
use std::convert::TryFrom;
#[test]
fn to_binary_expr() {
let cases = vec![
(
// a = 22
col("a").eq(Expr::Literal(ScalarValue::Int64(Some(22)))),
BinaryExpr::from(("a", "=", 22_i64)),
),
(
// a > 10
col("a").gt(Expr::Literal(ScalarValue::Int64(Some(10)))),
BinaryExpr::from(("a", ">", 10_i64)),
),
(
// 100 = c
Expr::Literal(ScalarValue::Int64(Some(100))).eq(col("c")),
BinaryExpr::from(("c", "=", 100_i64)),
),
(
// 100 != c
Expr::Literal(ScalarValue::Int64(Some(100))).not_eq(col("c")),
BinaryExpr::from(("c", "!=", 100_i64)),
),
(
// 100 < c
Expr::Literal(ScalarValue::Int64(Some(100))).lt(col("c")),
BinaryExpr::from(("c", ">", 100_i64)),
),
(
// 100 <= c
Expr::Literal(ScalarValue::Int64(Some(100))).lt_eq(col("c")),
BinaryExpr::from(("c", ">=", 100_i64)),
),
(
// 100 >= c
Expr::Literal(ScalarValue::Int64(Some(100))).gt_eq(col("c")),
BinaryExpr::from(("c", "<=", 100_i64)),
),
(
// 100 > c
Expr::Literal(ScalarValue::Int64(Some(100))).gt(col("c")),
BinaryExpr::from(("c", "<", 100_i64)),
),
(
// a = timestamp(100000)
col("a").eq(Expr::Literal(ScalarValue::TimestampNanosecond(Some(
1000000,
)))),
BinaryExpr::from(("a", "=", 1000000_i64)),
),
];
for (input, exp) in cases {
assert_eq!(BinaryExpr::try_from(&input).unwrap(), exp);
}
// Error cases
let cases = vec![
// 33 = 33
Expr::Literal(ScalarValue::Int64(Some(33)))
.eq(Expr::Literal(ScalarValue::Int64(Some(33)))),
// a > b
col("a").gt(col("b")),
];
for input in cases {
assert!(BinaryExpr::try_from(&input).is_err());
}
}
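// A small extra check (a sketch) exercising the time-range helpers on
// `Predicate`: two expressions on the time column are detected as a time
// range, and removing them by column name empties that part of the
// predicate.
#[test]
fn predicate_time_range_helpers() {
let mut predicate = Predicate::new(vec![
BinaryExpr::from((TIME_COLUMN_NAME, ">=", 100_i64)),
BinaryExpr::from((TIME_COLUMN_NAME, "<", 200_i64)),
BinaryExpr::from(("region", "=", "west")),
]);
assert!(predicate.contains_time_range());
let time_exprs = predicate.remove_expr_by_column_name(TIME_COLUMN_NAME);
assert_eq!(time_exprs.len(), 2);
assert!(!predicate.contains_time_range());
}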
}
| 37.0517 | 100 | 0.510268 |
ab7fadac91e39f892fa5abbd15dd9ae8e784ec3a | 21,166 | use rustc_index::vec::IndexVec;
use rustc_middle::mir::tcx::RvalueInitializationState;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, TyCtxt};
use smallvec::{smallvec, SmallVec};
use std::mem;
use super::abs_domain::Lift;
use super::IllegalMoveOriginKind::*;
use super::{Init, InitIndex, InitKind, InitLocation, LookupResult, MoveError};
use super::{
LocationMap, MoveData, MoveOut, MoveOutIndex, MovePath, MovePathIndex, MovePathLookup,
};
struct MoveDataBuilder<'a, 'tcx> {
body: &'a Body<'tcx>,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
data: MoveData<'tcx>,
errors: Vec<(Place<'tcx>, MoveError<'tcx>)>,
}
impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> {
fn new(body: &'a Body<'tcx>, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
let mut move_paths = IndexVec::new();
let mut path_map = IndexVec::new();
let mut init_path_map = IndexVec::new();
MoveDataBuilder {
body,
tcx,
param_env,
errors: Vec::new(),
data: MoveData {
moves: IndexVec::new(),
loc_map: LocationMap::new(body),
rev_lookup: MovePathLookup {
locals: body
.local_decls
.indices()
.map(|i| {
Self::new_move_path(
&mut move_paths,
&mut path_map,
&mut init_path_map,
None,
Place::from(i),
)
})
.collect(),
projections: Default::default(),
},
move_paths,
path_map,
inits: IndexVec::new(),
init_loc_map: LocationMap::new(body),
init_path_map,
},
}
}
fn new_move_path(
move_paths: &mut IndexVec<MovePathIndex, MovePath<'tcx>>,
path_map: &mut IndexVec<MovePathIndex, SmallVec<[MoveOutIndex; 4]>>,
init_path_map: &mut IndexVec<MovePathIndex, SmallVec<[InitIndex; 4]>>,
parent: Option<MovePathIndex>,
place: Place<'tcx>,
) -> MovePathIndex {
let move_path =
move_paths.push(MovePath { next_sibling: None, first_child: None, parent, place });
if let Some(parent) = parent {
let next_sibling = mem::replace(&mut move_paths[parent].first_child, Some(move_path));
move_paths[move_path].next_sibling = next_sibling;
}
let path_map_ent = path_map.push(smallvec![]);
assert_eq!(path_map_ent, move_path);
let init_path_map_ent = init_path_map.push(smallvec![]);
assert_eq!(init_path_map_ent, move_path);
move_path
}
}
impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> {
/// This creates a MovePath for a given place, returning a `MoveError`
/// if that place can't be moved from.
///
/// NOTE: places behind references *do not* get a move path, which is
/// problematic for borrowck.
///
/// Maybe we should have separate "borrowck" and "moveck" modes.
fn move_path_for(&mut self, place: Place<'tcx>) -> Result<MovePathIndex, MoveError<'tcx>> {
debug!("lookup({:?})", place);
let mut base = self.builder.data.rev_lookup.locals[place.local];
// The move path index of the first union that we find. Once this is
// `Some` we stop creating child move paths, since moves from unions
// move the whole thing.
// We continue looking for other move errors though, so that moving
// from `*(u.f: &_)` isn't allowed.
let mut union_path = None;
for (i, elem) in place.projection.iter().enumerate() {
let proj_base = &place.projection[..i];
let body = self.builder.body;
let tcx = self.builder.tcx;
let place_ty = Place::ty_from(place.local, proj_base, body, tcx).ty;
match place_ty.kind() {
ty::Ref(..) | ty::RawPtr(..) => {
let proj = &place.projection[..i + 1];
return Err(MoveError::cannot_move_out_of(
self.loc,
BorrowedContent {
target_place: Place {
local: place.local,
projection: tcx.intern_place_elems(proj),
},
},
));
}
ty::Adt(adt, _) if adt.has_dtor(tcx) && !adt.is_box() => {
return Err(MoveError::cannot_move_out_of(
self.loc,
InteriorOfTypeWithDestructor { container_ty: place_ty },
));
}
ty::Adt(adt, _) if adt.is_union() => {
union_path.get_or_insert(base);
}
ty::Slice(_) => {
return Err(MoveError::cannot_move_out_of(
self.loc,
InteriorOfSliceOrArray {
ty: place_ty,
is_index: matches!(elem, ProjectionElem::Index(..)),
},
));
}
ty::Array(..) => {
if let ProjectionElem::Index(..) = elem {
return Err(MoveError::cannot_move_out_of(
self.loc,
InteriorOfSliceOrArray { ty: place_ty, is_index: true },
));
}
}
_ => {}
};
if union_path.is_none() {
base = self.add_move_path(base, elem, |tcx| Place {
local: place.local,
projection: tcx.intern_place_elems(&place.projection[..i + 1]),
});
}
}
if let Some(base) = union_path {
// Move out of union - always move the entire union.
Err(MoveError::UnionMove { path: base })
} else {
Ok(base)
}
}
fn add_move_path(
&mut self,
base: MovePathIndex,
elem: PlaceElem<'tcx>,
mk_place: impl FnOnce(TyCtxt<'tcx>) -> Place<'tcx>,
) -> MovePathIndex {
let MoveDataBuilder {
data: MoveData { rev_lookup, move_paths, path_map, init_path_map, .. },
tcx,
..
} = self.builder;
*rev_lookup.projections.entry((base, elem.lift())).or_insert_with(move || {
MoveDataBuilder::new_move_path(
move_paths,
path_map,
init_path_map,
Some(base),
mk_place(*tcx),
)
})
}
fn create_move_path(&mut self, place: Place<'tcx>) {
        // This is a non-moving access (such as an overwrite or
// drop), so this not being a valid move path is OK.
let _ = self.move_path_for(place);
}
}
impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> {
fn finalize(
self,
) -> Result<MoveData<'tcx>, (MoveData<'tcx>, Vec<(Place<'tcx>, MoveError<'tcx>)>)> {
debug!("{}", {
debug!("moves for {:?}:", self.body.span);
for (j, mo) in self.data.moves.iter_enumerated() {
debug!(" {:?} = {:?}", j, mo);
}
debug!("move paths for {:?}:", self.body.span);
for (j, path) in self.data.move_paths.iter_enumerated() {
debug!(" {:?} = {:?}", j, path);
}
"done dumping moves"
});
if !self.errors.is_empty() { Err((self.data, self.errors)) } else { Ok(self.data) }
}
}
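/// Walks every statement and terminator of `body` in order, gathering the
/// move paths, moves, and initializations into a single `MoveData` result.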
pub(super) fn gather_moves<'tcx>(
body: &Body<'tcx>,
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> Result<MoveData<'tcx>, (MoveData<'tcx>, Vec<(Place<'tcx>, MoveError<'tcx>)>)> {
let mut builder = MoveDataBuilder::new(body, tcx, param_env);
builder.gather_args();
for (bb, block) in body.basic_blocks().iter_enumerated() {
for (i, stmt) in block.statements.iter().enumerate() {
let source = Location { block: bb, statement_index: i };
builder.gather_statement(source, stmt);
}
let terminator_loc = Location { block: bb, statement_index: block.statements.len() };
builder.gather_terminator(terminator_loc, block.terminator());
}
builder.finalize()
}
impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> {
fn gather_args(&mut self) {
for arg in self.body.args_iter() {
let path = self.data.rev_lookup.locals[arg];
let init = self.data.inits.push(Init {
path,
kind: InitKind::Deep,
location: InitLocation::Argument(arg),
});
debug!("gather_args: adding init {:?} of {:?} for argument {:?}", init, path, arg);
self.data.init_path_map[path].push(init);
}
}
fn gather_statement(&mut self, loc: Location, stmt: &Statement<'tcx>) {
debug!("gather_statement({:?}, {:?})", loc, stmt);
(Gatherer { builder: self, loc }).gather_statement(stmt);
}
fn gather_terminator(&mut self, loc: Location, term: &Terminator<'tcx>) {
debug!("gather_terminator({:?}, {:?})", loc, term);
(Gatherer { builder: self, loc }).gather_terminator(term);
}
}
struct Gatherer<'b, 'a, 'tcx> {
builder: &'b mut MoveDataBuilder<'a, 'tcx>,
loc: Location,
}
impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> {
fn gather_statement(&mut self, stmt: &Statement<'tcx>) {
match &stmt.kind {
StatementKind::Assign(box (place, rval)) => {
self.create_move_path(*place);
if let RvalueInitializationState::Shallow = rval.initialization_state() {
// Box starts out uninitialized - need to create a separate
// move-path for the interior so it will be separate from
// the exterior.
self.create_move_path(self.builder.tcx.mk_place_deref(*place));
self.gather_init(place.as_ref(), InitKind::Shallow);
} else {
self.gather_init(place.as_ref(), InitKind::Deep);
}
self.gather_rvalue(rval);
}
StatementKind::FakeRead(_, place) => {
self.create_move_path(**place);
}
StatementKind::LlvmInlineAsm(ref asm) => {
for (output, kind) in asm.outputs.iter().zip(&asm.asm.outputs) {
if !kind.is_indirect {
self.gather_init(output.as_ref(), InitKind::Deep);
}
}
for (_, input) in asm.inputs.iter() {
self.gather_operand(input);
}
}
StatementKind::StorageLive(_) => {}
StatementKind::StorageDead(local) => {
self.gather_move(Place::from(*local));
}
StatementKind::SetDiscriminant { .. } => {
span_bug!(
stmt.source_info.span,
"SetDiscriminant should not exist during borrowck"
);
}
StatementKind::Retag { .. }
| StatementKind::AscribeUserType(..)
| StatementKind::Coverage(..)
| StatementKind::Nop => {}
}
}
fn gather_rvalue(&mut self, rvalue: &Rvalue<'tcx>) {
match *rvalue {
Rvalue::ThreadLocalRef(_) => {} // not-a-move
Rvalue::Use(ref operand)
| Rvalue::Repeat(ref operand, _)
| Rvalue::Cast(_, ref operand, _)
| Rvalue::UnaryOp(_, ref operand) => self.gather_operand(operand),
Rvalue::BinaryOp(ref _binop, ref lhs, ref rhs)
| Rvalue::CheckedBinaryOp(ref _binop, ref lhs, ref rhs) => {
self.gather_operand(lhs);
self.gather_operand(rhs);
}
Rvalue::Aggregate(ref _kind, ref operands) => {
for operand in operands {
self.gather_operand(operand);
}
}
Rvalue::Ref(..)
| Rvalue::AddressOf(..)
| Rvalue::Discriminant(..)
| Rvalue::Len(..)
| Rvalue::NullaryOp(NullOp::SizeOf, _)
| Rvalue::NullaryOp(NullOp::Box, _) => {
// This returns an rvalue with uninitialized contents. We can't
// move out of it here because it is an rvalue - assignments always
// completely initialize their place.
//
// However, this does not matter - MIR building is careful to
// only emit a shallow free for the partially-initialized
// temporary.
//
// In any case, if we want to fix this, we have to register a
// special move and change the `statement_effect` functions.
}
}
}
fn gather_terminator(&mut self, term: &Terminator<'tcx>) {
match term.kind {
TerminatorKind::Goto { target: _ }
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. }
            // In some sense, returning moves the return place into the
            // current call's destination. However, since there are no
            // statements after this that could possibly access the return
            // place, this doesn't need recording.
| TerminatorKind::Return
| TerminatorKind::Resume
| TerminatorKind::Abort
| TerminatorKind::GeneratorDrop
| TerminatorKind::Unreachable => {}
TerminatorKind::Assert { ref cond, .. } => {
self.gather_operand(cond);
}
TerminatorKind::SwitchInt { ref discr, .. } => {
self.gather_operand(discr);
}
TerminatorKind::Yield { ref value, resume_arg: place, .. } => {
self.gather_operand(value);
self.create_move_path(place);
self.gather_init(place.as_ref(), InitKind::Deep);
}
TerminatorKind::Drop { place, target: _, unwind: _ } => {
self.gather_move(place);
}
TerminatorKind::DropAndReplace { place, ref value, .. } => {
self.create_move_path(place);
self.gather_operand(value);
self.gather_init(place.as_ref(), InitKind::Deep);
}
TerminatorKind::Call {
ref func,
ref args,
ref destination,
cleanup: _,
from_hir_call: _,
fn_span: _,
} => {
self.gather_operand(func);
for arg in args {
self.gather_operand(arg);
}
if let Some((destination, _bb)) = *destination {
self.create_move_path(destination);
self.gather_init(destination.as_ref(), InitKind::NonPanicPathOnly);
}
}
TerminatorKind::InlineAsm {
template: _,
ref operands,
options: _,
line_spans: _,
destination: _,
} => {
for op in operands {
match *op {
InlineAsmOperand::In { reg: _, ref value }
| InlineAsmOperand::Const { ref value } => {
self.gather_operand(value);
}
InlineAsmOperand::Out { reg: _, late: _, place, .. } => {
if let Some(place) = place {
self.create_move_path(place);
self.gather_init(place.as_ref(), InitKind::Deep);
}
}
InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
self.gather_operand(in_value);
if let Some(out_place) = out_place {
self.create_move_path(out_place);
self.gather_init(out_place.as_ref(), InitKind::Deep);
}
}
InlineAsmOperand::SymFn { value: _ }
| InlineAsmOperand::SymStatic { def_id: _ } => {}
}
}
}
}
}
fn gather_operand(&mut self, operand: &Operand<'tcx>) {
match *operand {
Operand::Constant(..) | Operand::Copy(..) => {} // not-a-move
Operand::Move(place) => {
// a move
self.gather_move(place);
}
}
}
fn gather_move(&mut self, place: Place<'tcx>) {
debug!("gather_move({:?}, {:?})", self.loc, place);
if let [ref base @ .., ProjectionElem::Subslice { from, to, from_end: false }] =
**place.projection
{
// Split `Subslice` patterns into the corresponding list of
// `ConstIndex` patterns. This is done to ensure that all move paths
// are disjoint, which is expected by drop elaboration.
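            // (For instance, a `[_, ys @ .., _]` pattern on a `[T; 5]`
            // turns the subslice move into `ConstantIndex` moves of the
            // elements at offsets 1, 2, and 3.)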
let base_place =
Place { local: place.local, projection: self.builder.tcx.intern_place_elems(base) };
let base_path = match self.move_path_for(base_place) {
Ok(path) => path,
Err(MoveError::UnionMove { path }) => {
self.record_move(place, path);
return;
}
Err(error @ MoveError::IllegalMove { .. }) => {
self.builder.errors.push((base_place, error));
return;
}
};
let base_ty = base_place.ty(self.builder.body, self.builder.tcx).ty;
let len: u64 = match base_ty.kind() {
ty::Array(_, size) => size.eval_usize(self.builder.tcx, self.builder.param_env),
_ => bug!("from_end: false slice pattern of non-array type"),
};
for offset in from..to {
let elem =
ProjectionElem::ConstantIndex { offset, min_length: len, from_end: false };
let path =
self.add_move_path(base_path, elem, |tcx| tcx.mk_place_elem(base_place, elem));
self.record_move(place, path);
}
} else {
match self.move_path_for(place) {
Ok(path) | Err(MoveError::UnionMove { path }) => self.record_move(place, path),
Err(error @ MoveError::IllegalMove { .. }) => {
self.builder.errors.push((place, error));
}
};
}
}
fn record_move(&mut self, place: Place<'tcx>, path: MovePathIndex) {
let move_out = self.builder.data.moves.push(MoveOut { path, source: self.loc });
debug!(
"gather_move({:?}, {:?}): adding move {:?} of {:?}",
self.loc, place, move_out, path
);
self.builder.data.path_map[path].push(move_out);
self.builder.data.loc_map[self.loc].push(move_out);
}
fn gather_init(&mut self, place: PlaceRef<'tcx>, kind: InitKind) {
debug!("gather_init({:?}, {:?})", self.loc, place);
let mut place = place;
// Check if we are assigning into a field of a union, if so, lookup the place
// of the union so it is marked as initialized again.
if let [proj_base @ .., ProjectionElem::Field(_, _)] = place.projection {
if let ty::Adt(def, _) =
Place::ty_from(place.local, proj_base, self.builder.body, self.builder.tcx)
.ty
.kind()
{
if def.is_union() {
place = PlaceRef { local: place.local, projection: proj_base }
}
}
}
if let LookupResult::Exact(path) = self.builder.data.rev_lookup.find(place) {
let init = self.builder.data.inits.push(Init {
location: InitLocation::Statement(self.loc),
path,
kind,
});
debug!(
"gather_init({:?}, {:?}): adding init {:?} of {:?}",
self.loc, place, init, path
);
self.builder.data.init_path_map[path].push(init);
self.builder.data.init_loc_map[self.loc].push(init);
}
}
}
| 38.483636 | 100 | 0.488283 |
c13f55369c9c291b129dd51523706671a9ceca38 | 199 | #![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
//! Raw C API.
pub mod api;
pub use api::GodotApi;
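// Splice in the bindings that the build script generates into `OUT_DIR`.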
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
| 16.583333 | 51 | 0.688442 |
21da542e40c58e3c95361459375e845c91ea3098 | 19,284 | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! Transformations of SQL ASTs.
//!
//! Most query optimizations are performed by the dataflow layer, but some
//! are much easier to perform in SQL. Someday, we'll want our own SQL IR,
//! but for now we just use the parser's AST directly.
use anyhow::bail;
use uuid::Uuid;
use sql_parser::ast::visit_mut::{self, VisitMut};
use sql_parser::ast::{
Expr, Function, FunctionArgs, Ident, Query, Raw, Select, SelectItem, TableAlias, TableFactor,
TableWithJoins, UnresolvedObjectName, Value,
};
use crate::normalize;
use crate::plan::StatementContext;
pub fn transform_query<'a>(
scx: &StatementContext,
query: &'a mut Query<Raw>,
) -> Result<(), anyhow::Error> {
run_transforms(scx, |t, query| t.visit_query_mut(query), query)
}
pub fn transform_expr(scx: &StatementContext, expr: &mut Expr<Raw>) -> Result<(), anyhow::Error> {
run_transforms(scx, |t, expr| t.visit_expr_mut(expr), expr)
}
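// Both entry points run the same two passes: `FuncRewriter` first, then
// `Desugarer`, bailing out on the first error either pass reports.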
fn run_transforms<F, A>(scx: &StatementContext, mut f: F, ast: &mut A) -> Result<(), anyhow::Error>
where
F: for<'ast> FnMut(&mut dyn VisitMut<'ast, Raw>, &'ast mut A),
{
let mut func_rewriter = FuncRewriter::new(scx);
f(&mut func_rewriter, ast);
func_rewriter.status?;
let mut desugarer = Desugarer::new();
f(&mut desugarer, ast);
desugarer.status
}
// Transforms various functions to forms that are more easily handled by the
// planner.
//
// Specifically:
//
// * Rewrites the `mod` function to the `%` binary operator, so the modulus
// code only needs to handle the operator form.
//
// * Rewrites the `nullif` function to a `CASE` statement, to reuse the code
// for planning equality of datums.
//
// * Rewrites `avg(col)` to `sum(col) / count(col)`, so that we can pretend
// the `avg` aggregate function doesn't exist from here on out. This also
// has the nice side effect of reusing the division planning logic, which
// is not trivial for some types, like decimals.
//
// * Rewrites the suite of standard deviation and variance functions in a
// manner similar to `avg`.
//
// TODO(sploiselle): rewrite these in terms of func::sql_op!
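//
// As a concrete sketch, `avg(col)` is planned roughly as
//
//     sum(col) / CASE WHEN count(col) = 0 THEN NULL ELSE count(col) END
//
// (modulo the `mz_avg_promotion` cast; see `plan_avg` and `plan_divide`).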
struct FuncRewriter<'a> {
scx: &'a StatementContext<'a>,
status: Result<(), anyhow::Error>,
}
impl<'a> FuncRewriter<'a> {
fn new(scx: &'a StatementContext<'a>) -> FuncRewriter<'a> {
FuncRewriter {
scx,
status: Ok(()),
}
}
// Divides `lhs` by `rhs` but replaces division-by-zero errors with NULL;
    // note that this is semantically equivalent to `lhs / NULLIF(rhs, 0)`.
fn plan_divide(lhs: Expr<Raw>, rhs: Expr<Raw>) -> Expr<Raw> {
lhs.divide(Expr::Case {
operand: None,
conditions: vec![rhs.clone().equals(Expr::number("0"))],
results: vec![Expr::null()],
else_result: Some(Box::new(rhs)),
})
}
fn plan_agg(
name: UnresolvedObjectName,
expr: Expr<Raw>,
filter: Option<Box<Expr<Raw>>>,
distinct: bool,
) -> Expr<Raw> {
Expr::Function(Function {
name,
args: FunctionArgs::Args(vec![expr]),
filter,
over: None,
distinct,
})
}
fn plan_avg(expr: Expr<Raw>, filter: Option<Box<Expr<Raw>>>, distinct: bool) -> Expr<Raw> {
let sum = Self::plan_agg(
UnresolvedObjectName::qualified(&["pg_catalog", "sum"]),
expr.clone(),
filter.clone(),
distinct,
)
.call_unary(vec!["mz_internal", "mz_avg_promotion"]);
let count = Self::plan_agg(
UnresolvedObjectName::qualified(&["pg_catalog", "count"]),
expr,
filter,
distinct,
);
Self::plan_divide(sum, count)
}
fn plan_variance(
expr: Expr<Raw>,
filter: Option<Box<Expr<Raw>>>,
distinct: bool,
sample: bool,
) -> Expr<Raw> {
// N.B. this variance calculation uses the "textbook" algorithm, which
// is known to accumulate problematic amounts of error. The numerically
// stable variants, the most well-known of which is Welford's, are
// however difficult to implement inside of Differential Dataflow, as
// they do not obviously support retractions efficiently (#1240).
//
// The code below converts var_samp(x) into
//
// (sum(x²) - sum(x)² / count(x)) / (count(x) - 1)
//
// and var_pop(x) into:
//
// (sum(x²) - sum(x)² / count(x)) / count(x)
//
let expr = expr.call_unary(vec!["mz_internal", "mz_avg_promotion"]);
let expr_squared = expr.clone().multiply(expr.clone());
let sum_squares = Self::plan_agg(
UnresolvedObjectName::qualified(&["pg_catalog", "sum"]),
expr_squared,
filter.clone(),
distinct,
);
let sum = Self::plan_agg(
UnresolvedObjectName::qualified(&["pg_catalog", "sum"]),
expr.clone(),
filter.clone(),
distinct,
);
let sum_squared = sum.clone().multiply(sum);
let count = Self::plan_agg(
UnresolvedObjectName::qualified(&["pg_catalog", "count"]),
expr,
filter,
distinct,
);
Self::plan_divide(
sum_squares.minus(Self::plan_divide(sum_squared, count.clone())),
if sample {
count.minus(Expr::number("1"))
} else {
count
},
)
}
fn plan_stddev(
expr: Expr<Raw>,
filter: Option<Box<Expr<Raw>>>,
distinct: bool,
sample: bool,
) -> Expr<Raw> {
Self::plan_variance(expr, filter, distinct, sample).call_unary(vec!["sqrt"])
}
fn rewrite_expr(&mut self, expr: &Expr<Raw>) -> Option<(Ident, Expr<Raw>)> {
match expr {
Expr::Function(Function {
name,
args: FunctionArgs::Args(args),
filter,
distinct,
over: None,
}) => {
let name = normalize::unresolved_object_name(name.clone()).ok()?;
if let Some(database) = &name.database {
// If a database name is provided, we need only verify that
// the database exists, as presently functions can only
// exist in ambient schemas.
if let Err(e) = self.scx.catalog.resolve_database(database) {
self.status = Err(e.into());
}
}
if name.schema.is_some() && name.schema.as_deref() != Some("pg_catalog") {
return None;
}
let filter = filter.clone();
let distinct = *distinct;
let expr = if args.len() == 1 {
let arg = args[0].clone();
match name.item.as_str() {
"avg" => Self::plan_avg(arg, filter, distinct),
"variance" | "var_samp" => Self::plan_variance(arg, filter, distinct, true),
"var_pop" => Self::plan_variance(arg, filter, distinct, false),
"stddev" | "stddev_samp" => Self::plan_stddev(arg, filter, distinct, true),
"stddev_pop" => Self::plan_stddev(arg, filter, distinct, false),
_ => return None,
}
} else if args.len() == 2 {
let (lhs, rhs) = (args[0].clone(), args[1].clone());
match name.item.as_str() {
"mod" => lhs.modulo(rhs),
"pow" => Expr::call(vec!["pg_catalog", "power"], vec![lhs, rhs]),
_ => return None,
}
} else {
return None;
};
Some((Ident::new(name.item), expr))
}
// Rewrites special keywords that SQL considers to be function calls
// to actual function calls. For example, `SELECT current_timestamp`
// is rewritten to `SELECT current_timestamp()`.
Expr::Identifier(ident) if ident.len() == 1 => {
let ident = normalize::ident(ident[0].clone());
let fn_ident = match ident.as_str() {
"current_role" => Some("current_user"),
"current_schema" | "current_timestamp" | "current_user" => Some(ident.as_str()),
_ => None,
};
match fn_ident {
None => None,
Some(fn_ident) => {
let expr = Expr::call_nullary(vec![fn_ident]);
Some((Ident::new(ident), expr))
}
}
}
_ => None,
}
}
}
impl<'ast> VisitMut<'ast, Raw> for FuncRewriter<'_> {
fn visit_select_item_mut(&mut self, item: &'ast mut SelectItem<Raw>) {
if let SelectItem::Expr { expr, alias: None } = item {
visit_mut::visit_expr_mut(self, expr);
if let Some((alias, expr)) = self.rewrite_expr(expr) {
*item = SelectItem::Expr {
expr,
alias: Some(alias),
};
}
} else {
visit_mut::visit_select_item_mut(self, item);
}
}
fn visit_expr_mut(&mut self, expr: &'ast mut Expr<Raw>) {
visit_mut::visit_expr_mut(self, expr);
if let Some((_name, new_expr)) = self.rewrite_expr(expr) {
*expr = new_expr;
}
}
}
/// Removes syntax sugar to simplify the planner.
///
/// For example, `<expr> NOT IN (<subquery>)` is rewritten to `expr <> ALL
/// (<subquery>)`.
struct Desugarer {
status: Result<(), anyhow::Error>,
}
impl<'ast> VisitMut<'ast, Raw> for Desugarer {
fn visit_expr_mut(&mut self, expr: &'ast mut Expr<Raw>) {
if self.status.is_ok() {
self.status = self.visit_expr_mut_internal(expr);
}
}
}
impl Desugarer {
fn new() -> Desugarer {
Desugarer { status: Ok(()) }
}
fn visit_expr_mut_internal(&mut self, expr: &mut Expr<Raw>) -> Result<(), anyhow::Error> {
// `($expr)` => `$expr`
while let Expr::Nested(e) = expr {
*expr = e.take();
}
        // `$expr BETWEEN $low AND $high` => `$expr >= $low AND $expr <= $high`
        // `$expr NOT BETWEEN $low AND $high` => `$expr < $low OR $expr > $high`
if let Expr::Between {
expr: e,
low,
high,
negated,
} = expr
{
if *negated {
*expr = e.clone().lt(low.take()).or(e.take().gt(high.take()));
} else {
*expr = e.clone().gt_eq(low.take()).and(e.take().lt_eq(high.take()));
}
}
// `$expr IN ($e1, $e2, ..., $en)`
// =>
// `$expr = $e1 OR $expr = $e2 OR ... OR $expr = $en`
if let Expr::InList {
expr: e,
list,
negated,
} = expr
{
let mut cond = Expr::Value(Value::Boolean(false));
for l in list {
cond = cond.or(e.clone().equals(l.take()));
}
if *negated {
*expr = cond.negate();
} else {
*expr = cond;
}
}
// `$expr IN ($subquery)` => `$expr = ANY ($subquery)`
// `$expr NOT IN ($subquery)` => `$expr <> ALL ($subquery)`
if let Expr::InSubquery {
expr: e,
subquery,
negated,
} = expr
{
if *negated {
*expr = Expr::AllSubquery {
left: Box::new(e.take()),
op: "<>".into(),
right: Box::new(subquery.take()),
};
} else {
*expr = Expr::AnySubquery {
left: Box::new(e.take()),
op: "=".into(),
right: Box::new(subquery.take()),
};
}
}
// `$expr = ALL ($array_expr)`
// =>
// `$expr = ALL (SELECT elem FROM unnest($array_expr) _ (elem))`
//
// and analogously for other operators and ANY.
if let Expr::AnyExpr { left, op, right } | Expr::AllExpr { left, op, right } = expr {
let binding = Ident::new("elem");
let subquery = Query::select(
Select::default()
.from(TableWithJoins {
relation: TableFactor::Function {
name: UnresolvedObjectName(vec![
Ident::new("mz_catalog"),
Ident::new("unnest"),
]),
args: FunctionArgs::Args(vec![right.take()]),
alias: Some(TableAlias {
name: Ident::new("_"),
columns: vec![binding.clone()],
strict: true,
}),
},
joins: vec![],
})
.project(SelectItem::Expr {
expr: Expr::Identifier(vec![binding]),
alias: None,
}),
);
let left = Box::new(left.take());
let op = op.clone();
*expr = match expr {
Expr::AnyExpr { .. } => Expr::AnySubquery {
left,
op,
right: Box::new(subquery),
},
Expr::AllExpr { .. } => Expr::AllSubquery {
left,
op,
right: Box::new(subquery),
},
_ => unreachable!(),
};
}
// `$expr = ALL ($subquery)`
// =>
// `(SELECT mz_internal.mz_all($expr = $binding) FROM ($subquery) AS _ ($binding))
//
// and analogously for other operators and ANY.
if let Expr::AnySubquery { left, op, right } | Expr::AllSubquery { left, op, right } = expr
{
let left = match &mut **left {
Expr::Row { .. } => left.take(),
_ => Expr::Row {
exprs: vec![left.take()],
},
};
let arity = match &left {
Expr::Row { exprs } => exprs.len(),
_ => unreachable!(),
};
let bindings: Vec<_> = (0..arity)
.map(|_| Ident::new(format!("right_{}", Uuid::new_v4())))
.collect();
let select = Select::default()
.from(TableWithJoins::subquery(
right.take(),
TableAlias {
name: Ident::new("subquery"),
columns: bindings.clone(),
strict: true,
},
))
.project(SelectItem::Expr {
expr: left
.binop(
&op,
Expr::Row {
exprs: bindings
.into_iter()
.map(|b| Expr::Identifier(vec![b]))
.collect(),
},
)
.call_unary(match expr {
Expr::AnySubquery { .. } => vec!["mz_internal", "mz_any"],
Expr::AllSubquery { .. } => vec!["mz_internal", "mz_all"],
_ => unreachable!(),
}),
alias: None,
});
*expr = Expr::Subquery(Box::new(Query::select(select)));
}
// Expands row comparisons.
//
// ROW($l1, $l2, ..., $ln) = ROW($r1, $r2, ..., $rn)
// =>
// $l1 = $r1 AND $l2 = $r2 AND ... AND $ln = $rn
//
// ROW($l1, $l2, ..., $ln) < ROW($r1, $r2, ..., $rn)
// =>
// $l1 < $r1 OR ($l1 = $r1 AND ($l2 < $r2 OR ($l2 = $r2 AND ... ($ln < $rn))))
//
// ROW($l1, $l2, ..., $ln) <= ROW($r1, $r2, ..., $rn)
// =>
// $l1 < $r1 OR ($l1 = $r1 AND ($l2 < $r2 OR ($l2 = $r2 AND ... ($ln <= $rn))))
//
// and analogously for the inverse operations !=, >, and >=.
if let Expr::Op {
op,
expr1: left,
expr2: Some(right),
} = expr
{
if let (Expr::Row { exprs: left }, Expr::Row { exprs: right }) =
(&mut **left, &mut **right)
{
if matches!(op.as_str(), "=" | "<>" | "<" | "<=" | ">" | ">=") {
if left.len() != right.len() {
bail!("unequal number of entries in row expressions");
}
if left.is_empty() {
assert!(right.is_empty());
bail!("cannot compare rows of zero length");
}
}
match op.as_str() {
"=" | "<>" => {
let mut new = Expr::Value(Value::Boolean(true));
for (l, r) in left.iter_mut().zip(right) {
new = l.take().equals(r.take()).and(new);
}
if op == "<>" {
new = new.negate();
}
*expr = new;
}
"<" | "<=" | ">" | ">=" => {
let strict_op = match op.as_str() {
"<" | "<=" => "<",
">" | ">=" => ">",
_ => unreachable!(),
};
let (l, r) = (left.last_mut().unwrap(), right.last_mut().unwrap());
let mut new = l.take().binop(&op, r.take());
for (l, r) in left.iter_mut().zip(right).rev().skip(1) {
new = l
.clone()
.binop(strict_op, r.clone())
.or(l.take().equals(r.take()).and(new));
}
*expr = new;
}
_ => (),
}
}
}
visit_mut::visit_expr_mut(self, expr);
Ok(())
}
}
| 35.843866 | 100 | 0.443995 |
891dc5bfa4b0b71395d17d26d6c137fda480b125 | 29,866 | use chrono::{DateTime, NaiveDateTime, Utc};
use rspotify::model::*;
use std::time::Duration;
#[test]
fn test_simplified_track() {
let json_str = r#"
{
"artists": [ {
"external_urls": {
"spotify": "https://open.spotify.com/artist/08td7MxkoHQkXnWAYD8d6Q"
},
"href": "https://api.spotify.com/v1/artists/08td7MxkoHQkXnWAYD8d6Q",
"id": "08td7MxkoHQkXnWAYD8d6Q",
"name": "Tania Bowra",
"type": "artist",
"uri": "spotify:artist:08td7MxkoHQkXnWAYD8d6Q"
} ],
"available_markets": ["US"],
"disc_number": 1,
"duration_ms": 276773,
"explicit": false,
"external_urls": {
"spotify": "https://open.spotify.com/track/2TpxZ7JUBn3uw46aR7qd6V"
},
"href": "https://api.spotify.com/v1/tracks/2TpxZ7JUBn3uw46aR7qd6V",
"id": "2TpxZ7JUBn3uw46aR7qd6V",
"name": "All I Want",
"preview_url": "https://p.scdn.co/mp3-preview/6d00206e32194d15df329d4770e4fa1f2ced3f57",
"track_number": 1,
"type": "track",
"uri": "spotify:track:2TpxZ7JUBn3uw46aR7qd6V",
"is_local": false
}
"#;
let track: SimplifiedTrack = serde_json::from_str(&json_str).unwrap();
let duration = Duration::from_millis(276773);
assert_eq!(track.duration, duration);
}
#[test]
fn test_public_user() {
let json_str = r#"
{
"display_name": "Ronald Pompa",
"external_urls": {
"spotify": "https://open.spotify.com/user/wizzler"
},
"followers": {
"href": null,
"total": 4050
},
"href": "https://api.spotify.com/v1/users/wizzler",
"id": "wizzler",
"images": [
{
"height": null,
"url": "https://i.scdn.co/image/ab6775700000ee85b5d374d281b9e510eda15fdf",
"width": null
}
],
"type": "user",
"uri": "spotify:user:wizzler"
}
"#;
let user: PublicUser = serde_json::from_str(&json_str).unwrap();
assert_eq!(user.id, "wizzler".to_string());
}
#[test]
fn test_private_user() {
let json_str = r#"
{
"country": "US",
"display_name": "Sergey",
"email": "[email protected]",
"explicit_content": {
"filter_enabled": false,
"filter_locked": false
},
"external_urls": {
"spotify": "https://open.spotify.com/user/waq5aexykhm6nlv0cnwdieng0"
},
"followers": {
"href": null,
"total": 0
},
"href": "https://api.spotify.com/v1/users/waq5aexykhm6nlv0cnwdieng0",
"id": "waq5aexykhm6nlv0cnwdieng0",
"images": [],
"product": "open",
"type": "user",
"uri": "spotify:user:waq5aexykhm6nlv0cnwdieng0"
}
"#;
let private_user: PrivateUser = serde_json::from_str(&json_str).unwrap();
assert_eq!(private_user.country.unwrap(), Country::UnitedStates);
}
#[test]
fn test_full_artist() {
let json_str = r#"
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/0OdUWJ0sBjDrqHygGUXeCF"
},
"followers": {
"href": null,
"total": 833247
},
"genres": [
"indie folk"
],
"href": "https://api.spotify.com/v1/artists/0OdUWJ0sBjDrqHygGUXeCF",
"id": "0OdUWJ0sBjDrqHygGUXeCF",
"images": [
{
"height": 640,
"url": "https://i.scdn.co/image/0f9a5013134de288af7d49a962417f4200539b47",
"width": 640
}
],
"name": "Band of Horses",
"popularity": 65,
"type": "artist",
"uri": "spotify:artist:0OdUWJ0sBjDrqHygGUXeCF"
}
"#;
let full_artist: FullArtist = serde_json::from_str(&json_str).unwrap();
assert_eq!(full_artist.name, "Band of Horses");
assert_eq!(full_artist.followers.total, 833247);
}
#[test]
fn test_simplified_episode() {
let json_str = r#"
{
"audio_preview_url": "https://p.scdn.co/mp3-preview/d8b916e1872de2bb0285d8c7bfe2b4b57011c85c",
"description": "En unik barockträdgård från 1600-talet gömmer sig på Södermalm i Stockholm och nu gräver arkeologerna ut parken och kvarteret där Bellman lekte som barn. Nu grävs Carl Michael Bellmans kvarter fram på Södermalm i Stockholm. Under dagens jordyta döljer sig en rik barockträdgård, men också tunga industrier från en tid då Söder var stockholmarnas sommarnöje. Dessutom om hur arkeologer ska kunna bli bättre att hitta de fattigas kulturarv. För vid sidan av slott, borgar och hallar finns torpen och backstugorna som utgör ett fortfarande okänt kulturarv som angår oss alla. Programledare Tobias Svanelid.",
"duration_ms": 2685023,
"explicit": false,
"external_urls": {
"spotify": "https://open.spotify.com/episode/3brfPv3PaUhspkm1T9ZVl8"
},
"href": "https://api.spotify.com/v1/episodes/3brfPv3PaUhspkm1T9ZVl8",
"id": "3brfPv3PaUhspkm1T9ZVl8",
"images": [
{
"height": 640,
"url": "https://i.scdn.co/image/65497c8c1bef1b783d2be6a1c73b294d953f9406",
"width": 640
}
],
"is_externally_hosted": false,
"is_playable": true,
"language": "sv",
"languages": [
"sv"
],
"name": "På Bellmans bakgata",
"release_date": "2020-10-20",
"release_date_precision": "day",
"resume_point": {
"fully_played": false,
"resume_position_ms": 0
},
"type": "episode",
"uri": "spotify:episode:3brfPv3PaUhspkm1T9ZVl8"
}
"#;
let simplified_episode: SimplifiedEpisode = serde_json::from_str(&json_str).unwrap();
assert_eq!(
simplified_episode.release_date_precision,
DatePrecision::Day
);
let duration = Duration::from_millis(2685023);
assert_eq!(simplified_episode.duration, duration);
}
#[test]
fn test_full_episode() {
let json_str = r#"
{
"audio_preview_url": "https://p.scdn.co/mp3-preview/566fcc94708f39bcddc09e4ce84a8e5db8f07d4d",
"description": "En ny tysk ",
"duration_ms": 1502795,
"explicit": false,
"external_urls": {
"spotify": "https://open.spotify.com/episode/512ojhOuo1ktJprKbVcKyQ"
},
"href": "https://api.spotify.com/v1/episodes/512ojhOuo1ktJprKbVcKyQ",
"id": "512ojhOuo1ktJprKbVcKyQ",
"images": [
{
"height": 64,
"url": "https://i.scdn.co/image/e29c75799cad73927fad713011edad574868d8da",
"width": 64
}
],
"is_externally_hosted": false,
"is_playable": true,
"language": "sv",
"languages": [
"sv"
],
"name": "Tredje rikets knarkande granskas",
"release_date": "2015-10-01",
"release_date_precision": "day",
"show": {
"available_markets": [
"ZA"
],
"copyrights": [],
"description": "Vi är där historien är. Ansvarig utgivare: Nina Glans",
"explicit": false,
"external_urls": {
"spotify": "https://open.spotify.com/show/38bS44xjbVVZ3No3ByF1dJ"
},
"href": "https://api.spotify.com/v1/shows/38bS44xjbVVZ3No3ByF1dJ",
"id": "38bS44xjbVVZ3No3ByF1dJ",
"images": [
{
"height": 64,
"url": "https://i.scdn.co/image/3dc007829bc0663c24089e46743a9f4ae15e65f8",
"width": 64
}
],
"is_externally_hosted": false,
"languages": [
"sv"
],
"media_type": "audio",
"name": "Vetenskapsradion Historia",
"publisher": "Sveriges Radio",
"type": "show",
"uri": "spotify:show:38bS44xjbVVZ3No3ByF1dJ"
},
"type": "episode",
"uri": "spotify:episode:512ojhOuo1ktJprKbVcKyQ"
}
"#;
let full_episode: FullEpisode = serde_json::from_str(&json_str).unwrap();
assert_eq!(full_episode.release_date_precision, DatePrecision::Day);
let duration = Duration::from_millis(1502795);
assert_eq!(full_episode.duration, duration);
}
#[test]
fn test_copyright() {
let json_str = r#"
[ {
"text" : "(P) 2000 Sony Music Entertainment Inc.",
"type" : "P"
} ]
"#;
let copyrights: Vec<Copyright> = serde_json::from_str(&json_str).unwrap();
assert_eq!(copyrights[0]._type, CopyrightType::Performance);
}
#[test]
fn test_audio_analysis_section() {
let json_str = r#"
{
"start": 237.02356,
"duration": 18.32542,
"confidence": 1,
"loudness": -20.074,
"tempo": 98.253,
"tempo_confidence": 0.767,
"key": 5,
"key_confidence": 0.327,
"mode": 1,
"mode_confidence": 0.566,
"time_signature": 4,
"time_signature_confidence": 1
}
"#;
let session: AudioAnalysisSection = serde_json::from_str(&json_str).unwrap();
assert_eq!(session.time_interval.duration, 18.32542);
}
#[test]
fn test_audio_analysis_segments() {
let json_str = r#"
{
"start": 252.15601,
"duration": 3.19297,
"confidence": 0.522,
"loudness_start": -23.356,
"loudness_max_time": 0.06971,
"loudness_max": -18.121,
"loudness_end": -60,
"pitches": [
0.15
],
"timbre": [
-19.037
]
}
"#;
let segment: AudioAnalysisSegment = serde_json::from_str(&json_str).unwrap();
assert_eq!(segment.time_interval.start, 252.15601);
}
#[test]
fn test_actions() {
let json_str = r#"
{
"disallows": {
"resuming": true
}
}
"#;
let actions: Actions = serde_json::from_str(&json_str).unwrap();
assert_eq!(actions.disallows[0], DisallowKey::Resuming);
}
#[test]
fn test_recommendations_seed() {
let json_str = r#"
{
"initialPoolSize": 500,
"afterFilteringSize": 380,
"afterRelinkingSize": 365,
"href": "https://api.spotify.com/v1/artists/4NHQUGzhtTLFvgF5SZesLK",
"id": "4NHQUGzhtTLFvgF5SZesLK",
"type": "artist"
}
"#;
let seed: RecommendationsSeed = serde_json::from_str(&json_str).unwrap();
assert_eq!(seed._type, RecommendationsSeedType::Artist);
}
#[test]
fn test_full_playlist() {
let json_str_images = r#"
[
{
"height": null,
"url": "https://i.scdn.co/image/ab67706c0000bebb8d0ce13d55f634e290f744ba",
"width": null
}
]
"#;
let json_str_simplified_artists = r#"
[
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/5I8r2w4hf7OYp2cunjihxJ"
},
"href": "https://api.spotify.com/v1/artists/5I8r2w4hf7OYp2cunjihxJ",
"id": "5I8r2w4hf7OYp2cunjihxJ",
"name": "Kularis",
"type": "artist",
"uri": "spotify:artist:5I8r2w4hf7OYp2cunjihxJ"
}
]
"#;
let json_str = r#"
{
"collaborative": false,
"description": "A playlist for testing pourposes",
"external_urls": {
"spotify": "https://open.spotify.com/playlist/3cEYpjA9oz9GiPac4AsH4n"
},
"followers": {
"href": null,
"total": 109
},
"href": "https://api.spotify.com/v1/playlists/3cEYpjA9oz9GiPac4AsH4n",
"id": "3cEYpjA9oz9GiPac4AsH4n",
"images": json_str_images,
"name": "Spotify Web API Testing playlist",
"owner": {
"display_name": "JMPerez²",
"external_urls": {
"spotify": "https://open.spotify.com/user/jmperezperez"
},
"href": "https://api.spotify.com/v1/users/jmperezperez",
"id": "jmperezperez",
"type": "user",
"uri": "spotify:user:jmperezperez"
},
"primary_color": null,
"public": true,
"snapshot_id": "MTgsZWFmNmZiNTIzYTg4ODM0OGQzZWQzOGI4NTdkNTJlMjU0OWFkYTUxMA==",
"tracks": {
"href": "https://api.spotify.com/v1/playlists/3cEYpjA9oz9GiPac4AsH4n/tracks?offset=0&limit=100",
"items": [
{
"added_at": "2015-01-15T12:39:22Z",
"added_by": {
"external_urls": {
"spotify": "https://open.spotify.com/user/jmperezperez"
},
"href": "https://api.spotify.com/v1/users/jmperezperez",
"id": "jmperezperez",
"type": "user",
"uri": "spotify:user:jmperezperez"
},
"is_local": false,
"primary_color": null,
"track": {
"album": {
"album_type": "album",
"artists": json_str_simplified_artists,
"available_markets": [
"US"
],
"external_urls": {
"spotify": "https://open.spotify.com/album/2pANdqPvxInB0YvcDiw4ko"
},
"href": "https://api.spotify.com/v1/albums/2pANdqPvxInB0YvcDiw4ko",
"id": "2pANdqPvxInB0YvcDiw4ko",
"images": json_str_images,
"name": "Progressive Psy Trance Picks Vol.8",
"release_date": "2012-04-02",
"release_date_precision": "day",
"total_tracks": 20,
"type": "album",
"uri": "spotify:album:2pANdqPvxInB0YvcDiw4ko"
},
"artists": json_str_simplified_artists,
"available_markets": [
"US"
],
"disc_number": 1,
"duration_ms": 376000,
"episode": false,
"explicit": false,
"external_ids": {
"isrc": "DEKC41200989"
},
"external_urls": {
"spotify": "https://open.spotify.com/track/4rzfv0JLZfVhOhbSQ8o5jZ"
},
"href": "https://api.spotify.com/v1/tracks/4rzfv0JLZfVhOhbSQ8o5jZ",
"id": "4rzfv0JLZfVhOhbSQ8o5jZ",
"is_local": false,
"name": "Api",
"popularity": 2,
"preview_url": "https://p.scdn.co/mp3-preview/c440fa9ff320fb74629f8477bff45b1a377897ab?cid=774b29d4f13844c495f206cafdad9c86",
"track": true,
"track_number": 10,
"type": "track",
"uri": "spotify:track:4rzfv0JLZfVhOhbSQ8o5jZ"
},
"video_thumbnail": {
"url": null
}
}
],
"limit": 100,
"next": null,
"offset": 0,
"previous": null,
"total": 5
},
"type": "playlist",
"uri": "spotify:playlist:3cEYpjA9oz9GiPac4AsH4n"
}
"#.replace("json_str_images", json_str_images).replace("json_str_simplified_artists", json_str_simplified_artists);
let full_playlist: FullPlaylist = serde_json::from_str(&json_str).unwrap();
assert_eq!(
full_playlist.uri,
"spotify:playlist:3cEYpjA9oz9GiPac4AsH4n".to_string()
);
assert_eq!(full_playlist.followers.total, 109);
}
#[test]
fn test_audio_features() {
let json = r#"
{
"duration_ms" : 255349,
"key" : 5,
"mode" : 0,
"time_signature" : 4,
"acousticness" : 0.514,
"danceability" : 0.735,
"energy" : 0.578,
"instrumentalness" : 0.0902,
"liveness" : 0.159,
"loudness" : -11.840,
"speechiness" : 0.0461,
"valence" : 0.624,
"tempo" : 98.002,
"id" : "06AKEBrKUckW0KREUWRnvT",
"uri" : "spotify:track:06AKEBrKUckW0KREUWRnvT",
"track_href" : "https://api.spotify.com/v1/tracks/06AKEBrKUckW0KREUWRnvT",
"analysis_url" : "https://api.spotify.com/v1/audio-analysis/06AKEBrKUckW0KREUWRnvT",
"type" : "audio_features"
}
"#;
let audio_features: AudioFeatures = serde_json::from_str(json).unwrap();
let duration = Duration::from_millis(255349);
assert_eq!(audio_features.duration, duration);
}
#[test]
fn test_full_track() {
let json = r#"
{
"album": {
"album_type": "single",
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/6sFIWsNpZYqfjUpaCgueju"
},
"href": "https://api.spotify.com/v1/artists/6sFIWsNpZYqfjUpaCgueju",
"id": "6sFIWsNpZYqfjUpaCgueju",
"name": "Carly Rae Jepsen",
"type": "artist",
"uri": "spotify:artist:6sFIWsNpZYqfjUpaCgueju"
}
],
"available_markets": [
"ZA"
],
"external_urls": {
"spotify": "https://open.spotify.com/album/0tGPJ0bkWOUmH7MEOR77qc"
},
"href": "https://api.spotify.com/v1/albums/0tGPJ0bkWOUmH7MEOR77qc",
"id": "0tGPJ0bkWOUmH7MEOR77qc",
"images": [
{
"height": 64,
"url": "https://i.scdn.co/image/5a73a056d0af707b4119a883d87285feda543fbb",
"width": 64
}
],
"name": "Cut To The Feeling",
"release_date": "2017-05-26",
"release_date_precision": "day",
"type": "album",
"uri": "spotify:album:0tGPJ0bkWOUmH7MEOR77qc"
},
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/6sFIWsNpZYqfjUpaCgueju"
},
"href": "https://api.spotify.com/v1/artists/6sFIWsNpZYqfjUpaCgueju",
"id": "6sFIWsNpZYqfjUpaCgueju",
"name": "Carly Rae Jepsen",
"type": "artist",
"uri": "spotify:artist:6sFIWsNpZYqfjUpaCgueju"
}
],
"available_markets": [
"ZA"
],
"disc_number": 1,
"duration_ms": 207959,
"explicit": false,
"external_ids": {
"isrc": "USUM71703861"
},
"external_urls": {
"spotify": "https://open.spotify.com/track/11dFghVXANMlKmJXsNCbNl"
},
"href": "https://api.spotify.com/v1/tracks/11dFghVXANMlKmJXsNCbNl",
"id": "11dFghVXANMlKmJXsNCbNl",
"is_local": false,
"name": "Cut To The Feeling",
"popularity": 63,
"preview_url": "https://p.scdn.co/mp3-preview/3eb16018c2a700240e9dfb8817b6f2d041f15eb1?cid=774b29d4f13844c495f206cafdad9c86",
"track_number": 1,
"type": "track",
"uri": "spotify:track:11dFghVXANMlKmJXsNCbNl"
}
"#;
let full_track: FullTrack = serde_json::from_str(&json).unwrap();
let duration = Duration::from_millis(207959);
assert_eq!(full_track.duration, duration);
}
#[test]
fn test_resume_point() {
let json = r#"
{
"fully_played": false,
"resume_position_ms": 423432
}
"#;
let resume_point: ResumePoint = serde_json::from_str(&json).unwrap();
let duration = Duration::from_millis(423432);
assert_eq!(resume_point.resume_position, duration);
}
#[test]
fn test_currently_playing_context() {
let json = r#"
{
"timestamp": 1607769168429,
"context": {
"external_urls": {
"spotify": "https://open.spotify.com/album/2lgOc40hhHqjUGAKMWqGxO"
},
"href": "https://api.spotify.com/v1/albums/2lgOc40hhHqjUGAKMWqGxO",
"type": "album",
"uri": "spotify:album:2lgOc40hhHqjUGAKMWqGxO"
},
"progress_ms": 22270,
"item": {
"album": {
"album_type": "single",
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/0cGUm45nv7Z6M6qdXYQGTX"
},
"href": "https://api.spotify.com/v1/artists/0cGUm45nv7Z6M6qdXYQGTX",
"id": "0cGUm45nv7Z6M6qdXYQGTX",
"name": "Kehlani",
"type": "artist",
"uri": "spotify:artist:0cGUm45nv7Z6M6qdXYQGTX"
}
],
"external_urls": {
"spotify": "https://open.spotify.com/album/2lgOc40hhHqjUGAKMWqGxO"
},
"href": "https://api.spotify.com/v1/albums/2lgOc40hhHqjUGAKMWqGxO",
"id": "2lgOc40hhHqjUGAKMWqGxO",
"images": [
{
"height": 64,
"url": "https://i.scdn.co/image/ab67616d00004851fa7b2b60e85950ee93dcdc04",
"width": 64
}
],
"name": "Playinwitme (feat. Kehlani)",
"release_date": "2018-03-20",
"release_date_precision": "day",
"total_tracks": 1,
"type": "album",
"uri": "spotify:album:2lgOc40hhHqjUGAKMWqGxO"
},
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/0cGUm45nv7Z6M6qdXYQGTX"
},
"href": "https://api.spotify.com/v1/artists/0cGUm45nv7Z6M6qdXYQGTX",
"id": "0cGUm45nv7Z6M6qdXYQGTX",
"name": "Kehlani",
"type": "artist",
"uri": "spotify:artist:0cGUm45nv7Z6M6qdXYQGTX"
}
],
"available_markets": [],
"disc_number": 1,
"duration_ms": 191680,
"explicit": false,
"external_ids": {
"isrc": "USAT21801141"
},
"external_urls": {
"spotify": "https://open.spotify.com/track/4F1yvJfQ7gJkrcgFJQDjOr"
},
"href": "https://api.spotify.com/v1/tracks/4F1yvJfQ7gJkrcgFJQDjOr",
"id": "4F1yvJfQ7gJkrcgFJQDjOr",
"is_local": false,
"is_playable": true,
"linked_from": {
"external_urls": {
"spotify": "https://open.spotify.com/track/43cFjTTCD9Cni4aSL0sORz"
},
"href": "https://api.spotify.com/v1/tracks/43cFjTTCD9Cni4aSL0sORz",
"id": "43cFjTTCD9Cni4aSL0sORz",
"type": "track",
"uri": "spotify:track:43cFjTTCD9Cni4aSL0sORz"
},
"name": "Playinwitme (feat. Kehlani)",
"popularity": 70,
"preview_url": "https://p.scdn.co/mp3-preview/05e8881d5c896a8d147d2e79150fb5480a4fb186?cid=774b29d4f13844c495f206cafdad9c86",
"track_number": 9,
"type": "track",
"uri": "spotify:track:4F1yvJfQ7gJkrcgFJQDjOr"
},
"currently_playing_type": "track",
"actions": {
"disallows": {
"resuming": true,
"skipping_prev": true
}
},
"is_playing": true
}
"#;
let currently_playing_context: CurrentlyPlayingContext = serde_json::from_str(&json).unwrap();
let timestamp = 1607769168429;
let second: i64 = (timestamp - timestamp % 1000) / 1000;
let nanosecond = (timestamp % 1000) * 1000000;
let dt = DateTime::<Utc>::from_utc(
NaiveDateTime::from_timestamp(second, nanosecond as u32),
Utc,
);
assert_eq!(currently_playing_context.timestamp, dt);
let duration = Duration::from_millis(22270);
assert_eq!(currently_playing_context.progress, Some(duration));
}
#[test]
fn test_current_playback_context() {
let json = r#"
{
"device": {
"id": "28d0f845293d03a2713392905c6d30b6442719b5",
"is_active": true,
"is_private_session": false,
"is_restricted": false,
"name": "Web Player (Firefox)",
"type": "Computer",
"volume_percent": 100
},
"shuffle_state": false,
"repeat_state": "off",
"timestamp": 1607774342714,
"context": {
"external_urls": {
"spotify": "https://open.spotify.com/album/2lgOc40hhHqjUGAKMWqGxO"
},
"href": "https://api.spotify.com/v1/albums/2lgOc40hhHqjUGAKMWqGxO",
"type": "album",
"uri": "spotify:album:2lgOc40hhHqjUGAKMWqGxO"
},
"item": {
"album": {
"album_type": "single",
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/0cGUm45nv7Z6M6qdXYQGTX"
},
"href": "https://api.spotify.com/v1/artists/0cGUm45nv7Z6M6qdXYQGTX",
"id": "0cGUm45nv7Z6M6qdXYQGTX",
"name": "Kehlani",
"type": "artist",
"uri": "spotify:artist:0cGUm45nv7Z6M6qdXYQGTX"
}
],
"available_markets": [],
"external_urls": {
"spotify": "https://open.spotify.com/album/2lgOc40hhHqjUGAKMWqGxO"
},
"href": "https://api.spotify.com/v1/albums/2lgOc40hhHqjUGAKMWqGxO",
"id": "2lgOc40hhHqjUGAKMWqGxO",
"images": [
{
"height": 64,
"url": "https://i.scdn.co/image/ab67616d00004851fa7b2b60e85950ee93dcdc04",
"width": 64
}
],
"name": "Playinwitme (feat. Kehlani)",
"release_date": "2018-03-20",
"release_date_precision": "day",
"total_tracks": 1,
"type": "album",
"uri": "spotify:album:2lgOc40hhHqjUGAKMWqGxO"
},
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/0cGUm45nv7Z6M6qdXYQGTX"
},
"href": "https://api.spotify.com/v1/artists/0cGUm45nv7Z6M6qdXYQGTX",
"id": "0cGUm45nv7Z6M6qdXYQGTX",
"name": "Kehlani",
"type": "artist",
"uri": "spotify:artist:0cGUm45nv7Z6M6qdXYQGTX"
}
],
"available_markets": [],
"disc_number": 1,
"duration_ms": 193093,
"explicit": false,
"external_ids": {
"isrc": "USAT21801141"
},
"external_urls": {
"spotify": "https://open.spotify.com/track/43cFjTTCD9Cni4aSL0sORz"
},
"href": "https://api.spotify.com/v1/tracks/43cFjTTCD9Cni4aSL0sORz",
"id": "43cFjTTCD9Cni4aSL0sORz",
"is_local": false,
"name": "Playinwitme (feat. Kehlani)",
"popularity": 0,
"preview_url": null,
"track_number": 1,
"type": "track",
"uri": "spotify:track:43cFjTTCD9Cni4aSL0sORz"
},
"currently_playing_type": "track",
"actions": {
"disallows": {
"resuming": true,
"skipping_prev": true
}
},
"is_playing": true
}
"#;
let current_playback_context: CurrentPlaybackContext = serde_json::from_str(&json).unwrap();
let timestamp = 1607774342714;
let second: i64 = (timestamp - timestamp % 1000) / 1000;
let nanosecond = (timestamp % 1000) * 1000000;
let dt = DateTime::<Utc>::from_utc(
NaiveDateTime::from_timestamp(second, nanosecond as u32),
Utc,
);
assert_eq!(current_playback_context.timestamp, dt);
assert!(current_playback_context.progress.is_none());
}
#[test]
fn test_audio_analysis_track() {
let json = r#"
{
"num_samples": 5630445,
"duration": 255.34898,
"sample_md5": "",
"offset_seconds": 0,
"window_seconds": 0,
"analysis_sample_rate": 22050,
"analysis_channels": 1,
"end_of_fade_in": 0,
"start_of_fade_out": 251.73334,
"loudness": -11.84,
"tempo": 98.002,
"tempo_confidence": 0.423,
"time_signature": 4,
"time_signature_confidence": 1,
"key": 5,
"key_confidence": 0.36,
"mode": 0,
"mode_confidence": 0.414,
"codestring": "e",
"code_version": 3.15,
"echoprintstring": "e",
"echoprint_version": 4.12,
"synchstring": "eJ",
"synch_version": 1,
"rhythmstring": "e",
"rhythm_version": 1
}
"#;
let audio_analysis_track: AudioAnalysisTrack = serde_json::from_str(&json).unwrap();
assert_eq!(audio_analysis_track.mode, Modality::Minor);
}
#[test]
fn test_simplified_playlist() {
let json = r#"
{
"collaborative": false,
"description": "Chegou o grande dia, aperte o play e partiu fim de semana!",
"external_urls": {
"spotify": "https://open.spotify.com/playlist/37i9dQZF1DX8mBRYewE6or"
},
"href": "https://api.spotify.com/v1/playlists/37i9dQZF1DX8mBRYewE6or",
"id": "37i9dQZF1DX8mBRYewE6or",
"images": [
{
"height": null,
"url": "https://i.scdn.co/image/ab67706f00000003206a95fa5badbe1d33b65e14",
"width": null
}
],
"name": "Sexta",
"owner": {
"display_name": "Spotify",
"external_urls": {
"spotify": "https://open.spotify.com/user/spotify"
},
"href": "https://api.spotify.com/v1/users/spotify",
"id": "spotify",
"type": "user",
"uri": "spotify:user:spotify"
},
"primary_color": null,
"public": null,
"snapshot_id": "MTYxMzM5MzIyMywwMDAwMDAwMGQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0Mjdl",
"tracks": {
"href": "https://api.spotify.com/v1/playlists/37i9dQZF1DX8mBRYewE6or/tracks",
"total": 62
},
"type": "playlist",
"uri": "spotify:playlist:37i9dQZF1DX8mBRYewE6or"
}
"#;
let simplified_playlist: SimplifiedPlaylist = serde_json::from_str(&json).unwrap();
assert_eq!(
simplified_playlist.tracks.href,
"https://api.spotify.com/v1/playlists/37i9dQZF1DX8mBRYewE6or/tracks"
);
assert_eq!(simplified_playlist.tracks.total, 62);
}
| 33.147614 | 634 | 0.54403 |
480aebbe2da3b1ec36010aa587ab407399b4e132 | 444 | pub mod simple_executor;
use alloc::boxed::Box;
use core::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
pub struct Task {
future: Pin<Box<dyn Future<Output = ()>>>,
}
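// A `Task` owns a pinned, heap-allocated future that yields `()`. A minimal
// usage sketch, assuming some `async fn example_task()` exists:
//
//     let task = Task::new(example_task());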
impl Task {
pub fn new(future: impl Future<Output = ()> + 'static) -> Task {
Task {
future: Box::pin(future),
}
}
    fn poll(&mut self, context: &mut Context) -> Poll<()> {
        self.future.as_mut().poll(context)
    }
}
| 19.304348 | 96 | 0.558559 |
6a3c3fc00c59b5aec408ce037fe1e568a212f6f8 | 619 | use serde_derive::Deserialize;
use serde_json::Value;
/// This object represents the Home Assistant Entity
///
/// [Entity](https://developers.home-assistant.io/docs/core/entity/)
#[derive(Debug, Deserialize, PartialEq)]
pub struct HassEntity {
pub entity_id: String,
pub state: String,
pub last_changed: String,
pub last_updated: String,
pub attributes: Value,
pub context: Context,
}
/// General construct used by HassEntity and HassEvent
#[derive(Debug, Deserialize, PartialEq)]
pub struct Context {
pub id: String,
pub parent_id: Option<String>,
pub user_id: Option<String>,
}
| 25.791667 | 68 | 0.71567 |
5d79e167a1e6125cc368cd2e1258662673ce1216 | 4,414 | // This example demonstrates the use of a will.
//
// The client will connect to the server with a will built from the --topic, --qos and --payload parameters. It will then subscribe to the same topic.
// If the client receives a Ctrl-C, it will exit without properly shutting down the client. Thus the client will not be able to send a DISCONNECT
// to the server, so the server will publish the will to all subscribers.
//
// To demonstrate the effect, run two or more instances of this example with different client IDs (and optionally, different QoS and payloads)
// but the same topic subscription, then kill one with Ctrl-C. The other instances should all receive the will.
//
// Example:
//
// cargo run --features client --example will -- --client-id 'example-will-1' --topic foo --qos 1 --payload '"goodbye, world" - example-will-1'
// cargo run --features client --example will -- --client-id 'example-will-2' --topic foo --qos 1 --payload '"goodbye, world" - example-will-2'
use futures_util::StreamExt;
mod common;
#[derive(Debug, structopt::StructOpt)]
struct Options {
/// Address of the MQTT server.
#[structopt(long)]
server: std::net::SocketAddr,
/// Client ID used to identify this application to the server. If not given, a server-generated ID will be used.
#[structopt(long)]
client_id: Option<mqtt3::proto::ByteStr>,
/// Username used to authenticate with the server, if any.
#[structopt(long)]
username: Option<mqtt3::proto::ByteStr>,
/// Password used to authenticate with the server, if any.
#[structopt(long)]
password: Option<mqtt3::proto::ByteStr>,
/// Maximum back-off time between reconnections to the server, in seconds.
#[structopt(long, default_value = "30", parse(try_from_str = common::duration_from_secs_str))]
max_reconnect_back_off: std::time::Duration,
/// Keep-alive time advertised to the server, in seconds.
#[structopt(long, default_value = "5", parse(try_from_str = common::duration_from_secs_str))]
keep_alive: std::time::Duration,
/// The topic of the will.
#[structopt(long)]
topic: mqtt3::proto::ByteStr,
/// The QoS of the will.
#[structopt(long, parse(try_from_str = common::qos_from_str))]
qos: mqtt3::proto::QoS,
/// The payload of the will.
#[structopt(long)]
payload: String,
}
#[tokio::main(flavor = "current_thread")]
async fn main() {
let Options {
server,
client_id,
username,
password,
max_reconnect_back_off,
keep_alive,
topic,
qos,
payload,
} = common::init("will");
let will = mqtt3::proto::Publication {
topic_name: topic.clone(),
qos,
retain: false,
payload: payload.into(),
};
let mut client = mqtt3::Client::new(
client_id,
username,
Some(will),
move || {
let password = password.clone();
Box::pin(async move {
let (stream, sink) = common::transport::tokio::connect(server).await?;
Ok::<_, std::io::Error>((stream, sink, password))
})
},
max_reconnect_back_off,
keep_alive,
);
let mut update_subscription_handle = client
.update_subscription_handle()
.expect("couldn't get subscription update handle");
tokio::spawn(async move {
let result = update_subscription_handle
.subscribe(mqtt3::proto::SubscribeTo {
topic_filter: topic,
qos,
})
.await;
if let Err(err) = result {
panic!("couldn't update subscription: {}", err);
}
});
while let Some(event) = client.next().await {
let event = event.unwrap();
if let mqtt3::Event::Publication(publication) = event {
match std::str::from_utf8(&publication.payload) {
Ok(s) => log::info!(
"Received publication: {:?} {:?} {:?}",
publication.topic_name,
s,
publication.qos,
),
Err(_) => log::info!(
"Received publication: {:?} {:?} {:?}",
publication.topic_name,
publication.payload,
publication.qos,
),
}
}
}
}
| 33.953846 | 150 | 0.592207 |
645dd07af2773a606e25772378fa4be16411d927 | 1,462 | // MIT License
//
// Copyright (c) 2022 Ferhat Geçdoğan All Rights Reserved.
// Distributed under the terms of the MIT License.
//
//
use std::io::{Write};
mod parse;
// function taken from gretea https://github.com/ferhatgec/gretea/blob/master/src/main.rs
fn create_and_write(path: &std::path::Path, generated: String) {
let mut file = match std::fs::File::create(path) {
Err(why) => panic!("elitetors: couldn't write to {}: {}", path.display(), why),
Ok(file) => file
};
    if let Err(why) = file.write_all(generated.as_bytes()) {
        panic!("elitetors: couldn't write to {}: {}", path.display(), why)
    }
}
fn main() {
let cli_arguments: Vec<_> = std::env::args().collect();
if cli_arguments.len() < 2 {
println!("elitetors - create rust source from elite\n\
----------\n\
{arg} file", arg = cli_arguments.get(0).unwrap());
std::process::exit(1);
}
let mut elite_read = elite::read::EliteFileData {
raw_data: elite::read::elite_file_contents::create_empty_string(),
unparsed: vec![]
};
elite_read.read_raw_file(cli_arguments.get(1).unwrap());
let x = elite::lexer::elite_lexer::init_lexer(&elite_read, true);
let y = parse::elite_cpp::parse(x);
    let z = format!("{}_out.rs", cli_arguments.get(1).unwrap());
let path = std::path::Path::new(&z);
create_and_write(path, y);
}
| 30.458333 | 89 | 0.603283 |
08d57b75210fc855347fc3903fb7bd1668fe7f4c | 4,371 | use std::str::FromStr;
use snafu::prelude::*;
pub struct Options {
pub base_url: http::Uri,
}
pub struct Reqwest {
options: Options,
client: reqwest_lib::blocking::Client,
}
impl crate::Restcrab for Reqwest {
type Error = Error;
type Options = Options;
type Crab = Reqwest;
fn options(&self) -> &Self::Options {
&self.options
}
fn options_mut(&mut self) -> &mut Self::Options {
&mut self.options
}
fn from_options(options: Options) -> Result<Reqwest, Error> {
Ok(Reqwest {
options,
client: reqwest_lib::blocking::Client::new(),
})
}
fn call<REQ: serde::Serialize, RES: for<'de> serde::Deserialize<'de>>(&self, request: crate::Request<REQ>) -> Result<Option<RES>, Self::Error> {
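    // Resolve the request URL against `options.base_url`: any scheme,
    // authority, or path/query the request itself carries wins over the
    // base, and a relative path is appended to the base path.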
let url: http::Uri = if request.url.host().is_some() && request.url.scheme().is_some() {
request.url.to_owned()
} else {
let mut base_parts = http::uri::Parts::from(self.options.base_url.clone());
let parts = request.url.to_owned().into_parts();
if parts.scheme.is_some() {
base_parts.scheme = parts.scheme;
}
if parts.authority.is_some() {
base_parts.authority = parts.authority;
}
      if let Some(path_and_query) = parts.path_and_query {
        let mut path = path_and_query.path().to_string();
        if !path.starts_with('/') {
          let mut base_path = if let Some(base_path_and_query) = base_parts.path_and_query {
            base_path_and_query.path().to_owned()
          } else {
            "/".to_string()
          };
          // Join the paths with exactly one '/' between base and relative part.
          if !base_path.ends_with('/') {
            base_path += "/";
          }
          path = base_path + &path;
        }
        // Re-attach the query string, restoring its '?' separator.
        let query = path_and_query.query().map(|q| format!("?{}", q)).unwrap_or_default();
        base_parts.path_and_query = Some(http::uri::PathAndQuery::from_str(&(path + &query)).map_err(|source| Error::ConstructingUrl { source })?);
      }
http::Uri::from_parts(base_parts)?
};
let mut req_builder = match &request.method {
&http::Method::HEAD => self.client.head(url.to_string()),
&http::Method::GET => self.client.get(url.to_string()),
&http::Method::POST => self.client.post(url.to_string()),
&http::Method::PUT => self.client.put(url.to_string()),
&http::Method::PATCH => self.client.patch(url.to_string()),
&http::Method::DELETE => self.client.delete(url.to_string()),
&http::Method::OPTIONS => self.client.request(reqwest_lib::Method::OPTIONS, url.to_string()),
&http::Method::CONNECT => self.client.request(reqwest_lib::Method::CONNECT, url.to_string()),
&http::Method::TRACE => self.client.request(reqwest_lib::Method::TRACE, url.to_string()),
method => return Err(Error::InvalidMethod { method: method.clone() }),
};
for (key, value) in &request.headers {
req_builder = req_builder.header(key, value);
}
if let Some(body) = &request.body {
req_builder = req_builder.body(serde_json::to_string(body).context(SerializingBodySnafu)?);
}
let response = req_builder.send().context(SendingRequestSnafu)?;
ensure!(response.status().is_success(), UnsuccessfulResponseCodeSnafu { response });
let text = response.text().context(DecodingResponseBodySnafu)?;
if !text.is_empty() {
serde_json::from_str::<RES>(text.as_str()).map(Some).context(DeserializingBodySnafu)
} else {
Ok(None)
}
}
}
#[derive(Debug, snafu::Snafu)]
pub enum Error {
#[snafu(display("Error parsing url: {source}"), context(false))]
ParsingUrl { source: http::uri::InvalidUriParts },
#[snafu(display("Error serializing body: {source}"))]
SerializingBody { source: serde_json::Error },
#[snafu(display("Error deserializing body: {source}"))]
DeserializingBody { source: serde_json::Error },
#[snafu(display("Error sending request: {source}"))]
SendingRequest { source: reqwest_lib::Error },
#[snafu(display("Unsuccessful response code: {response:?}"))]
UnsuccessfulResponseCode { response: reqwest_lib::blocking::Response },
#[snafu(display("Error converting response body to text: {source}"))]
DecodingResponseBody { source: reqwest_lib::Error },
#[snafu(display("Invalid method: {method}"))]
InvalidMethod { method: http::Method },
#[snafu(display("Error constructing url: {source}"))]
ConstructingUrl { source: http::uri::InvalidUri },
#[snafu(context(false))]
Restcrab { source: crate::Error },
}
| 32.864662 | 183 | 0.644246 |
d716c53e1d18ce68c4db2f1a79f42b7c9802cae4 | 790 | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that `#[plugin_registrar]` attribute is gated by `plugin_registrar`
// feature gate.
// the registration function isn't typechecked yet
#[plugin_registrar]
pub fn registrar() {}
//~^ ERROR compiler plugins are experimental
//~| HELP add #![feature(plugin_registrar)] to the crate attributes to enable
fn main() {}
| 39.5 | 77 | 0.743038 |
67448a096782174d8caba01b6670c2407bce232d | 16,198 | /*
* Copyright 2018 Intel Corporation
* Copyright 2019 Cargill Incorporated
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
use std::fs::{create_dir_all, metadata, OpenOptions};
use std::io::prelude::*;
use std::os::unix::fs::OpenOptionsExt;
use std::path::{Path, PathBuf};
#[cfg(target_os = "linux")]
use std::os::linux::fs::MetadataExt;
#[cfg(not(target_os = "linux"))]
use std::os::unix::fs::MetadataExt;
use crate::error::CliError;
use cylinder::{secp256k1::Secp256k1Context, Context};
use super::chown;
pub enum ConflictStrategy {
Force,
Skip,
Error,
}
pub fn create_key_pair(
key_dir: &Path,
private_key_path: PathBuf,
public_key_path: PathBuf,
conflict_strategy: ConflictStrategy,
change_permissions: bool,
) -> Result<(), CliError> {
match conflict_strategy {
ConflictStrategy::Force => (),
ConflictStrategy::Skip => {
if public_key_path.exists() && !private_key_path.exists() {
return Err(CliError::UserError(format!(
"{} already exists without a corresponding private key. \
Rerun with --force to overwrite existing files",
public_key_path.as_path().display(),
)));
}
if !public_key_path.exists() && private_key_path.exists() {
return Err(CliError::UserError(format!(
"{} already exists without a corresponding public key. \
Rerun with --force to overwrite existing files",
private_key_path.as_path().display(),
)));
}
if public_key_path.exists() && private_key_path.exists() {
debug!("keys files exist; skipping generation");
return Ok(());
}
}
ConflictStrategy::Error => {
if public_key_path.exists() || private_key_path.exists() {
return Err(CliError::UserError(format!(
"Key files already exist: {} and {}. Rerun with --force to \
overwrite existing files",
public_key_path.as_path().display(),
private_key_path.as_path().display(),
)));
}
}
}
let context = Secp256k1Context::new();
let private_key = context.new_random_private_key();
let public_key = context
.get_public_key(&private_key)
.map_err(|err| CliError::UserError(format!("Failed to get public key: {}", err)))?;
let key_dir_info = metadata(key_dir).map_err(|err| {
CliError::UserError(format!(
"Failed to read key directory '{}': {}",
key_dir.display(),
err
))
})?;
#[cfg(not(target_os = "linux"))]
let (key_dir_uid, key_dir_gid) = (key_dir_info.uid(), key_dir_info.gid());
#[cfg(target_os = "linux")]
let (key_dir_uid, key_dir_gid) = (key_dir_info.st_uid(), key_dir_info.st_gid());
{
if private_key_path.exists() {
info!(
"Overwriting private key file: {}",
private_key_path.display()
);
} else {
info!("Writing private key file: {}", private_key_path.display());
}
let private_key_file = OpenOptions::new()
.write(true)
.create(true)
.mode(0o600)
.open(private_key_path.as_path())
.map_err(|err| {
CliError::UserError(format!(
"Failed to open private key file '{}': {}",
private_key_path.display(),
err
))
})?;
writeln!(&private_key_file, "{}", private_key.as_hex()).map_err(|err| {
CliError::UserError(format!(
"Failed to write to private key file '{}': {}",
private_key_path.display(),
err
))
})?;
}
{
if public_key_path.exists() {
info!("Overwriting public key file: {}", public_key_path.display());
} else {
info!("Writing public key file: {}", public_key_path.display());
}
let public_key_file = OpenOptions::new()
.write(true)
.create(true)
.mode(0o644)
.open(public_key_path.as_path())
.map_err(|err| {
CliError::UserError(format!(
"Failed to open public key file '{}': {}",
public_key_path.display(),
err
))
})?;
writeln!(&public_key_file, "{}", public_key.as_hex()).map_err(|err| {
CliError::UserError(format!(
"Failed to write to public key file '{}': {}",
public_key_path.display(),
err
))
})?;
}
if change_permissions {
chown(private_key_path.as_path(), key_dir_uid, key_dir_gid)?;
chown(public_key_path.as_path(), key_dir_uid, key_dir_gid)?;
}
Ok(())
}
/// Generates a public/private key pair that can be used to sign transactions.
///
/// The keys are written to `key_dir` as `<key_name>.priv` and `<key_name>.pub`;
/// the directory is created if it does not already exist. Existing key files
/// are handled according to `conflict_strategy`.
pub fn generate_keys(
key_name: String,
conflict_strategy: ConflictStrategy,
key_dir: PathBuf,
) -> Result<(), CliError> {
create_dir_all(key_dir.as_path())
.map_err(|err| CliError::UserError(format!("Failed to create keys directory: {}", err)))?;
let private_key_path = key_dir.join(&key_name).with_extension("priv");
let public_key_path = key_dir.join(&key_name).with_extension("pub");
create_key_pair(
&key_dir,
private_key_path,
public_key_path,
conflict_strategy,
true,
)?;
Ok(())
}
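// A minimal usage sketch (the key directory below is a made-up example path):
//
//     generate_keys(
//         "grid_key".to_string(),
//         ConflictStrategy::Error,
//         PathBuf::from("/etc/grid/keys"),
//     )?;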
#[cfg(test)]
mod tests {
use super::*;
use std::fs::File;
use tempfile::TempDir;
/// Validate that create_key_pair skips when the keypairs already exist
#[test]
fn create_keypair_mode_skip_skips_existing_keypairs() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let public_key_path = temp_dir.path().join("public_key");
let private_key_path = temp_dir.path().join("private_key");
File::create(public_key_path.clone())
.expect("Failed to create file")
.write_all(b"test-pubkey")
.expect("Could not write file");
File::create(private_key_path.clone())
.expect("Failed to create file")
.write_all(b"test-privkey")
.expect("Could not write file");
create_key_pair(
temp_dir.path(),
private_key_path,
public_key_path.clone(),
ConflictStrategy::Skip,
true,
)
.expect("Could not create keypair");
let mut contents = String::new();
File::open(public_key_path)
.unwrap()
.read_to_string(&mut contents)
.unwrap();
assert_eq!(contents, "test-pubkey");
}
/// Validate that create_key_pair fails if the public key exists, but the private key is
/// missing
#[test]
fn create_keypair_mode_skip_fails_missing_private_key() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let public_key_path = temp_dir.path().join("public_key");
let private_key_path = temp_dir.path().join("private_key");
File::create(public_key_path.clone())
.expect("Failed to create file")
.write_all(b"test-privkey")
.expect("Could not write file");
let result = create_key_pair(
temp_dir.path(),
private_key_path.clone(),
public_key_path.clone(),
ConflictStrategy::Skip,
true,
);
assert!(result.is_err());
let expected = format!(
"{} already exists without a corresponding private key. \
Rerun with --force to overwrite existing files",
public_key_path.as_path().display(),
);
match result.unwrap_err() {
CliError::UserError(message) => {
assert_eq!(message, expected);
}
clierror => panic!(
"received unexpected result {}, expected {}",
clierror, expected
),
}
}
/// Validate that create_key_pair fails if the private key exists, but the public key is
/// missing
#[test]
fn create_keypair_mode_skip_fails_missing_public_key() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let public_key_path = temp_dir.path().join("public_key");
let private_key_path = temp_dir.path().join("private_key");
File::create(private_key_path.clone())
.expect("Failed to create file")
.write_all(b"test-privkey")
.expect("Could not write file");
let result = create_key_pair(
temp_dir.path(),
private_key_path.clone(),
public_key_path.clone(),
ConflictStrategy::Skip,
true,
);
assert!(result.is_err());
let expected = format!(
"{} already exists without a corresponding public key. \
Rerun with --force to overwrite existing files",
private_key_path.as_path().display(),
);
match result.unwrap_err() {
CliError::UserError(message) => {
assert_eq!(message, expected);
}
clierror => panic!(
"received unexpected result {}, expected CliError::UserError({})",
clierror, expected
),
}
}
/// Validate that create_key_pair writes new keys successfully on a success condition
#[test]
fn create_keypair_mode_skip_successfully_writes_new_keys() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let public_key_path = temp_dir.path().join("public_key");
let private_key_path = temp_dir.path().join("private_key");
create_key_pair(
temp_dir.path(),
private_key_path.clone(),
public_key_path.clone(),
ConflictStrategy::Skip,
true,
)
.unwrap();
assert!(public_key_path.exists());
assert!(private_key_path.exists());
}
/// Validate that create_key_pair force option will overwrite an existing pubkey
#[test]
fn create_keypair_mode_force_returns_different_pubkey() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let public_key_path = temp_dir.path().join("public_key");
let private_key_path = temp_dir.path().join("private_key");
let public_key_content = "test-pubkey";
File::create(public_key_path.clone())
.expect("Failed to create file")
.write_all(public_key_content.as_bytes())
.expect("Could not write file");
File::create(private_key_path.clone())
.expect("Failed to create file")
.write_all(b"test-privkey")
.expect("Could not write file");
create_key_pair(
temp_dir.path(),
private_key_path.clone(),
public_key_path.clone(),
ConflictStrategy::Force,
true,
)
.unwrap();
let mut contents = String::new();
File::open(public_key_path)
.unwrap()
.read_to_string(&mut contents)
.unwrap();
assert_ne!(
contents, public_key_content,
"result must not be equal to existing pubkey"
);
}
/// Validate that create_key_pair force successfully creates both a private and public key
#[test]
fn create_keypair_mode_force_successfully_writes_new_keys() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let public_key_path = temp_dir.path().join("public_key");
let private_key_path = temp_dir.path().join("private_key");
create_key_pair(
temp_dir.path(),
private_key_path.clone(),
public_key_path.clone(),
ConflictStrategy::Force,
true,
)
.unwrap();
assert!(public_key_path.exists());
assert!(private_key_path.exists());
}
/// Validate that create_key_pair Error fails on existing keypairs
#[test]
fn create_keypair_mode_fail_fails_on_existing_keypairs() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let public_key_path = temp_dir.path().join("public_key");
let private_key_path = temp_dir.path().join("private_key");
let public_key_content = b"test-privkey";
File::create(public_key_path.clone())
.expect("Failed to create file")
.write_all(public_key_content)
.expect("Could not write file");
File::create(private_key_path.clone())
.expect("Failed to create file")
.write_all(b"test-privkey")
.expect("Could not write file");
let result = create_key_pair(
temp_dir.path(),
private_key_path.clone(),
public_key_path.clone(),
ConflictStrategy::Error,
true,
);
assert!(
result.is_err(),
"result must be an error if one of the keypairs exists"
);
let expected = format!(
"Key files already exist: {} and {}. Rerun with --force to \
overwrite existing files",
public_key_path.as_path().display(),
private_key_path.as_path().display(),
);
match result.unwrap_err() {
CliError::UserError(message) => {
assert_eq!(message, expected);
}
clierror => panic!(
"received unexpected result {}, expected CliError::UserError({})",
clierror, expected
),
}
}
/// Validate that create_key_pair Error succeeds if there are no existing keypairs
#[test]
fn create_keypair_mode_fail_successfully_writes_new_keys() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let public_key_path = temp_dir.path().join("public_key");
let private_key_path = temp_dir.path().join("private_key");
create_key_pair(
temp_dir.path(),
private_key_path.clone(),
public_key_path.clone(),
ConflictStrategy::Error,
true,
)
.unwrap();
assert!(public_key_path.exists());
assert!(private_key_path.exists());
}
/// Validate that generate_keys successfully generates a public and private key in the correct
/// path
#[test]
fn generate_keys_succeeds() {
let temp_dir = TempDir::new().expect("Failed to create temp dir");
let public_key_path = temp_dir.path().join("grid_key.pub");
let private_key_path = temp_dir.path().join("grid_key.priv");
generate_keys(
"grid_key".to_string(),
ConflictStrategy::Error,
temp_dir.path().to_path_buf(),
)
.unwrap();
assert!(public_key_path.exists());
assert!(private_key_path.exists());
}
}
| 32.526104 | 98 | 0.568712 |
c171f29f0e05ec2d2c361abe1a6ef1c35b3afecc | 7,029 | // Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
use crate::{
event_store::EventStore,
ledger_store::LedgerStore,
metrics::{
BACKUP_EPOCH_ENDING_EPOCH, BACKUP_STATE_SNAPSHOT_LEAF_IDX, BACKUP_STATE_SNAPSHOT_VERSION,
BACKUP_TXN_VERSION,
},
state_store::StateStore,
transaction_store::TransactionStore,
};
use anyhow::{ensure, Result};
use aptos_crypto::hash::HashValue;
use aptos_types::{
contract_event::ContractEvent,
ledger_info::LedgerInfoWithSignatures,
proof::{SparseMerkleRangeProof, TransactionAccumulatorRangeProof, TransactionInfoWithProof},
state_store::{state_key::StateKey, state_value::StateValue},
transaction::{Transaction, TransactionInfo, Version},
};
use itertools::zip_eq;
use serde::{Deserialize, Serialize};
use std::{fmt, sync::Arc};
/// `BackupHandler` provides functionalities for AptosDB data backup.
#[derive(Clone)]
pub struct BackupHandler {
ledger_store: Arc<LedgerStore>,
transaction_store: Arc<TransactionStore>,
state_store: Arc<StateStore>,
event_store: Arc<EventStore>,
}
impl BackupHandler {
pub(crate) fn new(
ledger_store: Arc<LedgerStore>,
transaction_store: Arc<TransactionStore>,
state_store: Arc<StateStore>,
event_store: Arc<EventStore>,
) -> Self {
Self {
ledger_store,
transaction_store,
state_store,
event_store,
}
}
/// Gets an iterator that yields a range of transactions.
pub fn get_transaction_iter(
&self,
start_version: Version,
num_transactions: usize,
) -> Result<impl Iterator<Item = Result<(Transaction, TransactionInfo, Vec<ContractEvent>)>> + '_>
{
let txn_iter = self
.transaction_store
.get_transaction_iter(start_version, num_transactions)?;
let txn_info_iter = self
.ledger_store
.get_transaction_info_iter(start_version, num_transactions)?;
let events_iter = self
.event_store
.get_events_by_version_iter(start_version, num_transactions)?;
let zipped = zip_eq(zip_eq(txn_iter, txn_info_iter), events_iter)
.enumerate()
.map(move |(idx, ((txn_res, txn_info_res), events_res))| {
BACKUP_TXN_VERSION.set((start_version.wrapping_add(idx as u64)) as i64);
Ok((txn_res?, txn_info_res?, events_res?))
});
Ok(zipped)
}
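    // A minimal usage sketch (`handler` stands in for a constructed
    // `BackupHandler`; error handling elided):
    //
    //     for entry in handler.get_transaction_iter(0, 100)? {
    //         let (txn, txn_info, events) = entry?;
    //         // ... write the chunk out to backup storage ...
    //     }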
/// Gets the proof for a transaction chunk.
/// N.B. the `LedgerInfo` returned will always be in the same epoch of the `last_version`.
pub fn get_transaction_range_proof(
&self,
first_version: Version,
last_version: Version,
) -> Result<(TransactionAccumulatorRangeProof, LedgerInfoWithSignatures)> {
ensure!(
last_version >= first_version,
"Bad transaction range: [{}, {}]",
first_version,
last_version
);
let num_transactions = last_version - first_version + 1;
let epoch = self.ledger_store.get_epoch(last_version)?;
let ledger_info = self.ledger_store.get_latest_ledger_info_in_epoch(epoch)?;
let accumulator_proof = self.ledger_store.get_transaction_range_proof(
Some(first_version),
num_transactions,
ledger_info.ledger_info().version(),
)?;
Ok((accumulator_proof, ledger_info))
}
/// Gets an iterator which can yield all accounts in the state tree.
pub fn get_account_iter(
&self,
version: Version,
) -> Result<Box<dyn Iterator<Item = Result<(StateKey, StateValue)>> + Send + Sync>> {
let iterator = self
.state_store
.get_state_key_and_value_iter(version, HashValue::zero())?
.enumerate()
.map(move |(idx, res)| {
BACKUP_STATE_SNAPSHOT_VERSION.set(version as i64);
BACKUP_STATE_SNAPSHOT_LEAF_IDX.set(idx as i64);
res
});
Ok(Box::new(iterator))
}
    /// Gets the proof covering a range of accounts.
pub fn get_account_state_range_proof(
&self,
rightmost_key: HashValue,
version: Version,
) -> Result<SparseMerkleRangeProof> {
self.state_store
.get_value_range_proof(rightmost_key, version)
}
/// Gets the epoch, committed version, and synced version of the DB.
pub fn get_db_state(&self) -> Result<Option<DbState>> {
self.ledger_store
.get_startup_info()?
.map(
|(latest_li, epoch_state_if_not_in_li, synced_version_opt)| {
Ok(DbState {
epoch: latest_li
.ledger_info()
.next_epoch_state()
.unwrap_or_else(|| {
epoch_state_if_not_in_li
.as_ref()
.expect("EpochState must exist")
})
.epoch,
committed_version: latest_li.ledger_info().version(),
synced_version: synced_version_opt
.unwrap_or_else(|| latest_li.ledger_info().version()),
})
},
)
.transpose()
}
/// Gets the proof of the state root at specified version.
/// N.B. the `LedgerInfo` returned will always be in the same epoch of the version.
pub fn get_state_root_proof(
&self,
version: Version,
) -> Result<(TransactionInfoWithProof, LedgerInfoWithSignatures)> {
let epoch = self.ledger_store.get_epoch(version)?;
let ledger_info = self.ledger_store.get_latest_ledger_info_in_epoch(epoch)?;
let txn_info = self
.ledger_store
.get_transaction_info_with_proof(version, ledger_info.ledger_info().version())?;
Ok((txn_info, ledger_info))
}
pub fn get_epoch_ending_ledger_info_iter(
&self,
start_epoch: u64,
end_epoch: u64,
) -> Result<impl Iterator<Item = Result<LedgerInfoWithSignatures>> + '_> {
Ok(self
.ledger_store
.get_epoch_ending_ledger_info_iter(start_epoch, end_epoch)?
.enumerate()
.map(move |(idx, li)| {
BACKUP_EPOCH_ENDING_EPOCH.set((start_epoch + idx as u64) as i64);
li
}))
}
}
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub struct DbState {
pub epoch: u64,
pub committed_version: Version,
pub synced_version: Version,
}
impl fmt::Display for DbState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"epoch: {}, committed_version: {}, synced_version: {}",
self.epoch, self.committed_version, self.synced_version,
)
}
}
| 35.145 | 102 | 0.597951 |
9087cc0dea7c4b5697271bcf4d6a293997447287 | 3,442 | // Copyright (c) 2021, COSIC-KU Leuven, Kasteelpark Arenberg 10, bus 2452, B-3001 Leuven-Heverlee, Belgium.
// Copyright (c) 2021, Cosmian Tech SAS, 53-55 rue La Boétie, Paris, France.
use crate::bit_protocols::*;
use crate::integer::*;
use crate::slice::*;
use scale::*;
/**************************************
* Helper routines for floating point *
**************************************/
/* Input an integer of size K,
* Output float tuple with mantissa of size L
*/
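/* Intended contract (stated here as a reading aid; assumed from the standard
 * Int2Fl construction, not spelled out in the original source): the returned
 * tuple (v, p, z, s, err) satisfies
 *
 *     a = (1 - 2*s) * v * 2^p
 *
 * where s is the sign bit, z flags a == 0, err is an error flag, and for
 * a != 0 the mantissa is normalised so that 2^(L-1) <= v < 2^L. The same
 * contract applies to Secret_Int2Fl below.
 */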
#[allow(non_snake_case)]
pub fn Clear_Int2Fl<const K: u64, const L: u64>(
a_int: ClearInteger<K>,
_: ConstU64<L>,
) -> (ClearModp, ClearModp, ClearModp, ClearModp, ClearModp)
where
ConstU64<{ L - 1 }>: ,
ConstU64<{ L + 1 }>: ,
ConstU64<{ K - 1 }>: ,
ConstU64<{ K + 1 }>: ,
ConstU64<{ K - L - 1 }>: ,
{
let s = a_int.ltz();
let z = a_int.eqz();
let a = a_int.rep();
let aa = (ClearModp::from(1) - s - s) * a;
let vec_a = BitDec_ClearModp(aa, K - 1);
let mut rev_a: Slice<ClearModp> = Slice::uninitialized(K - 1);
for i in 0..K - 1 {
rev_a.set(i, &*vec_a.get_unchecked(K - 2 - i));
}
let vec_b = rev_a.PreOr();
let one = ClearModp::from(1_i64);
let mut v = one;
let mut p = ClearModp::from((K - 1) as i64);
let mut twop = ClearModp::from(1_i64);
for i in 0..K - 1 {
v = v + twop * (one - *vec_b.get_unchecked(i));
p = p - *vec_b.get_unchecked(i);
twop = twop + twop;
}
p = -p;
v = a * v;
if (K - 1) > L {
let mut v_int: ClearInteger<{ K - 1 }> = ClearInteger::from(v);
v_int = v_int.Trunc(ConstU64::<{ K - L - 1 }>, ConstBool::<false>);
v = v_int.rep();
} else {
v = v * modp_two_power(L - K + 1);
}
p = (p + ClearModp::from((K - L - 1) as i64)) * (ClearModp::from(1) - z);
let err = ClearModp::from(0_i64);
(v, p, z, s, err)
}
/* Input an integer of size K,
* Output float tuple with mantissa of size L
*/
#[allow(non_snake_case)]
pub fn Secret_Int2Fl<const K: u64, const L: u64, const KAPPA: u64>(
a_int: SecretInteger<K, KAPPA>,
_: ConstU64<L>,
) -> (SecretModp, SecretModp, SecretModp, SecretModp, SecretModp)
where
ConstU64<{ L - 1 }>: ,
ConstU64<{ L + 1 }>: ,
ConstU64<{ K - 1 }>: ,
ConstU64<{ K + 1 }>: ,
ConstU64<{ K - L - 1 }>: ,
{
let s = a_int.ltz();
let z = a_int.eqz();
let a = a_int.rep();
let aa = (SecretModp::from(1) - s - s) * a;
let vec_a = BitDec::<{ K - 1 }, { K - 1 }, KAPPA>(aa);
let mut rev_a: Slice<SecretModp> = Slice::uninitialized(K - 1);
for i in 0..K - 1 {
rev_a.set(i, &*vec_a.get_unchecked(K - 2 - i));
}
let vec_b = rev_a.PreOr();
let one = SecretModp::from(1_i64);
let mut v = one;
let mut p = SecretModp::from((K - 1) as i64);
let mut twop = SecretModp::from(1_i64);
for i in 0..K - 1 {
v = v + twop * (one - *vec_b.get_unchecked(i));
p = p - *vec_b.get_unchecked(i);
twop = twop + twop;
}
p = -p;
v = a * v;
if (K - 1) > L {
let mut v_int: SecretInteger<{ K - 1 }, KAPPA> = SecretInteger::from(v);
v_int = v_int.Trunc(ConstU64::<{ K - L - 1 }>, ConstBool::<false>);
v = v_int.rep();
} else {
v = v * modp_two_power(L - K + 1);
}
p = (p + SecretModp::from((K - L - 1) as i64)) * (SecretModp::from(1) - z);
let err = SecretModp::from(0_i64);
(v, p, z, s, err)
}
| 31.87037 | 107 | 0.527891 |
67b58c02c62e9fd7075fbcabe9fee82e0bce4e64 | 4,825 | use bench;
use stats::Fact;
use content_length::ContentLength;
/// The engine for making requests. It implements the request loop and produces
/// facts for the stats collector to process.
#[derive(Clone)]
pub struct Engine {
urls: Vec<String>,
method: Method,
kind: Kind,
}
/// The methods that are supported by the current implementations. These are currently
/// body-less methods so that we don't need to load up any additional content.
#[derive(Clone, Copy)]
pub enum Method {
Get,
Head,
}
const DEFAULT_METHOD: Method = Method::Get;
#[derive(Clone, Copy)]
enum Kind {
Reqwest,
Hyper,
}
const DEFAULT_KIND: Kind = Kind::Reqwest;
impl Engine {
/// Creates a new engine. The engine will default to using `reqwest`
pub fn new(urls: Vec<String>) -> Engine {
Engine {
urls,
method: DEFAULT_METHOD,
kind: DEFAULT_KIND,
}
}
/// Sets the method to use with the requests
pub fn with_method(mut self, method: Method) -> Self {
self.method = method;
self
}
/// Sets the engine to be a hyper engine
pub fn with_hyper(mut self) -> Self {
self.kind = Kind::Hyper;
self
}
/// Consumes self to start up the engine and begins making requests. It will callback
/// to the collector to allow the caller to capture requests.
pub fn run<F>(self, requests: usize, collect: F)
where
F: FnMut(Fact),
{
match self.kind {
Kind::Reqwest => self.run_reqwest(requests, collect),
Kind::Hyper => self.run_hyper(requests, collect),
};
}
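    // A minimal usage sketch (the URL is an example; mirrors the tests below):
    //
    //     let mut facts = Vec::new();
    //     Engine::new(vec!["https://www.google.com".to_string()])
    //         .with_method(Method::Head)
    //         .run(10, |fact| facts.push(fact));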
fn run_reqwest<F>(&self, requests: usize, mut collect: F)
where
F: FnMut(Fact),
{
use reqwest::{self, Client, Request};
let client = Client::new();
let method = match self.method {
Method::Get => reqwest::Method::Get,
Method::Head => reqwest::Method::Head,
};
for n in 0..requests {
let url = &self.urls[n % self.urls.len()];
let request = Request::new(method.clone(), url.parse().expect("Invalid url"));
let mut len = 0;
let (resp, duration) = bench::time_it(|| {
let mut resp = client
.execute(request)
.expect("Failure to even connect is no good");
if let Ok(body) = resp.text() {
len = body.len();
}
resp
});
collect(Fact::record(
ContentLength::new(len as u64),
resp.status().as_u16(),
duration,
));
}
}
fn run_hyper<F>(&self, requests: usize, mut collect: F)
where
F: FnMut(Fact),
{
use hyper::{self, Client, Request, Uri};
use hyper_tls::HttpsConnector;
use tokio_core::reactor::Core;
use futures::{Future, Stream};
let mut core = Core::new().expect("Setting up tokio core failed");
let handle = core.handle();
let client = Client::configure()
.connector(HttpsConnector::new(1, &handle).expect("To set up a http connector"))
.build(&handle);
let urls: Vec<Uri> = self.urls.iter().map(|url| url.parse().unwrap()).collect();
let method = match self.method {
Method::Get => hyper::Method::Get,
Method::Head => hyper::Method::Head,
};
for n in 0..requests {
let uri = &urls[n % urls.len()];
let request = client
.request(Request::new(method.clone(), uri.clone()))
.and_then(|response| {
let status = response.status().as_u16();
response
.body()
.concat2()
.map(move |body| (status, body.len() as u64))
});
let ((status, content_length), duration) =
bench::time_it(|| core.run(request).expect("reactor run"));
collect(Fact::record(
ContentLength::new(content_length),
status,
duration,
));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn reqwest_engine_can_collect_facts() {
let eng = Engine::new(vec!["https://www.google.com".to_string()]);
let mut fact: Option<Fact> = None;
eng.run(1, |f| fact = Some(f));
assert!(fact.is_some());
}
#[test]
fn hyper_engine_can_collect_facts() {
let eng = Engine::new(vec!["https://www.google.com".to_string()]).with_hyper();
let mut fact: Option<Fact> = None;
eng.run(1, |f| fact = Some(f));
assert!(fact.is_some());
}
}
| 29.601227 | 92 | 0.533264 |
f57ae67d855b03210285ed076ef45334ff311cb5 | 230 | //! The module for the advanced usage.
pub use crate::{dependency::DependencyClone, get_dependencies::GetDependencies};
pub mod container {
//! Things needs to define your own containers.
pub use crate::container::*;
}
| 23 | 80 | 0.721739 |
1d1fdb8a5bf9bcf955ca028b324d79bd54b8b3e9 | 340 | // primitive_types3.rs
// Create an array with at least 100 elements in it where the ??? is.
// Execute `rustlings hint primitive_types3` for hints!
fn main() {
    let a = [321; 100];
if a.len() >= 100 {
println!("Wow, that's a big array!");
} else {
println!("Meh, I eat arrays like that for breakfast.");
}
}
| 24.285714 | 69 | 0.594118 |
7aa7b3d0f52526df160345aa2f2c6210c30e094a | 9,989 | //! Functionality to boot application cores on x86.
//!
//! This code is closely intertwingled with the assembly code in `start_ap.S`,
//! make sure these two files are and stay in sync.
use alloc::sync::Arc;
use core::sync::atomic::AtomicBool;
use apic::ApicDriver;
use x86::apic::ApicId;
use x86::current::paging::PAddr;
use crate::memory::vspace::MapAction;
use crate::round_up;
use crate::stack::Stack;
use super::kcb;
use super::memory::BASE_PAGE_SIZE;
/// The 16-bit segment where our bootstrap code is.
const X86_64_REAL_MODE_SEGMENT: u16 = 0x0600;
/// The page number in real mode (this is what the IPI wants)
const REAL_MODE_PAGE: u8 = (X86_64_REAL_MODE_SEGMENT >> 8) as u8;
/// The offset, what we have to add to get a physical address.
const REAL_MODE_LINEAR_OFFSET: u16 = X86_64_REAL_MODE_SEGMENT << 4;
/// The corresponding 64-bit address (0 + offset in our case).
const REAL_MODE_BASE: usize = REAL_MODE_LINEAR_OFFSET as usize;
/// Return the address range of `start_ap.S` as (start, end)
///
/// # Note
/// The addresses returned are start and end in kernel space
/// (above KERNEL_BASE, within the relocated ELF file). But
/// when we boot we have to copy the code in a lower address region
/// where a 16-bit mode CPU can execute.
fn ap_code_address_range() -> (PAddr, PAddr) {
extern "C" {
/// The first symbol in `start_ap.S`
static x86_64_start_ap: *const u8;
/// The very last symbol in `start_ap.S`
static x86_64_start_ap_end: *const u8;
}
unsafe {
(
PAddr::from(&x86_64_start_ap as *const _ as u64),
PAddr::from(&x86_64_start_ap_end as *const _ as u64),
)
}
}
/// Calculate the size of the bootstrap code-block in `start_ap.S`
///
/// We do that by taking the difference of the first and last symbol
/// in the file.
fn get_bootstrap_code_size() -> usize {
    let (start_address, end_address) = ap_code_address_range();
    let bootstrap_code_size = end_address - start_address;
    trace!("bootstrap_code_size = {:#x}", bootstrap_code_size);
    bootstrap_code_size.into()
}
/// Puts the bootstrap code at a well defined segment that an
/// app core (booting in 16-bit mode) can read from (for us this is
/// REAL_MODE_BASE).
///
/// # Safety
/// Let's hope no one else put something there (we should be ok
/// by just excluding everything below 1 MiB from ever being
/// allocated).
unsafe fn copy_bootstrap_code() {
    let boot_code_size = get_bootstrap_code_size();
    let ap_bootstrap_code: &'static [u8] = get_original_bootstrap_code();
    let real_mode_destination: &'static mut [u8] = get_bootstrap_code_region();
let kcb = kcb::get_kcb();
kcb.arch
.init_vspace()
.map_identity(
PAddr::from(REAL_MODE_BASE as u64),
round_up!(boot_code_size, BASE_PAGE_SIZE),
MapAction::ReadWriteExecuteKernel,
)
.expect("Can't map bootstrap code");
real_mode_destination.copy_from_slice(ap_bootstrap_code);
}
/// Initializes the information passed to the APP core by
/// overwriting a bunch of declared symbols inside of `start_ap.S`
/// to pass arguments, set the correct stack and page-table
/// and jump to a custom entry function.
///
/// This includes the entry rust function, a pointer
/// to the initial address space, a pointer to the
/// initial stack.
///
/// # Safety
/// To be safe this function should only be invoked
/// during initialization on the BSP core and after we invoked `copy_bootstrap_code`.
///
/// `arg` is read on the new core so we have to ensure whatever they point to
/// lives long enough.
unsafe fn setup_bootstrap_code<A>(
entry_fn: u64,
arg: Arc<A>,
initialized: &AtomicBool,
pml4: u64,
stack_top: u64,
) {
// Symbols from `start_ap.S`
extern "C" {
/// Bootstrap code jumps to this address after initialization.
static x86_64_init_ap_absolute_entry: *mut u64;
/// Bootstrap core switches to this address space during initialization.
static x86_64_init_ap_init_pml4: *mut u64;
/// Bootstrap core uses this stack address when starting to execute at `x86_64_init_ap_absolute_entry`.
static x86_64_init_ap_stack_ptr: *mut u64;
// TODO: the *const u64 below should be *const A
// but this crashes rustc:
// reported at: https://github.com/rust-lang/rust/issues/65025
/// First argument for entry fn.
static x86_64_init_ap_arg1: *mut u64;
/// The ap lock to let us know when the app core currently booting is done
/// with the initialization code section.
///
/// (And therefore read the content from `x86_64_init_ap_absolute_entry`,
/// `x86_64_init_ap_init_pml4`, `x86_64_init_ap_stack_ptr` + args and
/// no longer needs it).
static x86_64_init_ap_lock: *mut u64;
}
// TODO: tried to make the following code less ugly but failed:
unsafe fn to_bootstrap_pointer(kernel_text_addr: u64) -> *mut u64 {
let (start_addr, _end_addr) = ap_code_address_range();
assert!(kernel_text_addr > start_addr.as_u64());
core::mem::transmute(kernel_text_addr - start_addr.as_u64() + REAL_MODE_BASE as u64)
}
// Init function
let entry_pointer: *mut u64 =
to_bootstrap_pointer(&x86_64_init_ap_absolute_entry as *const _ as u64);
*entry_pointer = entry_fn;
// Arguments
let arg1_pointer: *mut u64 = to_bootstrap_pointer(&x86_64_init_ap_arg1 as *const _ as u64);
// We get the address of the `ptr: NonNull<ArcInner<T>>`,
// the 1st (private) member inside the Arc, and pass it to the app core, there is probably
// a better/safer and much less ugly way to express this but we just use transmute for now:
*arg1_pointer = core::mem::transmute::<Arc<A>, u64>(arg);
// Page-table
let pml4_pointer: *mut u64 = to_bootstrap_pointer(&x86_64_init_ap_init_pml4 as *const _ as u64);
*pml4_pointer = pml4;
// Stack
let stack_pointer: *mut u64 =
to_bootstrap_pointer(&x86_64_init_ap_stack_ptr as *const _ as u64);
*stack_pointer = stack_top;
// Reset the initialization lock
// The APP core is supposed to set this to `true` after booting is done...
let ap_lock_pointer: *mut u64 = to_bootstrap_pointer(&x86_64_init_ap_lock as *const _ as u64);
*ap_lock_pointer = &*initialized as *const _ as u64;
trace!(
"x86_64_init_ap_absolute_entry is at {:p} and set to {:#x}",
entry_pointer,
*entry_pointer
);
trace!(
"x86_64_init_ap_init_pml4 is at {:p} and set to {:#x}",
pml4_pointer,
*pml4_pointer
);
trace!(
"x86_64_init_ap_stack_ptr is at {:p} and set to {:#x}",
stack_pointer,
*stack_pointer
);
trace!(
"x86_64_init_ap_lock is at {:p} and set to {:#x}={:#x}",
ap_lock_pointer,
*ap_lock_pointer,
*((*ap_lock_pointer) as *const u64)
);
trace!(
"x86_64_init_ap_arg1 is at {:p} and set to {:#x}={:#x}",
arg1_pointer,
*arg1_pointer,
*((*arg1_pointer) as *const u64)
);
// TODO: probably want a fence here
}
/// Returns a slice to the bootstrap code in the kernel ELF .text section
///
/// Ideally this region of memory shouldn't be modified (it's mapped read-only by
/// default anyways). We first copy it into a low memory region and then do the
/// final adjustments there.
fn get_original_bootstrap_code() -> &'static [u8] {
let (start_address, _end_address) = ap_code_address_range();
    let boot_code_size = get_bootstrap_code_size();
// This is safe since this is in the kernel binary and always only
// mapped read-only.
let ap_bootstrap_code: &'static [u8] =
unsafe { core::slice::from_raw_parts(start_address.as_u64() as *const u8, boot_code_size) };
ap_bootstrap_code
}
/// Returns a slice to the bootstrap code region from where we boot new cores.
///
/// # Safety
/// Basically this is only safe in the beginning of system initialization
/// and we need to make sure we have memory backing the REAL_MODE_BASE region
/// first.
unsafe fn get_bootstrap_code_region() -> &'static mut [u8] {
    let real_mode_destination: &mut [u8] =
        core::slice::from_raw_parts_mut(REAL_MODE_BASE as *mut u8, get_bootstrap_code_size());
real_mode_destination
}
/// Wakes up (resets) a core by sending a sequence of IPIs (INIT, INIT deassert, STARTUP).
///
/// # Notes
/// The x86 specification technically requires a delay between INIT and STARTUP, but on most
/// modern processors (Xeon Phi being an exception) this is not really necessary.
///
/// # Safety
/// Can easily reset the wrong core (bad for memory safety).
unsafe fn wakeup_core(core_id: ApicId) {
let kcb = kcb::get_kcb();
    // x86 core boot protocol: INIT, INIT deassert, then STARTUP:
kcb.arch.apic().ipi_init(core_id);
kcb.arch.apic().ipi_init_deassert();
    // Busy-wait ~10ms between INIT and STARTUP.
    let start = rawtime::Instant::now();
    while start.elapsed().as_millis() < 10 {}
kcb.arch.apic().ipi_startup(core_id, REAL_MODE_PAGE);
}
/// Starts up the core identified by `core_id`; after initialization it begins
/// executing in `init_function` and uses `stack` as its stack.
///
/// # Safety
/// You're waking up a core that goes off and does random things
/// (if you're not careful), so this can be pretty bad for memory safety.
pub unsafe fn initialize<A>(
core_id: x86::apic::ApicId,
init_function: fn(Arc<A>, &AtomicBool),
args: Arc<A>,
initialized: &AtomicBool,
stack: &dyn Stack,
) {
    // Make sure the bootstrap code is at the correct location in memory
copy_bootstrap_code();
// Initialize bootstrap assembly with correct parameters
let kcb = super::kcb::get_kcb();
    setup_bootstrap_code(
init_function as u64,
args,
initialized,
kcb.arch.init_vspace().pml4_address().into(),
stack.base() as u64,
);
// Send IPIs
wakeup_core(core_id);
}
| 35.049123 | 111 | 0.676444 |
8f93f64568cbbb56380e20e8bdf3bb6b8262ea5c | 4,274 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use crate::{ExecutorType, Storable, UserID};
use serde::{Deserialize, Serialize};
use std::prelude::v1::*;
use uuid::Uuid;
#[derive(Debug, Deserialize, Serialize)]
pub struct FunctionInput {
pub name: String,
pub description: String,
}
impl FunctionInput {
pub fn new(name: impl Into<String>, description: impl Into<String>) -> Self {
Self {
name: name.into(),
description: description.into(),
}
}
}
#[derive(Debug, Deserialize, Serialize)]
pub struct FunctionOutput {
pub name: String,
pub description: String,
}
impl FunctionOutput {
pub fn new(name: impl Into<String>, description: impl Into<String>) -> Self {
Self {
name: name.into(),
description: description.into(),
}
}
}
const USER_PREFIX: &str = "user";
#[derive(Default, Debug, Deserialize, Serialize)]
pub struct User {
pub id: UserID,
pub registered_functions: Vec<String>,
pub allowed_functions: Vec<String>,
}
impl Storable for User {
fn key_prefix() -> &'static str {
USER_PREFIX
}
fn uuid(&self) -> Uuid {
Uuid::new_v5(&Uuid::NAMESPACE_DNS, self.id.to_string().as_bytes())
}
}
const FUNCTION_PREFIX: &str = "function";
#[derive(Default, Debug, Deserialize, Serialize)]
pub struct Function {
pub id: Uuid,
pub name: String,
pub description: String,
pub public: bool,
pub executor_type: ExecutorType,
pub payload: Vec<u8>,
pub arguments: Vec<String>,
pub inputs: Vec<FunctionInput>,
pub outputs: Vec<FunctionOutput>,
pub owner: UserID,
pub user_allowlist: Vec<String>,
}
#[derive(Default)]
pub struct FunctionBuilder {
function: Function,
}
impl FunctionBuilder {
pub fn new() -> Self {
Self {
function: Function::default(),
}
}
pub fn id(mut self, id: Uuid) -> Self {
self.function.id = id;
self
}
pub fn executor_type(mut self, executor_type: ExecutorType) -> Self {
self.function.executor_type = executor_type;
self
}
pub fn name(mut self, name: impl ToString) -> Self {
self.function.name = name.to_string();
self
}
pub fn description(mut self, description: impl ToString) -> Self {
self.function.description = description.to_string();
self
}
pub fn payload(mut self, payload: Vec<u8>) -> Self {
self.function.payload = payload;
self
}
pub fn public(mut self, public: bool) -> Self {
self.function.public = public;
self
}
pub fn arguments(mut self, arguments: Vec<String>) -> Self {
self.function.arguments = arguments;
self
}
pub fn inputs(mut self, inputs: Vec<FunctionInput>) -> Self {
self.function.inputs = inputs;
self
}
pub fn outputs(mut self, outputs: Vec<FunctionOutput>) -> Self {
self.function.outputs = outputs;
self
}
pub fn owner(mut self, owner: impl Into<UserID>) -> Self {
self.function.owner = owner.into();
self
}
pub fn user_allowlist(mut self, user_allowlist: Vec<String>) -> Self {
self.function.user_allowlist = user_allowlist;
self
}
pub fn build(self) -> Function {
self.function
}
}
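// A minimal usage sketch of the builder (illustrative only; the field values
// are made up and `Uuid::new_v4()` assumes the uuid crate's `v4` feature):
//
//     let function = FunctionBuilder::new()
//         .id(Uuid::new_v4())
//         .name("echo")
//         .description("echoes its input")
//         .public(true)
//         .executor_type(ExecutorType::default())
//         .owner(UserID::default())
//         .build();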
impl Storable for Function {
fn key_prefix() -> &'static str {
        FUNCTION_PREFIX
}
fn uuid(&self) -> Uuid {
self.id
}
}
| 25.141176 | 81 | 0.631961 |
abccba25f1dca2f7547fbc040e240aebc70b8968 | 5,651 | // Copyright (c) Microsoft. All rights reserved.
use std::fmt::{self, Display};
use std::net::SocketAddr;
use std::str;
use failure::{Backtrace, Compat, Context, Fail};
use hyper::header::{CONTENT_LENGTH, CONTENT_TYPE};
use hyper::{Body, Response, StatusCode, Uri};
use systemd::Fd;
use url::Url;
use IntoResponse;
#[derive(Debug)]
pub struct Error {
inner: Context<ErrorKind>,
}
#[derive(Debug, Fail, PartialEq)]
pub enum ErrorKind {
#[fail(display = "An error occurred while authorizing the HTTP request")]
Authorization,
#[fail(display = "An error occurred while binding a listener to {}", _0)]
BindListener(BindListenerType),
#[fail(display = "Could not perform HTTP request")]
Http,
#[fail(display = "HTTP request failed: [{}] {}", _0, _1)]
HttpWithErrorResponse(StatusCode, String),
#[fail(display = "Could not initialize")]
Initialization,
#[fail(display = "Invalid API version {:?}", _0)]
InvalidApiVersion(String),
#[fail(display = "Invalid URL {:?}", _0)]
InvalidUrl(String),
#[fail(display = "Invalid URL {:?}: {}", _0, _1)]
InvalidUrlWithReason(String, InvalidUrlReason),
#[fail(
display = "URL parts could not be parsed into a valid URL: scheme: {:?}, base path: {:?}, path: {:?}",
scheme, base_path, path
)]
MalformedUrl {
scheme: String,
base_path: String,
path: String,
},
#[fail(display = "Module not found")]
ModuleNotFound(String),
#[fail(display = "An error occurred for path {}", _0)]
Path(String),
#[fail(display = "An error occurred with the proxy {}", _0)]
Proxy(Uri),
#[fail(display = "An error occurred in the service")]
ServiceError,
#[fail(display = "Token source error")]
TokenSource,
#[fail(
display = "Could not form well-formed URL by joining {:?} with {:?}",
_0, _1
)]
UrlJoin(Url, String),
}
impl Fail for Error {
fn cause(&self) -> Option<&Fail> {
self.inner.cause()
}
fn backtrace(&self) -> Option<&Backtrace> {
self.inner.backtrace()
}
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Display::fmt(&self.inner, f)
}
}
impl Error {
pub fn kind(&self) -> &ErrorKind {
self.inner.get_context()
}
pub fn http_with_error_response(status_code: StatusCode, body: &[u8]) -> Self {
let kind = match str::from_utf8(body) {
Ok(body) => ErrorKind::HttpWithErrorResponse(status_code, body.to_string()),
Err(_) => ErrorKind::HttpWithErrorResponse(
status_code,
"<could not parse response body as utf-8>".to_string(),
),
};
kind.into()
}
}
impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Self {
Error {
inner: Context::new(kind),
}
}
}
impl From<Context<ErrorKind>> for Error {
fn from(inner: Context<ErrorKind>) -> Self {
Error { inner }
}
}
impl IntoResponse for Error {
fn into_response(self) -> Response<Body> {
let mut fail: &Fail = &self;
let mut message = self.to_string();
while let Some(cause) = fail.cause() {
message.push_str(&format!("\n\tcaused by: {}", cause.to_string()));
fail = cause;
}
let status_code = match *self.kind() {
ErrorKind::Authorization | ErrorKind::ModuleNotFound(_) => StatusCode::NOT_FOUND,
ErrorKind::InvalidApiVersion(_) => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR,
};
let body = json!({
"message": message,
})
.to_string();
Response::builder()
.status(status_code)
.header(CONTENT_TYPE, "application/json")
.header(CONTENT_LENGTH, body.len().to_string().as_str())
.body(body.into())
.expect("response builder failure")
}
}
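// For example, `ErrorKind::Authorization` maps to a 404 response whose JSON
// body looks like {"message": "An error occurred while authorizing the HTTP
// request"}, with any underlying causes appended as "caused by:" lines.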
impl IntoResponse for Compat<Error> {
fn into_response(self) -> Response<Body> {
self.into_inner().into_response()
}
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum BindListenerType {
Address(SocketAddr),
Fd(Fd),
}
impl Display for BindListenerType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
BindListenerType::Address(addr) => write!(f, "address {}", addr),
BindListenerType::Fd(fd) => write!(f, "fd {}", fd),
}
}
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum InvalidUrlReason {
FdNeitherNumberNorName,
FileNotFound,
InvalidScheme,
InvalidCredentials,
NoAddress,
NoHost,
UnrecognizedSocket,
}
impl Display for InvalidUrlReason {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
InvalidUrlReason::FdNeitherNumberNorName => {
write!(f, "URL could not be parsed as fd number nor fd name")
}
InvalidUrlReason::FileNotFound => write!(f, "Socket file could not be found"),
InvalidUrlReason::InvalidScheme => write!(f, "URL does not have a recognized scheme"),
InvalidUrlReason::InvalidCredentials => {
write!(f, "Username or password could not be parsed from URL")
}
InvalidUrlReason::NoAddress => write!(f, "URL has no address"),
InvalidUrlReason::NoHost => write!(f, "URL has no host"),
InvalidUrlReason::UnrecognizedSocket => {
write!(f, "URL does not correspond to a valid socket")
}
}
}
}
| 27.432039 | 110 | 0.588214 |
0a7bc0d2e93a5a265909851a4068c1bf551d640f | 5,397 | use std::{
convert::Infallible,
io::{Error as IoError, ErrorKind},
};
use semver::Version;
use handlebars::TemplateError;
use fluvio::FluvioError;
#[cfg(feature = "k8s")]
use fluvio_cluster::cli::ClusterCliError;
use fluvio_sc_schema::ApiError;
use fluvio_sc_schema::errors::ErrorCode;
use fluvio_extension_common::output::OutputError;
use fluvio_socket::SocketError;
use fluvio_index::{PackageId, Target};
use crate::common::target::TargetError;
pub type Result<T, E = CliError> = core::result::Result<T, E>;
#[derive(thiserror::Error, Debug)]
#[allow(clippy::enum_variant_names)]
pub enum CliError {
#[error(transparent)]
IoError(#[from] IoError),
#[error(transparent)]
OutputError(#[from] OutputError),
#[error("Failed to parse format string")]
TemplateError(#[from] TemplateError),
#[cfg(feature = "k8s")]
#[error("Fluvio cluster error")]
ClusterCliError(#[from] ClusterCliError),
#[error("Target Error")]
TargetError(#[from] TargetError),
#[error("Fluvio client error")]
ClientError(#[from] FluvioError),
#[cfg(feature = "k8s")]
#[error("Kubernetes config error")]
K8ConfigError(#[from] k8_config::ConfigError),
#[cfg(feature = "k8s")]
#[error("Kubernetes client error")]
K8ClientError(#[from] k8_client::ClientError),
/// An error occurred while processing the connector yaml
#[error("Fluvio connector config")]
ConnectorConfig(#[from] serde_yaml::Error),
#[error("Package index error")]
IndexError(#[from] fluvio_index::Error),
#[error("Error finding executable")]
WhichError(#[from] which::Error),
#[error(transparent)]
HttpError(#[from] HttpError),
#[error("Package {package} is not published at version {version} for target {target}")]
PackageNotFound {
package: PackageId,
version: Version,
target: Target,
},
#[error(transparent)]
TlsError(#[from] fluvio_future::openssl::TlsError),
#[error("Invalid argument: {0}")]
InvalidArg(String),
#[error("Unknown error: {0}")]
Other(String),
#[error("Unexpected Infallible error")]
Infallible(#[from] Infallible),
#[error("Dataplane error: {0}")]
DataPlaneError(#[from] ErrorCode),
#[error("TableFormat not found: {0}")]
TableFormatNotFound(String),
}
#[derive(thiserror::Error, Debug)]
#[error("Http Error: {}", inner)]
pub struct HttpError {
pub(crate) inner: http_types::Error,
}
impl From<http_types::Error> for CliError {
fn from(inner: http_types::Error) -> Self {
Self::HttpError(HttpError { inner })
}
}
impl CliError {
pub fn invalid_arg<M: Into<String>>(reason: M) -> Self {
Self::InvalidArg(reason.into())
}
pub fn into_report(self) -> color_eyre::Report {
use color_eyre::Report;
match self {
#[cfg(feature = "k8s")]
CliError::ClusterCliError(cluster) => cluster.into_report(),
_ => Report::from(self),
}
}
/// Looks at the error value and attempts to gracefully handle reporting it
///
/// Sometimes, specific errors require specific user-facing error messages.
/// Here is where we define those messages, as well as the exit code that the
/// program should return when exiting after those errors.
pub fn print(self) -> Result<()> {
match &self {
Self::ClientError(FluvioError::AdminApi(api)) => match api {
ApiError::Code(ErrorCode::TopicAlreadyExists, _) => {
println!("Topic already exists");
Ok(())
}
ApiError::Code(ErrorCode::ManagedConnectorAlreadyExists, _) => {
println!("Connector already exists");
Ok(())
}
ApiError::Code(ErrorCode::TopicNotFound, _) => {
println!("Topic not found");
Ok(())
}
ApiError::Code(ErrorCode::TopicInvalidName, _) => {
println!("Invalid topic name: topic name may only include lowercase letters (a-z), numbers (0-9), and hyphens (-).");
Ok(())
}
ApiError::Code(ErrorCode::TableFormatAlreadyExists, _) => {
println!("TableFormat already exists");
Ok(())
}
ApiError::Code(ErrorCode::TableFormatNotFound, _) => {
println!("TableFormat not found");
Ok(())
}
_ => Err(self),
},
Self::ClientError(FluvioError::Socket(SocketError::Io(io)))
if io.kind() == ErrorKind::TimedOut =>
{
println!("Network connection timed out while waiting for response");
Ok(())
}
#[cfg(feature = "k8s")]
Self::ClusterCliError(ClusterCliError::TargetError(TargetError::ClientError(
FluvioError::Socket(SocketError::Io(io)),
))) => match io.kind() {
ErrorKind::ConnectionRefused => {
println!("Failed to connect to cluster, make sure you have started or connected to your cluster");
Ok(())
}
_ => Err(self),
},
_ => Err(self),
}
}
}
| 33.521739 | 137 | 0.574764 |
388c5fc4dd5b969424a158b23f7752593aecb24d | 2,729 | #![allow(clippy::module_inception)]
#![allow(clippy::upper_case_acronyms)]
#![allow(clippy::large_enum_variant)]
#![allow(clippy::wrong_self_convention)]
#![allow(clippy::should_implement_trait)]
#![allow(clippy::blacklisted_name)]
//! <p>IoT Analytics allows you to collect large amounts of device data, process messages, and store them.
//! You can then query the data and run sophisticated analytics on it. IoT Analytics enables advanced
//! data exploration through integration with Jupyter Notebooks and data visualization through integration
//! with Amazon QuickSight.</p>
//! <p>Traditional analytics and business intelligence tools are designed to process structured data. IoT data
//! often comes from devices that record noisy processes (such as temperature, motion, or sound). As a result
//! the data from these devices can have significant gaps, corrupted messages, and false readings that must be
//! cleaned up before analysis can occur. Also, IoT data is often only meaningful in the context of other data
//! from external sources. </p>
//! <p>IoT Analytics automates the steps required to analyze data from IoT devices. IoT Analytics
//! filters, transforms, and enriches IoT data before storing it in a time-series data store for analysis. You
//! can set up the service to collect only the data you need from your devices, apply mathematical transforms
//! to process the data, and enrich the data with device-specific metadata such as device type and location
//! before storing it. Then, you can analyze your data by running queries using the built-in SQL query engine,
//! or perform more complex analytics and machine learning inference. IoT Analytics includes pre-built models
//! for common IoT use cases so you can answer questions like which devices are about to fail or which customers
//! are at risk of abandoning their wearable devices.</p>
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
pub use error_meta::Error;
pub use config::Config;
mod aws_endpoint;
#[cfg(feature = "client")]
pub mod client;
pub mod config;
pub mod error;
mod error_meta;
pub mod input;
mod json_deser;
mod json_errors;
mod json_ser;
pub mod model;
pub mod operation;
mod operation_deser;
mod operation_ser;
pub mod output;
pub static PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
pub use smithy_http::byte_stream::ByteStream;
pub use smithy_http::result::SdkError;
pub use smithy_types::Blob;
static API_METADATA: aws_http::user_agent::ApiMetadata =
aws_http::user_agent::ApiMetadata::new("iotanalytics", PKG_VERSION);
pub use aws_auth::Credentials;
pub use aws_types::region::Region;
#[cfg(feature = "client")]
pub use client::Client;
pub use smithy_http::endpoint::Endpoint;
| 48.732143 | 112 | 0.776475 |
db1155e0846e5706b022486a7216efa442a8de6f | 13,095 | use byteorder::{LE, WriteBytesExt};
use std::hash::Hasher;
use std::io::{self, Read, Write, Seek, SeekFrom, ErrorKind};
use std::mem;
use twox_hash::XxHash32;
use thiserror::Error;
use fehler::{throws};
use super::{MAGIC, INCOMPRESSIBLE, WINDOW_SIZE};
use super::header::{Flags, BlockDescriptor};
use crate::raw::{U32Table, compress2, EncoderTable};
/// Errors when compressing an LZ4 frame.
#[derive(Error, Debug)]
pub enum CompressionError {
#[error("error reading from the input you gave me")]
ReadError(io::Error),
#[error("error writing to the output you gave me")]
WriteError(#[from] io::Error),
#[error("the block size you asked for is not supported")]
InvalidBlockSize,
}
type Error = CompressionError; // do it this way for better docs
impl From<Error> for io::Error {
fn from(e: Error) -> io::Error {
io::Error::new(ErrorKind::Other, e)
}
}
/// A builder-style struct that configures compression settings.
/// This is how you compress LZ4 frames.
/// (An LZ4 file usually consists of a single frame.)
///
/// Create it using `Default::default()`.
pub struct CompressionSettings<'a> {
independent_blocks: bool,
block_checksums: bool,
content_checksum: bool,
block_size: usize,
dictionary: Option<&'a [u8]>,
dictionary_id: Option<u32>,
}
impl<'a> Default for CompressionSettings<'a> {
fn default() -> Self {
Self {
independent_blocks: true,
block_checksums: false,
content_checksum: true,
block_size: 4 * 1024 * 1024,
dictionary: None,
dictionary_id: None,
}
}
}
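// A minimal usage sketch (the file handles are placeholders; the setters used
// here are the ones defined below):
//
//     let mut settings = CompressionSettings::default();
//     settings.block_size(64 * 1024).content_checksum(false);
//     settings.compress(&mut input_file, &mut output_file)?;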
impl<'a> CompressionSettings<'a> {
/// In independent mode, blocks are not allowed to reference data from previous blocks.
/// Hence, using dependent blocks yields slightly better compression.
/// The downside of dependent blocks is that seeking becomes impossible - the entire frame always has
/// to be decompressed from the beginning.
///
/// Blocks are independent by default.
pub fn independent_blocks(&mut self, v: bool) -> &mut Self {
self.independent_blocks = v;
self
}
/// Block checksums can help detect data corruption in storage and transit.
/// They do not offer error correction though.
///
/// In most cases, block checksums are not very helpful because you generally want a lower
/// layer to deal with data corruption more comprehensively.
///
/// Block checksums are disabled by default.
pub fn block_checksums(&mut self, v: bool) -> &mut Self {
self.block_checksums = v;
self
}
/// The content checksum (also called frame checksum) is calculated over the contents of the entire frame.
/// This makes them cheaper than block checksums as their size overhead is constant
/// as well as marginally more useful, because they can help protect against incorrect decompression.
///
/// Note that the content checksum can only be verified *after* the entire frame has been read
/// (and returned!), which is the downside of content checksums.
///
/// Frame checksums are enabled by default.
pub fn content_checksum(&mut self, v: bool) -> &mut Self {
self.content_checksum = v;
self
}
/// Only valid values are 4MiB, 1MiB, 256KiB, 64KiB
/// (TODO: better interface for this)
///
/// The default block size is 4 MiB.
pub fn block_size(&mut self, v: usize) -> &mut Self {
self.block_size = v;
self
}
/// A dictionary is essentially a constant slice of bytes shared by the compressing and decompressing party.
/// Using a dictionary can improve compression ratios, because the compressor can reference data from the dictionary.
///
/// The dictionary id is an application-specific identifier which can be used during decompression to determine
/// which dictionary to use.
///
/// Note that while the size of a dictionary can be arbitrary, dictionaries larger than 64 KiB are not useful as
/// the LZ4 algorithm does not support backreferences by more than 64 KiB, i.e. any dictionary content before
/// the trailing 64 KiB is silently ignored.
///
/// By default, no dictionary is used and no id is specified.
pub fn dictionary(&mut self, id: u32, dict: &'a [u8]) -> &mut Self {
self.dictionary_id = Some(id);
self.dictionary = Some(dict);
self
}
/// The dictionary id header field is quite obviously intended to tell anyone trying to decompress your frame which dictionary to use.
/// So it is only natural to assume that the *absence* of a dictionary id indicates that no dictionary was used.
///
/// Unfortunately this assumption turns out to be incorrect. The LZ4 CLI simply never writes a dictionary id.
/// The major downside is that you can no longer distinguish corrupted data from a missing dictionary
/// (unless you write block checksums, which the LZ4 CLI also never does).
///
/// Hence, this library is opinionated in the sense that we always want you to specify either neither or both of these things
/// (the LZ4 CLI basically just ignores the dictionary id completely and only cares about whether you specify a dictionary parameter or not).
///
/// If you think you know better (you probably don't) you may use this method to break this rule.
pub fn dictionary_id_nonsense_override(&mut self, id: Option<u32>) -> &mut Self {
self.dictionary_id = id;
self
}
// TODO: these interfaces need to go away in favor of something that can handle individual blocks rather than always compressing full frames at once
#[throws]
pub fn compress<R: Read, W: Write>(&self, reader: R, writer: W) {
self.compress_internal(reader, writer, None)?;
}
#[throws]
pub fn compress_with_size_unchecked<R: Read, W: Write>(&self, reader: R, writer: W, content_size: u64) {
self.compress_internal(reader, writer, Some(content_size))?;
}
#[throws]
pub fn compress_with_size<R: Read + Seek, W: Write>(&self, mut reader: R, writer: W) {
// maybe one day we can just use reader.stream_len() here: https://github.com/rust-lang/rust/issues/59359
// then again, we implement this to ignore all bytes before the cursor, which stream_len() does not
let start = reader.seek(SeekFrom::Current(0))?;
let end = reader.seek(SeekFrom::End(0))?;
reader.seek(SeekFrom::Start(start))?;
let length = end - start;
self.compress_internal(reader, writer, Some(length))?;
}
#[throws]
fn compress_internal<R: Read, W: Write>(&self, mut reader: R, mut writer: W, content_size: Option<u64>) {
let mut content_hasher = None;
let mut flags = Flags::empty();
if self.independent_blocks {
flags |= Flags::IndependentBlocks;
}
if self.block_checksums {
flags |= Flags::BlockChecksums;
}
if self.content_checksum {
flags |= Flags::ContentChecksum;
content_hasher = Some(XxHash32::with_seed(0));
}
if self.dictionary_id.is_some() {
flags |= Flags::DictionaryId;
}
if content_size.is_some() {
flags |= Flags::ContentSize;
}
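// FLG byte layout per the LZ4 frame format: the two high bits carry the
// version number (01), the remaining bits carry the feature flags set above.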
let version = 1 << 6;
let flag_byte = version | flags.bits();
let bd_byte = BlockDescriptor::new(self.block_size).ok_or(Error::InvalidBlockSize)?.0;
let mut header = Vec::new();
header.write_u32::<LE>(MAGIC)?;
header.write_u8(flag_byte)?;
header.write_u8(bd_byte)?;
if let Some(content_size) = content_size {
header.write_u64::<LE>(content_size)?;
}
if let Some(id) = self.dictionary_id {
header.write_u32::<LE>(id)?;
}
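// Header checksum per the LZ4 frame format: the second byte of the xxHash32
// of the frame descriptor, i.e. everything after the magic number.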
let mut hasher = XxHash32::with_seed(0);
hasher.write(&header[4..]); // skip magic for header checksum
header.write_u8((hasher.finish() >> 8) as u8)?;
writer.write_all(&header)?;
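// Prime a template match table from the dictionary (if any) so each block
// can cheaply start from dictionary-backed state.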
let mut template_table = U32Table::default();
let mut block_initializer: &[u8] = &[];
if let Some(dict) = self.dictionary {
for window in dict.windows(mem::size_of::<usize>()).step_by(3) {
// this is a perfectly safe way to find out where our window is pointing
// we could do this manually by iterating with an index to avoid the scary-looking
// pointer math but this is way more convenient IMO
let offset = window.as_ptr() as usize - dict.as_ptr() as usize;
template_table.replace(dict, offset);
}
block_initializer = dict;
}
// TODO: when doing dependent blocks or dictionaries, in_buffer's capacity is insufficient
let mut in_buffer = Vec::with_capacity(self.block_size);
in_buffer.extend_from_slice(block_initializer);
let mut out_buffer = vec![0u8; self.block_size];
let mut table = template_table.clone();
loop {
let window_offset = in_buffer.len();
// We basically want read_exact semantics, except at the end.
// Sadly read_exact specifies the buffer contents to be undefined
// on error, so we have to use this construction instead.
reader.by_ref().take(self.block_size as u64).read_to_end(&mut in_buffer).map_err(Error::ReadError)?;
let read_bytes = in_buffer.len() - window_offset;
if read_bytes == 0 {
break;
}
if let Some(x) = content_hasher.as_mut() {
x.write(&in_buffer[window_offset..]);
}
// TODO: implement u16 table for small inputs
// 1. limit output by input size so we never have negative compression ratio
// 2. use a wrapper that forbids partial writes, so we don't write 32-bit integers
// as four individual bytes with four individual range checks
let mut cursor = NoPartialWrites(&mut out_buffer[..read_bytes]);
let write = match compress2(&in_buffer, window_offset, &mut table, &mut cursor) {
Ok(()) => {
let not_written_len = cursor.0.len();
let written_len = read_bytes - not_written_len;
writer.write_u32::<LE>(written_len as u32)?;
&out_buffer[..written_len]
}
Err(e) => {
assert!(e.kind() == ErrorKind::ConnectionAborted);
// incompressible: store the block uncompressed, flagged via the high bit of the size field
writer.write_u32::<LE>((read_bytes as u32) | INCOMPRESSIBLE)?;
&in_buffer[window_offset..]
}
};
writer.write_all(write)?;
if flags.contains(Flags::BlockChecksums) {
let mut block_hasher = XxHash32::with_seed(0);
block_hasher.write(write);
writer.write_u32::<LE>(block_hasher.finish() as u32)?;
}
if flags.contains(Flags::IndependentBlocks) {
// clear table
in_buffer.clear();
in_buffer.extend_from_slice(block_initializer);
table = template_table.clone();
} else {
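// Dependent blocks: retain only the trailing WINDOW_SIZE bytes (the 64 KiB
// LZ4 history window) as match context for the next block.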
if in_buffer.len() > WINDOW_SIZE {
let how_much_to_forget = in_buffer.len() - WINDOW_SIZE;
table.offset(how_much_to_forget);
in_buffer.drain(..how_much_to_forget);
}
}
}
writer.write_u32::<LE>(0)?;
if let Some(x) = content_hasher {
writer.write_u32::<LE>(x.finish() as u32)?;
}
}
}
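// Hedged end-to-end sketch (not part of the original source; the builder type
// name `FrameCompressor` is an assumption and error handling is elided):
//
//     use std::io::Cursor;
//
//     let input = b"hello hello hello hello";
//     let mut compressed = Vec::new();
//     FrameCompressor::default()
//         .content_checksum(true)
//         .block_size(64 * 1024)
//         .compress_with_size(Cursor::new(&input[..]), &mut compressed)?;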
/// Helper struct to allow more efficient code generation when using the Write trait on byte buffers.
///
/// The underlying problem is that the Write impl on [u8] (and everything similar, e.g. Cursor<[u8]>)
/// is specified to write as many bytes as possible before returning an error.
/// This is a problem because it forces e.g. a 32-bit write to compile to four 8-bit writes with a range
/// check every time, rather than a single 32-bit write with a range check.
///
/// This wrapper aims to resolve the problem by simply not writing anything in case we fail the bounds check,
/// as we throw away the entire buffer in that case anyway.
struct NoPartialWrites<'a>(&'a mut [u8]);
impl<'a> Write for NoPartialWrites<'a> {
#[inline]
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
if self.0.len() < data.len() {
// quite frankly it doesn't matter what we specify here
return Err(ErrorKind::ConnectionAborted.into());
}
let amt = data.len();
let (a, b) = mem::replace(&mut self.0, &mut []).split_at_mut(data.len());
a.copy_from_slice(data);
self.0 = b;
Ok(amt)
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
| 41.179245 | 152 | 0.620237 |
ef7f1b0cd392e061e4fcee47fe71a4dfe066ab30 | 51,768 | extern crate cargo;
extern crate cargotest;
extern crate git2;
extern crate hamcrest;
use std::fs::{self, File};
use std::io::prelude::*;
use std::path::Path;
use cargo::util::process;
use cargotest::{sleep_ms, RUSTC};
use cargotest::support::paths::{self, CargoPathExt};
use cargotest::support::{git, project, execs, main_file, path2url};
use hamcrest::{assert_that,existing_file};
#[test]
fn cargo_compile_simple_git_dep() {
let project = project("foo");
let git_project = git::new("dep1", |project| {
project
.file("Cargo.toml", r#"
[project]
name = "dep1"
version = "0.5.0"
authors = ["[email protected]"]
[lib]
name = "dep1"
"#)
.file("src/dep1.rs", r#"
pub fn hello() -> &'static str {
"hello world"
}
"#)
}).unwrap();
let project = project
.file("Cargo.toml", &format!(r#"
[project]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.dep1]
git = '{}'
[[bin]]
name = "foo"
"#, git_project.url()))
.file("src/foo.rs", &main_file(r#""{}", dep1::hello()"#, &["dep1"]));
let root = project.root();
let git_root = git_project.root();
assert_that(project.cargo_process("build"),
execs()
.with_stderr(&format!("[UPDATING] git repository `{}`\n\
[COMPILING] dep1 v0.5.0 ({}#[..])\n\
[COMPILING] foo v0.5.0 ({})\n",
path2url(git_root.clone()),
path2url(git_root),
path2url(root))));
assert_that(&project.bin("foo"), existing_file());
assert_that(
process(&project.bin("foo")),
execs().with_stdout("hello world\n"));
}
#[test]
fn cargo_compile_git_dep_branch() {
let project = project("foo");
let git_project = git::new("dep1", |project| {
project
.file("Cargo.toml", r#"
[project]
name = "dep1"
version = "0.5.0"
authors = ["[email protected]"]
[lib]
name = "dep1"
"#)
.file("src/dep1.rs", r#"
pub fn hello() -> &'static str {
"hello world"
}
"#)
}).unwrap();
// Make a new branch based on the current HEAD commit
let repo = git2::Repository::open(&git_project.root()).unwrap();
let head = repo.head().unwrap().target().unwrap();
let head = repo.find_commit(head).unwrap();
repo.branch("branchy", &head, true).unwrap();
let project = project
.file("Cargo.toml", &format!(r#"
[project]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.dep1]
git = '{}'
branch = "branchy"
[[bin]]
name = "foo"
"#, git_project.url()))
.file("src/foo.rs", &main_file(r#""{}", dep1::hello()"#, &["dep1"]));
let root = project.root();
let git_root = git_project.root();
assert_that(project.cargo_process("build"),
execs()
.with_stderr(&format!("[UPDATING] git repository `{}`\n\
[COMPILING] dep1 v0.5.0 ({}?branch=branchy#[..])\n\
[COMPILING] foo v0.5.0 ({})\n",
path2url(git_root.clone()),
path2url(git_root),
path2url(root))));
assert_that(&project.bin("foo"), existing_file());
assert_that(
process(&project.bin("foo")),
execs().with_stdout("hello world\n"));
}
#[test]
fn cargo_compile_git_dep_tag() {
let project = project("foo");
let git_project = git::new("dep1", |project| {
project
.file("Cargo.toml", r#"
[project]
name = "dep1"
version = "0.5.0"
authors = ["[email protected]"]
[lib]
name = "dep1"
"#)
.file("src/dep1.rs", r#"
pub fn hello() -> &'static str {
"hello world"
}
"#)
}).unwrap();
// Make a tag corresponding to the current HEAD
let repo = git2::Repository::open(&git_project.root()).unwrap();
let head = repo.head().unwrap().target().unwrap();
repo.tag("v0.1.0",
&repo.find_object(head, None).unwrap(),
&repo.signature().unwrap(),
"make a new tag",
false).unwrap();
let project = project
.file("Cargo.toml", &format!(r#"
[project]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.dep1]
git = '{}'
tag = "v0.1.0"
[[bin]]
name = "foo"
"#, git_project.url()))
.file("src/foo.rs", &main_file(r#""{}", dep1::hello()"#, &["dep1"]));
let root = project.root();
let git_root = git_project.root();
assert_that(project.cargo_process("build"),
execs()
.with_stderr(&format!("[UPDATING] git repository `{}`\n\
[COMPILING] dep1 v0.5.0 ({}?tag=v0.1.0#[..])\n\
[COMPILING] foo v0.5.0 ({})\n",
path2url(git_root.clone()),
path2url(git_root),
path2url(root))));
assert_that(&project.bin("foo"), existing_file());
assert_that(process(&project.bin("foo")),
execs().with_stdout("hello world\n"));
assert_that(project.cargo("build"),
execs().with_status(0));
}
#[test]
fn cargo_compile_with_nested_paths() {
let git_project = git::new("dep1", |project| {
project
.file("Cargo.toml", r#"
[project]
name = "dep1"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.dep2]
version = "0.5.0"
path = "vendor/dep2"
[lib]
name = "dep1"
"#)
.file("src/dep1.rs", r#"
extern crate dep2;
pub fn hello() -> &'static str {
dep2::hello()
}
"#)
.file("vendor/dep2/Cargo.toml", r#"
[project]
name = "dep2"
version = "0.5.0"
authors = ["[email protected]"]
[lib]
name = "dep2"
"#)
.file("vendor/dep2/src/dep2.rs", r#"
pub fn hello() -> &'static str {
"hello world"
}
"#)
}).unwrap();
let p = project("parent")
.file("Cargo.toml", &format!(r#"
[project]
name = "parent"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.dep1]
version = "0.5.0"
git = '{}'
[[bin]]
name = "parent"
"#, git_project.url()))
.file("src/parent.rs",
&main_file(r#""{}", dep1::hello()"#, &["dep1"]));
p.cargo_process("build")
.exec_with_output()
.unwrap();
assert_that(&p.bin("parent"), existing_file());
assert_that(process(&p.bin("parent")),
execs().with_stdout("hello world\n"));
}
#[test]
fn cargo_compile_with_meta_package() {
let git_project = git::new("meta-dep", |project| {
project
.file("dep1/Cargo.toml", r#"
[project]
name = "dep1"
version = "0.5.0"
authors = ["[email protected]"]
[lib]
name = "dep1"
"#)
.file("dep1/src/dep1.rs", r#"
pub fn hello() -> &'static str {
"this is dep1"
}
"#)
.file("dep2/Cargo.toml", r#"
[project]
name = "dep2"
version = "0.5.0"
authors = ["[email protected]"]
[lib]
name = "dep2"
"#)
.file("dep2/src/dep2.rs", r#"
pub fn hello() -> &'static str {
"this is dep2"
}
"#)
}).unwrap();
let p = project("parent")
.file("Cargo.toml", &format!(r#"
[project]
name = "parent"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.dep1]
version = "0.5.0"
git = '{}'
[dependencies.dep2]
version = "0.5.0"
git = '{}'
[[bin]]
name = "parent"
"#, git_project.url(), git_project.url()))
.file("src/parent.rs",
&main_file(r#""{} {}", dep1::hello(), dep2::hello()"#, &["dep1", "dep2"]));
p.cargo_process("build")
.exec_with_output()
.unwrap();
assert_that(&p.bin("parent"), existing_file());
assert_that(process(&p.bin("parent")),
execs().with_stdout("this is dep1 this is dep2\n"));
}
#[test]
fn cargo_compile_with_short_ssh_git() {
let url = "[email protected]:a/dep";
let project = project("project")
.file("Cargo.toml", &format!(r#"
[project]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.dep]
git = "{}"
[[bin]]
name = "foo"
"#, url))
.file("src/foo.rs", &main_file(r#""{}", dep1::hello()"#, &["dep1"]));
assert_that(project.cargo_process("build"),
execs()
.with_stdout("")
.with_stderr(&format!("\
[ERROR] failed to parse manifest at `[..]`
Caused by:
invalid url `{}`: relative URL without a base
", url)));
}
#[test]
fn two_revs_same_deps() {
let bar = git::new("meta-dep", |project| {
project.file("Cargo.toml", r#"
[package]
name = "bar"
version = "0.0.0"
authors = []
"#)
.file("src/lib.rs", "pub fn bar() -> i32 { 1 }")
}).unwrap();
let repo = git2::Repository::open(&bar.root()).unwrap();
let rev1 = repo.revparse_single("HEAD").unwrap().id();
// Commit the changes and make sure we trigger a recompile
File::create(&bar.root().join("src/lib.rs")).unwrap().write_all(br#"
pub fn bar() -> i32 { 2 }
"#).unwrap();
git::add(&repo);
let rev2 = git::commit(&repo);
let foo = project("foo")
.file("Cargo.toml", &format!(r#"
[project]
name = "foo"
version = "0.0.0"
authors = []
[dependencies.bar]
git = '{}'
rev = "{}"
[dependencies.baz]
path = "../baz"
"#, bar.url(), rev1))
.file("src/main.rs", r#"
extern crate bar;
extern crate baz;
fn main() {
assert_eq!(bar::bar(), 1);
assert_eq!(baz::baz(), 2);
}
"#);
let baz = project("baz")
.file("Cargo.toml", &format!(r#"
[package]
name = "baz"
version = "0.0.0"
authors = []
[dependencies.bar]
git = '{}'
rev = "{}"
"#, bar.url(), rev2))
.file("src/lib.rs", r#"
extern crate bar;
pub fn baz() -> i32 { bar::bar() }
"#);
baz.build();
assert_that(foo.cargo_process("build").arg("-v"),
execs().with_status(0));
assert_that(&foo.bin("foo"), existing_file());
assert_that(foo.process(&foo.bin("foo")), execs().with_status(0));
}
#[test]
fn recompilation() {
let git_project = git::new("bar", |project| {
project
.file("Cargo.toml", r#"
[project]
name = "bar"
version = "0.5.0"
authors = ["[email protected]"]
[lib]
name = "bar"
"#)
.file("src/bar.rs", r#"
pub fn bar() {}
"#)
}).unwrap();
let p = project("foo")
.file("Cargo.toml", &format!(r#"
[project]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.bar]
version = "0.5.0"
git = '{}'
[[bin]]
name = "foo"
"#, git_project.url()))
.file("src/foo.rs",
&main_file(r#""{:?}", bar::bar()"#, &["bar"]));
// First time around we should compile both foo and bar
assert_that(p.cargo_process("build"),
execs().with_stderr(&format!("[UPDATING] git repository `{}`\n\
[COMPILING] bar v0.5.0 ({}#[..])\n\
[COMPILING] foo v0.5.0 ({})\n",
git_project.url(),
git_project.url(),
p.url())));
// Don't recompile the second time
assert_that(p.cargo("build"),
execs().with_stdout(""));
// Modify a file manually, shouldn't trigger a recompile
File::create(&git_project.root().join("src/bar.rs")).unwrap().write_all(br#"
pub fn bar() { println!("hello!"); }
"#).unwrap();
assert_that(p.cargo("build"),
execs().with_stdout(""));
assert_that(p.cargo("update"),
execs().with_stderr(&format!("[UPDATING] git repository `{}`",
git_project.url())));
assert_that(p.cargo("build"),
execs().with_stdout(""));
// Commit the changes and make sure we don't trigger a recompile because the
// lockfile says not to change
let repo = git2::Repository::open(&git_project.root()).unwrap();
git::add(&repo);
git::commit(&repo);
println!("compile after commit");
assert_that(p.cargo("build"),
execs().with_stdout(""));
p.root().move_into_the_past();
// Update the dependency and carry on!
assert_that(p.cargo("update"),
execs().with_stderr(&format!("[UPDATING] git repository `{}`\n\
[UPDATING] bar v0.5.0 ([..]) -> #[..]\n\
",
git_project.url())));
println!("going for the last compile");
assert_that(p.cargo("build"),
execs().with_stderr(&format!("[COMPILING] bar v0.5.0 ({}#[..])\n\
[COMPILING] foo v0.5.0 ({})\n",
git_project.url(),
p.url())));
// Make sure clean only cleans one dep
assert_that(p.cargo("clean")
.arg("-p").arg("foo"),
execs().with_stdout(""));
assert_that(p.cargo("build"),
execs().with_stderr(&format!("[COMPILING] foo v0.5.0 ({})\n",
p.url())));
}
#[test]
fn update_with_shared_deps() {
let git_project = git::new("bar", |project| {
project
.file("Cargo.toml", r#"
[project]
name = "bar"
version = "0.5.0"
authors = ["[email protected]"]
[lib]
name = "bar"
"#)
.file("src/bar.rs", r#"
pub fn bar() {}
"#)
}).unwrap();
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.dep1]
path = "dep1"
[dependencies.dep2]
path = "dep2"
"#)
.file("src/main.rs", r#"
extern crate dep1;
extern crate dep2;
fn main() {}
"#)
.file("dep1/Cargo.toml", &format!(r#"
[package]
name = "dep1"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.bar]
version = "0.5.0"
git = '{}'
"#, git_project.url()))
.file("dep1/src/lib.rs", "")
.file("dep2/Cargo.toml", &format!(r#"
[package]
name = "dep2"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.bar]
version = "0.5.0"
git = '{}'
"#, git_project.url()))
.file("dep2/src/lib.rs", "");
// First time around we should compile both foo and bar
assert_that(p.cargo_process("build"),
execs().with_stderr(&format!("\
[UPDATING] git repository `{git}`
[COMPILING] bar v0.5.0 ({git}#[..])
[COMPILING] [..] v0.5.0 ([..])
[COMPILING] [..] v0.5.0 ([..])
[COMPILING] foo v0.5.0 ({dir})\n", git = git_project.url(), dir = p.url())));
// Modify a file manually, and commit it
File::create(&git_project.root().join("src/bar.rs")).unwrap().write_all(br#"
pub fn bar() { println!("hello!"); }
"#).unwrap();
let repo = git2::Repository::open(&git_project.root()).unwrap();
let old_head = repo.head().unwrap().target().unwrap();
git::add(&repo);
git::commit(&repo);
sleep_ms(1000);
// By default, not transitive updates
println!("dep1 update");
assert_that(p.cargo("update")
.arg("-p").arg("dep1"),
execs().with_stdout(""));
// Don't do anything bad on a weird --precise argument
println!("bar bad precise update");
assert_that(p.cargo("update")
.arg("-p").arg("bar")
.arg("--precise").arg("0.1.2"),
execs().with_status(101).with_stderr("\
[UPDATING] git repository [..]
[ERROR] Unable to update [..]
To learn more, run the command again with --verbose.
"));
// Specifying a precise rev to the old rev shouldn't actually update
// anything because we already have the rev in the db.
println!("bar precise update");
assert_that(p.cargo("update")
.arg("-p").arg("bar")
.arg("--precise").arg(&old_head.to_string()),
execs().with_stdout(""));
// Updating aggressively should, however, update the repo.
println!("dep1 aggressive update");
assert_that(p.cargo("update")
.arg("-p").arg("dep1")
.arg("--aggressive"),
execs().with_stderr(&format!("[UPDATING] git repository `{}`\n\
[UPDATING] bar v0.5.0 ([..]) -> #[..]\n\
", git_project.url())));
// Make sure we still only compile one version of the git repo
println!("build");
assert_that(p.cargo("build"),
execs().with_stderr(&format!("\
[COMPILING] bar v0.5.0 ({git}#[..])
[COMPILING] [..] v0.5.0 ({dir}[..]dep[..])
[COMPILING] [..] v0.5.0 ({dir}[..]dep[..])
[COMPILING] foo v0.5.0 ({dir})\n",
git = git_project.url(), dir = p.url())));
// We should be able to update transitive deps
assert_that(p.cargo("update").arg("-p").arg("bar"),
execs().with_stderr(&format!("[UPDATING] git repository `{}`",
git_project.url())));
}
#[test]
fn dep_with_submodule() {
let project = project("foo");
let git_project = git::new("dep1", |project| {
project
.file("Cargo.toml", r#"
[package]
name = "dep1"
version = "0.5.0"
authors = ["[email protected]"]
"#)
}).unwrap();
let git_project2 = git::new("dep2", |project| {
project.file("lib.rs", "pub fn dep() {}")
}).unwrap();
let repo = git2::Repository::open(&git_project.root()).unwrap();
let url = path2url(git_project2.root()).to_string();
git::add_submodule(&repo, &url, Path::new("src"));
git::commit(&repo);
let project = project
.file("Cargo.toml", &format!(r#"
[project]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.dep1]
git = '{}'
"#, git_project.url()))
.file("src/lib.rs", "
extern crate dep1;
pub fn foo() { dep1::dep() }
");
assert_that(project.cargo_process("build"),
execs().with_stderr("\
[UPDATING] git repository [..]
[COMPILING] dep1 [..]
[COMPILING] foo [..]").with_status(0));
}
#[test]
fn two_deps_only_update_one() {
let project = project("foo");
let git1 = git::new("dep1", |project| {
project
.file("Cargo.toml", r#"
[package]
name = "dep1"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("src/lib.rs", "")
}).unwrap();
let git2 = git::new("dep2", |project| {
project
.file("Cargo.toml", r#"
[package]
name = "dep2"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("src/lib.rs", "")
}).unwrap();
let project = project
.file("Cargo.toml", &format!(r#"
[project]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.dep1]
git = '{}'
[dependencies.dep2]
git = '{}'
"#, git1.url(), git2.url()))
.file("src/main.rs", "fn main() {}");
assert_that(project.cargo_process("build"),
execs()
.with_stderr(&format!("[UPDATING] git repository `[..]`\n\
[UPDATING] git repository `[..]`\n\
[COMPILING] [..] v0.5.0 ([..])\n\
[COMPILING] [..] v0.5.0 ([..])\n\
[COMPILING] foo v0.5.0 ({})\n",
project.url())));
File::create(&git1.root().join("src/lib.rs")).unwrap().write_all(br#"
pub fn foo() {}
"#).unwrap();
let repo = git2::Repository::open(&git1.root()).unwrap();
git::add(&repo);
git::commit(&repo);
assert_that(project.cargo("update")
.arg("-p").arg("dep1"),
execs()
.with_stderr(&format!("[UPDATING] git repository `{}`\n\
[UPDATING] dep1 v0.5.0 ([..]) -> #[..]\n\
", git1.url())));
}
#[test]
fn stale_cached_version() {
let bar = git::new("meta-dep", |project| {
project.file("Cargo.toml", r#"
[package]
name = "bar"
version = "0.0.0"
authors = []
"#)
.file("src/lib.rs", "pub fn bar() -> i32 { 1 }")
}).unwrap();
// Update the git database in the cache with the current state of the git
// repo
let foo = project("foo")
.file("Cargo.toml", &format!(r#"
[project]
name = "foo"
version = "0.0.0"
authors = []
[dependencies.bar]
git = '{}'
"#, bar.url()))
.file("src/main.rs", r#"
extern crate bar;
fn main() { assert_eq!(bar::bar(), 1) }
"#);
assert_that(foo.cargo_process("build"), execs().with_status(0));
assert_that(foo.process(&foo.bin("foo")), execs().with_status(0));
// Update the repo, and simulate someone else updating the lockfile and then
// us pulling it down.
File::create(&bar.root().join("src/lib.rs")).unwrap().write_all(br#"
pub fn bar() -> i32 { 1 + 0 }
"#).unwrap();
let repo = git2::Repository::open(&bar.root()).unwrap();
git::add(&repo);
git::commit(&repo);
sleep_ms(1000);
let rev = repo.revparse_single("HEAD").unwrap().id();
File::create(&foo.root().join("Cargo.lock")).unwrap().write_all(format!(r#"
[root]
name = "foo"
version = "0.0.0"
dependencies = [
'bar 0.0.0 (git+{url}#{hash})'
]
[[package]]
name = "bar"
version = "0.0.0"
source = 'git+{url}#{hash}'
"#, url = bar.url(), hash = rev).as_bytes()).unwrap();
// Now build!
assert_that(foo.cargo("build"),
execs().with_status(0)
.with_stderr(&format!("\
[UPDATING] git repository `{bar}`
[COMPILING] bar v0.0.0 ({bar}#[..])
[COMPILING] foo v0.0.0 ({foo})
", bar = bar.url(), foo = foo.url())));
assert_that(foo.process(&foo.bin("foo")), execs().with_status(0));
}
#[test]
fn dep_with_changed_submodule() {
let project = project("foo");
let git_project = git::new("dep1", |project| {
project
.file("Cargo.toml", r#"
[package]
name = "dep1"
version = "0.5.0"
authors = ["[email protected]"]
"#)
}).unwrap();
let git_project2 = git::new("dep2", |project| {
project
.file("lib.rs", "pub fn dep() -> &'static str { \"project2\" }")
}).unwrap();
let git_project3 = git::new("dep3", |project| {
project
.file("lib.rs", "pub fn dep() -> &'static str { \"project3\" }")
}).unwrap();
let repo = git2::Repository::open(&git_project.root()).unwrap();
let mut sub = git::add_submodule(&repo, &git_project2.url().to_string(),
&Path::new("src"));
git::commit(&repo);
let project = project
.file("Cargo.toml", &format!(r#"
[project]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.dep1]
git = '{}'
"#, git_project.url()))
.file("src/main.rs", "
extern crate dep1;
pub fn main() { println!(\"{}\", dep1::dep()) }
");
println!("first run");
assert_that(project.cargo_process("run"), execs()
.with_stderr("[UPDATING] git repository `[..]`\n\
[COMPILING] dep1 v0.5.0 ([..])\n\
[COMPILING] foo v0.5.0 ([..])\n\
[RUNNING] `target[..]foo[..]`\n")
.with_stdout("project2\n")
.with_status(0));
File::create(&git_project.root().join(".gitmodules")).unwrap()
.write_all(format!("[submodule \"src\"]\n\tpath = src\n\turl={}",
git_project3.url()).as_bytes()).unwrap();
// Sync the submodule and reset it to the new remote.
sub.sync().unwrap();
{
let subrepo = sub.open().unwrap();
subrepo.remote_add_fetch("origin",
"refs/heads/*:refs/heads/*").unwrap();
subrepo.remote_set_url("origin",
&git_project3.url().to_string()).unwrap();
let mut origin = subrepo.find_remote("origin").unwrap();
origin.fetch(&[], None, None).unwrap();
let id = subrepo.refname_to_id("refs/remotes/origin/master").unwrap();
let obj = subrepo.find_object(id, None).unwrap();
subrepo.reset(&obj, git2::ResetType::Hard, None).unwrap();
}
sub.add_to_index(true).unwrap();
git::add(&repo);
git::commit(&repo);
sleep_ms(1000);
// Update the dependency and carry on!
println!("update");
assert_that(project.cargo("update").arg("-v"),
execs()
.with_stderr("")
.with_stderr(&format!("[UPDATING] git repository `{}`\n\
[UPDATING] dep1 v0.5.0 ([..]) -> #[..]\n\
", git_project.url())));
println!("last run");
assert_that(project.cargo("run"), execs()
.with_stderr("[COMPILING] dep1 v0.5.0 ([..])\n\
[COMPILING] foo v0.5.0 ([..])\n\
[RUNNING] `target[..]foo[..]`\n")
.with_stdout("project3\n")
.with_status(0));
}
#[test]
fn dev_deps_with_testing() {
let p2 = git::new("bar", |project| {
project.file("Cargo.toml", r#"
[package]
name = "bar"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("src/lib.rs", r#"
pub fn gimme() -> &'static str { "zoidberg" }
"#)
}).unwrap();
let p = project("foo")
.file("Cargo.toml", &format!(r#"
[project]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
[dev-dependencies.bar]
version = "0.5.0"
git = '{}'
"#, p2.url()))
.file("src/main.rs", r#"
fn main() {}
#[cfg(test)]
mod tests {
extern crate bar;
#[test] fn foo() { bar::gimme(); }
}
"#);
// Generate a lockfile which did not use `bar` to compile, but had to update
// `bar` to generate the lockfile
assert_that(p.cargo_process("build"),
execs().with_stderr(&format!("\
[UPDATING] git repository `{bar}`
[COMPILING] foo v0.5.0 ({url})
", url = p.url(), bar = p2.url())));
// Make sure we use the previous resolution of `bar` instead of updating it
// a second time.
assert_that(p.cargo("test"),
execs().with_stderr("\
[COMPILING] [..] v0.5.0 ([..])
[COMPILING] [..] v0.5.0 ([..]
[RUNNING] target[..]foo-[..]")
.with_stdout("
running 1 test
test tests::foo ... ok
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured
"));
}
#[test]
fn git_build_cmd_freshness() {
let foo = git::new("foo", |project| {
project.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.0"
authors = []
build = "build.rs"
"#)
.file("build.rs", "fn main() {}")
.file("src/lib.rs", "pub fn bar() -> i32 { 1 }")
.file(".gitignore", "
src/bar.rs
")
}).unwrap();
foo.root().move_into_the_past();
sleep_ms(1000);
assert_that(foo.cargo("build"),
execs().with_status(0)
.with_stderr(&format!("\
[COMPILING] foo v0.0.0 ({url})
", url = foo.url())));
// Smoke test to make sure it doesn't compile again
println!("first pass");
assert_that(foo.cargo("build"),
execs().with_status(0)
.with_stdout(""));
// Modify an ignored file and make sure we don't rebuild
println!("second pass");
File::create(&foo.root().join("src/bar.rs")).unwrap();
assert_that(foo.cargo("build"),
execs().with_status(0)
.with_stdout(""));
}
#[test]
fn git_name_not_always_needed() {
let p2 = git::new("bar", |project| {
project.file("Cargo.toml", r#"
[package]
name = "bar"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("src/lib.rs", r#"
pub fn gimme() -> &'static str { "zoidberg" }
"#)
}).unwrap();
let repo = git2::Repository::open(&p2.root()).unwrap();
let mut cfg = repo.config().unwrap();
let _ = cfg.remove("user.name");
let _ = cfg.remove("user.email");
let p = project("foo")
.file("Cargo.toml", &format!(r#"
[project]
name = "foo"
version = "0.5.0"
authors = []
[dev-dependencies.bar]
git = '{}'
"#, p2.url()))
.file("src/main.rs", "fn main() {}");
// Generate a lockfile which did not use `bar` to compile, but had to update
// `bar` to generate the lockfile
assert_that(p.cargo_process("build"),
execs().with_stderr(&format!("\
[UPDATING] git repository `{bar}`
[COMPILING] foo v0.5.0 ({url})
", url = p.url(), bar = p2.url())));
}
#[test]
fn git_repo_changing_no_rebuild() {
let bar = git::new("bar", |project| {
project.file("Cargo.toml", r#"
[package]
name = "bar"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("src/lib.rs", "pub fn bar() -> i32 { 1 }")
}).unwrap();
// Lock p1 to the first rev in the git repo
let p1 = project("p1")
.file("Cargo.toml", &format!(r#"
[project]
name = "p1"
version = "0.5.0"
authors = []
build = 'build.rs'
[dependencies.bar]
git = '{}'
"#, bar.url()))
.file("src/main.rs", "fn main() {}")
.file("build.rs", "fn main() {}");
p1.build();
p1.root().move_into_the_past();
assert_that(p1.cargo("build"),
execs().with_stderr(&format!("\
[UPDATING] git repository `{bar}`
[COMPILING] [..]
[COMPILING] [..]
", bar = bar.url())));
// Make a commit to lock p2 to a different rev
File::create(&bar.root().join("src/lib.rs")).unwrap().write_all(br#"
pub fn bar() -> i32 { 2 }
"#).unwrap();
let repo = git2::Repository::open(&bar.root()).unwrap();
git::add(&repo);
git::commit(&repo);
// Lock p2 to the second rev
let p2 = project("p2")
.file("Cargo.toml", &format!(r#"
[project]
name = "p2"
version = "0.5.0"
authors = []
[dependencies.bar]
git = '{}'
"#, bar.url()))
.file("src/main.rs", "fn main() {}");
assert_that(p2.cargo_process("build"),
execs().with_stderr(&format!("\
[UPDATING] git repository `{bar}`
[COMPILING] [..]
[COMPILING] [..]
", bar = bar.url())));
// And now for the real test! Make sure that p1 doesn't get rebuilt
// even though the git repo has changed.
assert_that(p1.cargo("build"),
execs().with_stdout(""));
}
#[test]
fn git_dep_build_cmd() {
let p = git::new("foo", |project| {
project.file("Cargo.toml", r#"
[project]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.bar]
version = "0.5.0"
path = "bar"
[[bin]]
name = "foo"
"#)
.file("src/foo.rs",
&main_file(r#""{}", bar::gimme()"#, &["bar"]))
.file("bar/Cargo.toml", r#"
[project]
name = "bar"
version = "0.5.0"
authors = ["[email protected]"]
build = "build.rs"
[lib]
name = "bar"
"#)
.file("bar/src/bar.rs.in", r#"
pub fn gimme() -> i32 { 0 }
"#)
.file("bar/build.rs", r#"
use std::fs;
fn main() {
fs::copy("src/bar.rs.in", "src/bar.rs").unwrap();
}
"#)
}).unwrap();
p.root().join("bar").move_into_the_past();
assert_that(p.cargo("build"),
execs().with_status(0));
assert_that(process(&p.bin("foo")),
execs().with_stdout("0\n"));
// Touching bar.rs.in should cause the `build` command to run again.
fs::File::create(&p.root().join("bar/src/bar.rs.in")).unwrap()
.write_all(b"pub fn gimme() -> i32 { 1 }").unwrap();
assert_that(p.cargo("build"),
execs().with_status(0));
assert_that(process(&p.bin("foo")),
execs().with_stdout("1\n"));
}
#[test]
fn fetch_downloads() {
let bar = git::new("bar", |project| {
project.file("Cargo.toml", r#"
[package]
name = "bar"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("src/lib.rs", "pub fn bar() -> i32 { 1 }")
}).unwrap();
let p = project("p1")
.file("Cargo.toml", &format!(r#"
[project]
name = "p1"
version = "0.5.0"
authors = []
[dependencies.bar]
git = '{}'
"#, bar.url()))
.file("src/main.rs", "fn main() {}");
assert_that(p.cargo_process("fetch"),
execs().with_status(0).with_stderr(&format!("\
[UPDATING] git repository `{url}`
", url = bar.url())));
assert_that(p.cargo("fetch"),
execs().with_status(0).with_stdout(""));
}
#[test]
fn warnings_in_git_dep() {
let bar = git::new("bar", |project| {
project.file("Cargo.toml", r#"
[package]
name = "bar"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("src/lib.rs", "fn unused() {}")
}).unwrap();
let p = project("foo")
.file("Cargo.toml", &format!(r#"
[project]
name = "foo"
version = "0.5.0"
authors = []
[dependencies.bar]
git = '{}'
"#, bar.url()))
.file("src/main.rs", "fn main() {}");
assert_that(p.cargo_process("build"),
execs()
.with_stderr(&format!("[UPDATING] git repository `{}`\n\
[COMPILING] bar v0.5.0 ({}#[..])\n\
[COMPILING] foo v0.5.0 ({})\n",
bar.url(),
bar.url(),
p.url())));
}
#[test]
fn update_ambiguous() {
let foo1 = git::new("foo1", |project| {
project.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("src/lib.rs", "")
}).unwrap();
let foo2 = git::new("foo2", |project| {
project.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.6.0"
authors = ["[email protected]"]
"#)
.file("src/lib.rs", "")
}).unwrap();
let bar = git::new("bar", |project| {
project.file("Cargo.toml", &format!(r#"
[package]
name = "bar"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.foo]
git = '{}'
"#, foo2.url()))
.file("src/lib.rs", "")
}).unwrap();
let p = project("project")
.file("Cargo.toml", &format!(r#"
[project]
name = "project"
version = "0.5.0"
authors = []
[dependencies.foo]
git = '{}'
[dependencies.bar]
git = '{}'
"#, foo1.url(), bar.url()))
.file("src/main.rs", "fn main() {}");
assert_that(p.cargo_process("generate-lockfile"), execs().with_status(0));
assert_that(p.cargo("update")
.arg("-p").arg("foo"),
execs().with_status(101)
.with_stderr("\
[ERROR] There are multiple `foo` packages in your project, and the specification `foo` \
is ambiguous.
Please re-run this command with `-p <spec>` where `<spec>` is one of the \
following:
foo:0.[..].0
foo:0.[..].0
"));
}
#[test]
fn update_one_dep_in_repo_with_many_deps() {
let foo = git::new("foo", |project| {
project.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("src/lib.rs", "")
.file("a/Cargo.toml", r#"
[package]
name = "a"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("a/src/lib.rs", "")
}).unwrap();
let p = project("project")
.file("Cargo.toml", &format!(r#"
[project]
name = "project"
version = "0.5.0"
authors = []
[dependencies.foo]
git = '{}'
[dependencies.a]
git = '{}'
"#, foo.url(), foo.url()))
.file("src/main.rs", "fn main() {}");
assert_that(p.cargo_process("generate-lockfile"), execs().with_status(0));
assert_that(p.cargo("update")
.arg("-p").arg("foo"),
execs().with_status(0)
.with_stderr(&format!("\
[UPDATING] git repository `{}`
", foo.url())));
}
#[test]
fn switch_deps_does_not_update_transitive() {
let transitive = git::new("transitive", |project| {
project.file("Cargo.toml", r#"
[package]
name = "transitive"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("src/lib.rs", "")
}).unwrap();
let dep1 = git::new("dep1", |project| {
project.file("Cargo.toml", &format!(r#"
[package]
name = "dep"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.transitive]
git = '{}'
"#, transitive.url()))
.file("src/lib.rs", "")
}).unwrap();
let dep2 = git::new("dep2", |project| {
project.file("Cargo.toml", &format!(r#"
[package]
name = "dep"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.transitive]
git = '{}'
"#, transitive.url()))
.file("src/lib.rs", "")
}).unwrap();
let p = project("project")
.file("Cargo.toml", &format!(r#"
[project]
name = "project"
version = "0.5.0"
authors = []
[dependencies.dep]
git = '{}'
"#, dep1.url()))
.file("src/main.rs", "fn main() {}");
p.build();
assert_that(p.cargo("build"),
execs().with_status(0)
.with_stderr(&format!("\
[UPDATING] git repository `{}`
[UPDATING] git repository `{}`
[COMPILING] transitive [..]
[COMPILING] dep [..]
[COMPILING] project [..]
", dep1.url(), transitive.url())));
// Update the dependency to point to the second repository, but this
// shouldn't update the transitive dependency which is the same.
File::create(&p.root().join("Cargo.toml")).unwrap().write_all(format!(r#"
[project]
name = "project"
version = "0.5.0"
authors = []
[dependencies.dep]
git = '{}'
"#, dep2.url()).as_bytes()).unwrap();
assert_that(p.cargo("build"),
execs().with_status(0)
.with_stderr(&format!("\
[UPDATING] git repository `{}`
[COMPILING] dep [..]
[COMPILING] project [..]
", dep2.url())));
}
#[test]
fn update_one_source_updates_all_packages_in_that_git_source() {
let dep = git::new("dep", |project| {
project.file("Cargo.toml", r#"
[package]
name = "dep"
version = "0.5.0"
authors = []
[dependencies.a]
path = "a"
"#)
.file("src/lib.rs", "")
.file("a/Cargo.toml", r#"
[package]
name = "a"
version = "0.5.0"
authors = []
"#)
.file("a/src/lib.rs", "")
}).unwrap();
let p = project("project")
.file("Cargo.toml", &format!(r#"
[project]
name = "project"
version = "0.5.0"
authors = []
[dependencies.dep]
git = '{}'
"#, dep.url()))
.file("src/main.rs", "fn main() {}");
p.build();
assert_that(p.cargo("build"),
execs().with_status(0));
let repo = git2::Repository::open(&dep.root()).unwrap();
let rev1 = repo.revparse_single("HEAD").unwrap().id();
// Just be sure to change a file
File::create(&dep.root().join("src/lib.rs")).unwrap().write_all(br#"
pub fn bar() -> i32 { 2 }
"#).unwrap();
git::add(&repo);
git::commit(&repo);
assert_that(p.cargo("update").arg("-p").arg("dep"),
execs().with_status(0));
let mut lockfile = String::new();
File::open(&p.root().join("Cargo.lock")).unwrap()
.read_to_string(&mut lockfile).unwrap();
assert!(!lockfile.contains(&rev1.to_string()),
"{} in {}", rev1, lockfile);
}
#[test]
fn switch_sources() {
let a1 = git::new("a1", |project| {
project.file("Cargo.toml", r#"
[package]
name = "a"
version = "0.5.0"
authors = []
"#)
.file("src/lib.rs", "")
}).unwrap();
let a2 = git::new("a2", |project| {
project.file("Cargo.toml", r#"
[package]
name = "a"
version = "0.5.1"
authors = []
"#)
.file("src/lib.rs", "")
}).unwrap();
let p = project("project")
.file("Cargo.toml", r#"
[project]
name = "project"
version = "0.5.0"
authors = []
[dependencies.b]
path = "b"
"#)
.file("src/main.rs", "fn main() {}")
.file("b/Cargo.toml", &format!(r#"
[project]
name = "b"
version = "0.5.0"
authors = []
[dependencies.a]
git = '{}'
"#, a1.url()))
.file("b/src/lib.rs", "pub fn main() {}");
p.build();
assert_that(p.cargo("build"),
execs().with_status(0)
.with_stderr("\
[UPDATING] git repository `file://[..]a1`
[COMPILING] a v0.5.0 ([..]a1#[..]
[COMPILING] b v0.5.0 ([..])
[COMPILING] project v0.5.0 ([..])
"));
File::create(&p.root().join("b/Cargo.toml")).unwrap().write_all(format!(r#"
[project]
name = "b"
version = "0.5.0"
authors = []
[dependencies.a]
git = '{}'
"#, a2.url()).as_bytes()).unwrap();
assert_that(p.cargo("build"),
execs().with_status(0)
.with_stderr("\
[UPDATING] git repository `file://[..]a2`
[COMPILING] a v0.5.1 ([..]a2#[..]
[COMPILING] b v0.5.0 ([..])
[COMPILING] project v0.5.0 ([..])
"));
}
#[test]
fn dont_require_submodules_are_checked_out() {
let project = project("foo");
let git1 = git::new("dep1", |p| {
p.file("Cargo.toml", r#"
[project]
name = "foo"
version = "0.5.0"
authors = []
build = "build.rs"
"#)
.file("build.rs", "fn main() {}")
.file("src/lib.rs", "")
.file("a/foo", "")
}).unwrap();
let git2 = git::new("dep2", |p| p).unwrap();
let repo = git2::Repository::open(&git1.root()).unwrap();
let url = path2url(git2.root()).to_string();
git::add_submodule(&repo, &url, &Path::new("a/submodule"));
git::commit(&repo);
git2::Repository::init(&project.root()).unwrap();
let url = path2url(git1.root()).to_string();
let dst = paths::home().join("foo");
git2::Repository::clone(&url, &dst).unwrap();
assert_that(git1.cargo("build").arg("-v").cwd(&dst),
execs().with_status(0));
}
#[test]
fn doctest_same_name() {
let a2 = git::new("a2", |p| {
p.file("Cargo.toml", r#"
[project]
name = "a"
version = "0.5.0"
authors = []
"#)
.file("src/lib.rs", "pub fn a2() {}")
}).unwrap();
let a1 = git::new("a1", |p| {
p.file("Cargo.toml", &format!(r#"
[project]
name = "a"
version = "0.5.0"
authors = []
[dependencies]
a = {{ git = '{}' }}
"#, a2.url()))
.file("src/lib.rs", "extern crate a; pub fn a1() {}")
}).unwrap();
let p = project("foo")
.file("Cargo.toml", &format!(r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
a = {{ git = '{}' }}
"#, a1.url()))
.file("src/lib.rs", r#"
#[macro_use]
extern crate a;
"#);
assert_that(p.cargo_process("test").arg("-v"),
execs().with_status(0));
}
#[test]
fn lints_are_suppressed() {
let a = git::new("a", |p| {
p.file("Cargo.toml", r#"
[project]
name = "a"
version = "0.5.0"
authors = []
"#)
.file("src/lib.rs", "
use std::option;
")
}).unwrap();
let p = project("foo")
.file("Cargo.toml", &format!(r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
a = {{ git = '{}' }}
"#, a.url()))
.file("src/lib.rs", "");
assert_that(p.cargo_process("build"),
execs().with_status(0).with_stderr("\
[UPDATING] git repository `[..]`
[COMPILING] a v0.5.0 ([..])
[COMPILING] foo v0.0.1 ([..])
"));
}
#[test]
fn denied_lints_are_allowed() {
let enabled = RUSTC.with(|r| r.cap_lints);
if !enabled { return }
let a = git::new("a", |p| {
p.file("Cargo.toml", r#"
[project]
name = "a"
version = "0.5.0"
authors = []
"#)
.file("src/lib.rs", "
#![deny(warnings)]
use std::option;
")
}).unwrap();
let p = project("foo")
.file("Cargo.toml", &format!(r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
a = {{ git = '{}' }}
"#, a.url()))
.file("src/lib.rs", "");
assert_that(p.cargo_process("build"),
execs().with_status(0).with_stderr("\
[UPDATING] git repository `[..]`
[COMPILING] a v0.5.0 ([..])
[COMPILING] foo v0.0.1 ([..])
"));
}
#[test]
fn add_a_git_dep() {
let git = git::new("git", |p| {
p.file("Cargo.toml", r#"
[project]
name = "git"
version = "0.5.0"
authors = []
"#)
.file("src/lib.rs", "")
}).unwrap();
let p = project("foo")
.file("Cargo.toml", &format!(r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies]
a = {{ path = 'a' }}
git = {{ git = '{}' }}
"#, git.url()))
.file("src/lib.rs", "")
.file("a/Cargo.toml", r#"
[package]
name = "a"
version = "0.0.1"
authors = []
"#)
.file("a/src/lib.rs", "");
assert_that(p.cargo_process("build"), execs().with_status(0));
File::create(p.root().join("a/Cargo.toml")).unwrap().write_all(format!(r#"
[package]
name = "a"
version = "0.0.1"
authors = []
[dependencies]
git = {{ git = '{}' }}
"#, git.url()).as_bytes()).unwrap();
assert_that(p.cargo("build"), execs().with_status(0));
}
| 28.808013 | 89 | 0.451708 |
e68b41ad188792eefe3e52fe0e861f3d06a4d7e5 | 5,169 | use rustc::mir;
use rustc_errors::struct_span_err;
use super::FunctionCx;
use super::LocalRef;
use super::OperandValue;
use crate::traits::BuilderMethods;
use crate::traits::*;
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_statement(&mut self, mut bx: Bx, statement: &mir::Statement<'tcx>) -> Bx {
debug!("codegen_statement(statement={:?})", statement);
self.set_debug_loc(&mut bx, statement.source_info);
match statement.kind {
mir::StatementKind::Assign(box (ref place, ref rvalue)) => {
if let Some(index) = place.as_local() {
match self.locals[index] {
LocalRef::Place(cg_dest) => self.codegen_rvalue(bx, cg_dest, rvalue),
LocalRef::UnsizedPlace(cg_indirect_dest) => {
self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue)
}
LocalRef::Operand(None) => {
let (mut bx, operand) = self.codegen_rvalue_operand(bx, rvalue);
self.locals[index] = LocalRef::Operand(Some(operand));
self.debug_introduce_local(&mut bx, index);
bx
}
LocalRef::Operand(Some(op)) => {
if !op.layout.is_zst() {
span_bug!(
statement.source_info.span,
"operand {:?} already assigned",
rvalue
);
}
// If the type is zero-sized, it's already been set here,
// but we still need to make sure we codegen the operand
self.codegen_rvalue_operand(bx, rvalue).0
}
}
} else {
let cg_dest = self.codegen_place(&mut bx, place.as_ref());
self.codegen_rvalue(bx, cg_dest, rvalue)
}
}
mir::StatementKind::SetDiscriminant { box ref place, variant_index } => {
self.codegen_place(&mut bx, place.as_ref())
.codegen_set_discr(&mut bx, variant_index);
bx
}
mir::StatementKind::StorageLive(local) => {
if let LocalRef::Place(cg_place) = self.locals[local] {
cg_place.storage_live(&mut bx);
} else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
cg_indirect_place.storage_live(&mut bx);
}
bx
}
mir::StatementKind::StorageDead(local) => {
if let LocalRef::Place(cg_place) = self.locals[local] {
cg_place.storage_dead(&mut bx);
} else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
cg_indirect_place.storage_dead(&mut bx);
}
bx
}
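// Inline assembly: lower each output to a place, fold the inputs into a
// vector of immediates (non-immediate inputs are rejected with E0669), and
// report a backend failure to emit the asm as E0668.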
mir::StatementKind::InlineAsm(ref asm) => {
let outputs = asm
.outputs
.iter()
.map(|output| self.codegen_place(&mut bx, output.as_ref()))
.collect();
let input_vals = asm.inputs.iter().fold(
Vec::with_capacity(asm.inputs.len()),
|mut acc, (span, input)| {
let op = self.codegen_operand(&mut bx, input);
if let OperandValue::Immediate(_) = op.val {
acc.push(op.immediate());
} else {
struct_span_err!(
bx.sess(),
span.to_owned(),
E0669,
"invalid value for constraint in inline assembly"
)
.emit();
}
acc
},
);
if input_vals.len() == asm.inputs.len() {
let res = bx.codegen_inline_asm(
&asm.asm,
outputs,
input_vals,
statement.source_info.span,
);
if !res {
struct_span_err!(
bx.sess(),
statement.source_info.span,
E0668,
"malformed inline assembly"
)
.emit();
}
}
bx
}
mir::StatementKind::FakeRead(..)
| mir::StatementKind::Retag { .. }
| mir::StatementKind::AscribeUserType(..)
| mir::StatementKind::Nop => bx,
}
}
}
| 42.719008 | 94 | 0.418069 |
f83a2283de203d067c9c03528f38171070cda721 | 712 | // Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main() -> miette::Result<()> {
let path = std::path::PathBuf::from("src");
let mut b = autocxx_build::Builder::new("src/main.rs", &[&path]).build()?;
b.flag_if_supported("-std=c++14")
.compile("autocxx-non-trivial-type-on-stack-example");
println!("cargo:rerun-if-changed=src/main.rs");
println!("cargo:rerun-if-changed=src/cpp.h");
Ok(())
}
| 39.555556 | 78 | 0.669944 |
dd4edf330700f143e1907808af572001f24c1576 | 1,697 | // Copyright 2018, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
// Author: Ryan A. Pavlik <[email protected]>
use crate::{unbuffer::UnbufferConstantSize, Buffer};
/// Trait for computing the buffer size needed for types
/// that can be "buffered" (serialized to a byte buffer),
pub trait BufferSize {
/// Indicates the number of bytes required in the buffer to store this.
fn buffer_size(&self) -> usize;
}
impl<T: ConstantBufferSize> BufferSize for T {
fn buffer_size(&self) -> usize {
T::constant_buffer_size()
}
}
pub trait WrappedConstantSize {
type WrappedType: Buffer + UnbufferConstantSize + ConstantBufferSize;
fn get(&self) -> Self::WrappedType;
fn new(v: Self::WrappedType) -> Self;
}
impl<T: WrappedConstantSize> ConstantBufferSize for T {
fn constant_buffer_size() -> usize {
T::WrappedType::constant_buffer_size()
}
}
/// Optional trait for things that always take the same amount of space in a buffer.
///
/// Implementing this trait gets you implementations of a bunch of buffer/unbuffer-related traits for free.
pub trait ConstantBufferSize {
/// Get the amount of space needed in a buffer.
fn constant_buffer_size() -> usize
where
Self: Sized,
{
std::mem::size_of::<Self>()
}
}
/// Trait implemented by empty messages (no body)
/// so that they can easily get their trivial/null serialization support.
pub trait EmptyMessage: Default + std::fmt::Debug {}
impl<T: EmptyMessage> WrappedConstantSize for T {
type WrappedType = ();
fn get(&self) -> Self::WrappedType {
()
}
fn new(_v: Self::WrappedType) -> Self {
Default::default()
}
}
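// Illustrative sketch (not part of the original source): a body-less message
// type opts into trivial serialization by implementing the marker trait.
//
//     #[derive(Debug, Default)]
//     struct Ping;
//     impl EmptyMessage for Ping {}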
| 29.258621 | 107 | 0.679434 |
265b2e7ef2bcac615bbb6043b68940907d5a16c3 | 1,275 | // Copyright 2020 Shift Crypto AG
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate alloc;
use alloc::string::String;
pub use bitbox02_sys::{BTCCoin, BTCScriptConfig_SimpleType};
pub fn address_simple(
coin: BTCCoin,
script_type: BTCScriptConfig_SimpleType,
keypath: &[u32],
) -> Result<String, ()> {
let mut address = [0u8; 500];
match unsafe {
bitbox02_sys::app_btc_address_simple(
coin,
script_type,
keypath.as_ptr(),
keypath.len() as _,
address.as_mut_ptr(),
address.len() as _,
)
} {
true => Ok(crate::util::str_from_null_terminated(&address[..])
.unwrap()
.into()),
false => Err(()),
}
}
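// Hedged usage sketch (not part of the original source): the keypath below is
// a hypothetical BIP-84 path using the 0x8000_0000 hardening offset; the
// coin/script-type values come from the generated `bitbox02_sys` bindings.
//
//     const HARDENED: u32 = 0x8000_0000;
//     let keypath = [84 + HARDENED, HARDENED, HARDENED, 0, 0];
//     let address = address_simple(coin, simple_type, &keypath)?;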
| 30.357143 | 75 | 0.640784 |
4b50580099e3a243fa20ff82efad56f6fb4a7ecb | 13,391 | /*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Encoding: `#` marks a cursor anchor.
// It points to the character that will be replaced/preceded, not succeeded.
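// Example: "te#xt" denotes the buffer "text" with a single cursor anchored at
// byte offset 2, i.e. just before the 'x' (see get_buffer_test_1 below).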
use crate::buffer_state::BufferState;
use crate::cursor_set::Cursor;
use crate::cursor_set::CursorSet;
use serde::de::Unexpected::Str;
use std::borrow::Borrow;
fn text_to_buffer_cursors(s: &str) -> (BufferState, CursorSet) {
let mut cursors: Vec<usize> = vec![];
let mut text = String::new();
dbg!(s);
for c in s.chars() {
if c == '#' {
cursors.push(text.len());
} else {
text.push(c);
}
}
dbg!(&cursors);
let cursors: Vec<Cursor> = cursors.iter().map(|a| (*a).into()).collect();
(BufferState::from_text(&text), CursorSet::new(cursors))
}
/// TODO add support for the selection, maybe preferred column
fn buffer_cursors_to_text<T: Borrow<BufferState>>(b: T, cs: &CursorSet) -> String {
let mut output = String::new();
let mut anchors: Vec<usize> = cs.set().iter().map(|c| c.a).collect();
anchors.sort();
let buffer = b.borrow().get_content().get_lines().to_string();
let mut prev: usize = 0;
for a in anchors {
if prev < buffer.len() {
output.push_str(&buffer[prev..std::cmp::min(a, buffer.len())]);
}
if output.chars().last() == Some('#') {
continue; // collapse duplicate anchors ("reducing")
}
output.push_str("#");
prev = a;
}
if prev < buffer.len() {
output.push_str(&buffer[prev..]);
}
output
}
fn a_to_c(anchors: Vec<usize>) -> CursorSet {
CursorSet::new(anchors.iter().map(|a| (*a).into()).collect())
}
#[test]
fn get_buffer_test_1() {
let (bs, cs) = text_to_buffer_cursors("te#xt");
let text = bs.get_content().get_lines().to_string();
assert_eq!(text, "text".to_owned());
assert_eq!(cs.set().iter().map(|c| c.a).collect::<Vec<usize>>(), vec![2]);
}
#[test]
fn get_buffer_test_2() {
let (bs, cs) = text_to_buffer_cursors("#text#");
let text = bs.get_content().get_lines().to_string();
assert_eq!(text, "text".to_owned());
assert_eq!(cs.set().iter().map(|c| c.a).collect::<Vec<usize>>(), vec![0, 4]);
}
#[test]
fn buffer_cursors_to_text_1() {
let cursors = a_to_c(vec![2]);
let buffer = BufferState::from_text("text");
let output = buffer_cursors_to_text(&buffer, &cursors);
assert_eq!(output, "te#xt".to_owned());
}
#[test]
fn buffer_cursors_to_text_2() {
let cursors = a_to_c(vec![0, 4]);
let buffer = BufferState::from_text("text");
let output = buffer_cursors_to_text(&buffer, &cursors);
assert_eq!(output, "#text#".to_owned());
}
#[test]
fn buffer_cursors_to_text_3() {
let cursors = a_to_c(vec![1]);
let buffer = BufferState::from_text("text");
let output = buffer_cursors_to_text(&buffer, &cursors);
assert_eq!(output, "t#ext".to_owned());
}
#[test]
fn text_to_buffer_cursors_and_back() {
let text = concat!("t#here was a man\n",
"called paul\n",
"#who went to a fancy\n",
"dr#ess ball\n",
"he just went for fun\n",
"dressed up as bone\n",
"and dog ea#t h#im up in the# hall.\n"
);
let (buffer, cursors) = text_to_buffer_cursors(text);
let back = buffer_cursors_to_text(&buffer, &cursors);
assert_eq!(text, back);
}
fn apply(input: &str, f: fn(&mut CursorSet, &BufferState) -> ()) -> String {
let (bs, mut cs) = text_to_buffer_cursors(input);
f(&mut cs, &bs);
buffer_cursors_to_text(&bs, &cs)
}
#[test]
fn one_cursor_move_left() {
let f : fn(&mut CursorSet, &BufferState) = |c: &mut CursorSet, _| {
c.move_left();
};
assert_eq!(apply("text", f), "text");
assert_eq!(apply("te#xt", f), "t#ext");
assert_eq!(apply("t#ext", f), "#text");
assert_eq!(apply("#text", f), "#text");
assert_eq!(apply("text\n#", f), "text#\n");
}
#[test]
fn one_cursor_move_left_some() {
let f : fn(&mut CursorSet, &BufferState) = |c: &mut CursorSet, _| {
c.move_left_by(3);
};
assert_eq!(apply("text", f), "text");
assert_eq!(apply("te#xt", f), "#text");
assert_eq!(apply("t#ext", f), "#text");
assert_eq!(apply("#text", f), "#text");
assert_eq!(apply("text\n#", f), "te#xt\n");
}
#[test]
fn multiple_cursor_move_left() {
let f : fn(&mut CursorSet, &BufferState) = |c: &mut CursorSet, _| {
c.move_left();
c.reduce();
};
assert_eq!(apply("te#x#t", f), "t#e#xt");
assert_eq!(apply("#t#ext", f), "#text");
assert_eq!(apply("#text\n#", f), "#text#\n");
}
#[test]
fn multiple_cursor_move_left_some() {
let f : fn(&mut CursorSet, &BufferState) = |c: &mut CursorSet, _| {
c.move_left_by(3);
c.reduce();
};
assert_eq!(apply("te#x#t", f), "#text");
assert_eq!(apply("#t#ext", f), "#text");
assert_eq!(apply("#text\n#", f), "#te#xt\n");
}
#[test]
fn one_cursor_move_right() {
let f : fn(&mut CursorSet, &BufferState) = |c: &mut CursorSet, bs : &BufferState| {
c.move_right(&bs);
c.reduce();
};
assert_eq!(apply("text", f), "text");
assert_eq!(apply("te#xt", f), "tex#t");
assert_eq!(apply("t#ext", f), "te#xt");
assert_eq!(apply("#text", f), "t#ext");
assert_eq!(apply("text\n#", f), "text\n#");
}
#[test]
fn one_cursor_move_right_some() {
let f : fn(&mut CursorSet, &BufferState) = |c: &mut CursorSet, bs : &BufferState| {
c.move_right_by(bs, 3);
c.reduce();
};
assert_eq!(apply("text", f), "text");
assert_eq!(apply("te#xt", f), "text#");
assert_eq!(apply("t#ext", f), "text#");
assert_eq!(apply("#text", f), "tex#t");
assert_eq!(apply("text\n#", f), "text\n#");
}
#[test]
fn multiple_cursor_move_right() {
let f : fn(&mut CursorSet, &BufferState) = |c: &mut CursorSet, bs : &BufferState| {
c.move_right(&bs);
c.reduce();
};
assert_eq!(apply("te#x#t", f), "tex#t#");
assert_eq!(apply("#t#ext", f), "t#e#xt");
assert_eq!(apply("#text\n#", f), "t#ext\n#");
assert_eq!(apply("text#\n#", f), "text\n#");
}
#[test]
fn multiple_cursor_move_right_some() {
let f : fn(&mut CursorSet, &BufferState) = |c: &mut CursorSet, bs : &BufferState| {
c.move_right_by(&bs, 3);
c.reduce();
};
assert_eq!(apply("te#x#t", f), "text#");
assert_eq!(apply("#t#ext", f), "tex#t#");
assert_eq!(apply("#text\n#", f), "tex#t\n#");
assert_eq!(apply("text#\n#", f), "text\n#");
}
#[test]
fn single_cursor_move_down_by_1() {
let f : fn(&mut CursorSet, &BufferState) = |c: &mut CursorSet, bs : &BufferState| {
c.move_vertically_by(&bs, 1);
c.reduce();
};
// noop
assert_eq!(apply("aaaa\nbbbb", f), "aaaa\nbbbb");
// moving down the line
assert_eq!(apply("a#aaa\nbbbb", f), "aaaa\nb#bbb");
assert_eq!(apply("aaaa#\nbbbb\ncccc", f), "aaaa\nbbbb#\ncccc");
assert_eq!(apply("aaaa#\nbbbb", f), "aaaa\nbbbb#");
assert_eq!(apply("aaaa\nbb#bb", f), "aaaa\nbbbb#");
// moving within the line
assert_eq!(apply("te#x#t", f), "text#");
assert_eq!(apply("#t#ext", f), "text#");
assert_eq!(apply("#text\n#", f), "text\n#");
assert_eq!(apply("text#\n#", f), "text\n#");
// moving between lines varying in length
assert_eq!(apply("3#33\n22\n1", f), "333\n2#2\n1");
assert_eq!(apply("33#3\n22\n1", f), "333\n22#\n1");
assert_eq!(apply("333#\n22\n1", f), "333\n22#\n1");
assert_eq!(apply("333\n#22\n1", f), "333\n22\n#1");
assert_eq!(apply("333\n2#2\n1", f), "333\n22\n1#");
}
#[test]
fn single_cursor_move_down_by_2() {
let f : fn(&mut CursorSet, &BufferState) = |c: &mut CursorSet, bs : &BufferState| {
c.move_vertically_by(&bs, 2);
c.reduce();
};
// moving down the line
assert_eq!(apply("aa#aa\nbbbb\ncccc", f), "aaaa\nbbbb\ncc#cc");
assert_eq!(apply("aaaa\nbbb#b\ncccc\ndddd", f), "aaaa\nbbbb\ncccc\nddd#d");
assert_eq!(apply("aaaa\nbbbb\nc#ccc\ndddd", f), "aaaa\nbbbb\ncccc\ndddd#");
assert_eq!(apply("aaaa\nbbbb\nc#ccc\ndddd\n", f), "aaaa\nbbbb\ncccc\ndddd\n#");
assert_eq!(apply("aa#a#a\nbbbb\ncccc\ndddd\n", f), "aaaa\nbbbb\ncc#c#c\ndddd\n");
assert_eq!(apply("aaaa\nb#b#b#b\ncccc\ndddd\n", f), "aaaa\nbbbb\ncccc\nd#d#d#d\n");
assert_eq!(apply("a#a#a#a\nbbbb\ncccc\ndddd\n", f), "aaaa\nbbbb\nc#c#c#c\ndddd\n");
    // moving within the line
assert_eq!(apply("a#aaa\nbbbb", f), "aaaa\nbbbb#");
assert_eq!(apply("aaaa#\nbbbb", f), "aaaa\nbbbb#");
assert_eq!(apply("aaaa\nbb#bb", f), "aaaa\nbbbb#");
}
#[test]
fn single_cursor_move_down_by_some() {
let f : fn(&mut CursorSet, &BufferState) = |c: &mut CursorSet, bs : &BufferState| {
c.move_vertically_by(&bs, 3);
c.reduce();
};
{
let text = concat!("t#here was a man\n",
"called paul\n",
"who went to a fancy\n",
"dress ball\n",
"he just went for fun\n",
"dressed up as bone\n",
"and dog eat him up in the hall.\n"
);
let new_text = concat!("there was a man\n",
"called paul\n",
"who went to a fancy\n",
"d#ress ball\n",
"he just went for fun\n",
"dressed up as bone\n",
"and dog eat him up in the hall.\n"
);
assert_eq!(apply(text, f), new_text);
}
{
let text = concat!(
"t#here was a ma#n\n",
"calle#d paul\n",
"who went to a fancy\n",
"dress ball\n",
"he just went for fun\n",
"dressed up as bone\n",
"and dog eat him up in the hall.\n"
);
let new_text = concat!(
"there was a man\n",
"called paul\n",
"who went to a fancy\n",
"d#ress ball#\n",
"he ju#st went for fun\n",
"dressed up as bone\n",
"and dog eat him up in the hall.\n"
);
assert_eq!(apply(text, f), new_text);
}
}
#[test]
fn single_cursor_move_up_by_1() {
let f : fn(&mut CursorSet, &BufferState) = |c: &mut CursorSet, bs : &BufferState| {
c.move_vertically_by(&bs, -1);
c.reduce();
};
// noop
assert_eq!(apply("aaaa\nbbbb", f), "aaaa\nbbbb");
assert_eq!(apply("a#aaa\nbbbb", f), "#aaaa\nbbbb");
assert_eq!(apply("aaaa#\nbbbb", f), "#aaaa\nbbbb");
assert_eq!(apply("aaaa\nbbbb\ncccc#", f), "aaaa\nbbbb#\ncccc");
assert_eq!(apply("aaaa\nbb#bb", f), "aa#aa\nbbbb");
assert_eq!(apply("te#x#t", f), "#text");
assert_eq!(apply("#t#ext", f), "#text");
assert_eq!(apply("#text\n#", f), "#text\n");
assert_eq!(apply("text#\n#", f), "#text\n");
assert_eq!(apply("3#33\n22\n1", f), "#333\n22\n1");
assert_eq!(apply("333\n#22\n1", f), "#333\n22\n1");
assert_eq!(apply("333\n22#\n1", f), "33#3\n22\n1");
assert_eq!(apply("1\n22\n3#33", f), "1\n2#2\n333");
assert_eq!(apply("1\n22\n33#3", f), "1\n22#\n333");
assert_eq!(apply("1\n22\n333#", f), "1\n22#\n333");
}
#[test]
fn single_cursor_move_up_by_2() {
let f : fn(&mut CursorSet, &BufferState) = |c: &mut CursorSet, bs : &BufferState| {
c.move_vertically_by(&bs, -2);
c.reduce();
};
assert_eq!(apply("aaaa\nbbbb\ncc#cc", f), "aa#aa\nbbbb\ncccc");
assert_eq!(apply("aaaa\nbbbb\ncccc\nddd#d", f), "aaaa\nbbb#b\ncccc\ndddd");
assert_eq!(apply("aaaa\nbb#bb\ncccc\ndddd", f), "#aaaa\nbbbb\ncccc\ndddd");
assert_eq!(apply("aaaa\nbbbb\ncccc\ndddd\n#", f), "aaaa\nbbbb\n#cccc\ndddd\n");
assert_eq!(apply("aaaa\nbbbb\ncc#c#c\ndddd\n", f), "aa#a#a\nbbbb\ncccc\ndddd\n");
assert_eq!(apply("aaaa\nbbbb\ncccc\nd#d#d#d\n", f), "aaaa\nb#b#b#b\ncccc\ndddd\n");
assert_eq!(apply("aaaa\nbbbb\nc#c#c#c\ndddd\n", f), "a#a#a#a\nbbbb\ncccc\ndddd\n");
assert_eq!(apply("aaaa\nbb#bb", f), "#aaaa\nbbbb");
assert_eq!(apply("aaaa\nbbbb#", f), "#aaaa\nbbbb");
assert_eq!(apply("aaaa\nbbb#b", f), "#aaaa\nbbbb");
}
#[test]
fn single_cursor_move_up_by_some() {
let f : fn(&mut CursorSet, &BufferState) = |c: &mut CursorSet, bs : &BufferState| {
c.move_vertically_by(&bs, -3);
c.reduce();
};
{
let text = concat!("t#here was a man\n",
"called paul\n",
"who went to a fancy\n",
"dress ball\n",
"he just went for fun\n",
"d#ressed up as bone\n",
"and dog eat him up in the hall.#\n"
);
let new_text = concat!("#there was a man\n",
"called paul\n",
"w#ho went to a fancy\n",
"dress ball#\n",
"he just went for fun\n",
"dressed up as bone\n",
"and dog eat him up in the hall.\n"
);
assert_eq!(apply(text, f), new_text);
}
} | 30.503417 | 87 | 0.5665 |
227a26c92ad4f7b73041e03ef44959979278c24c | 1,965 | use crate::{
environment::EnvironmentKind,
error::{mdbx_result, Result},
transaction::{txn_execute, TransactionKind},
Transaction,
};
use libc::c_uint;
use std::{ffi::CString, marker::PhantomData, ptr};
/// A handle to an individual database in an environment.
///
/// A database handle denotes the name and parameters of a database in an environment.
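///
/// A minimal usage sketch (hedged: assumes the crate's usual `Environment`
/// and read-only transaction API; `ignore`d because it needs a live MDBX
/// environment on disk):
///
/// ```rust,ignore
/// let env = Environment::new().open(Path::new("./db"))?;
/// let txn = env.begin_ro_txn()?;
/// let db = txn.open_db(None)?; // `None` opens the default (unnamed) database
/// ```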
#[derive(Debug)]
pub struct Database<'txn> {
dbi: ffi::MDBX_dbi,
_marker: PhantomData<&'txn ()>,
}
impl<'txn> Database<'txn> {
/// Opens a new database handle in the given transaction.
///
/// Prefer using `Environment::open_db`, `Environment::create_db`, `TransactionExt::open_db`,
/// or `RwTransaction::create_db`.
pub(crate) fn new<'env, K: TransactionKind, E: EnvironmentKind>(
txn: &'txn Transaction<'env, K, E>,
name: Option<&str>,
flags: c_uint,
) -> Result<Self> {
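        // MDBX expects a NUL-terminated database name, or a null pointer for
        // the default (unnamed) database.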
let c_name = name.map(|n| CString::new(n).unwrap());
let name_ptr = if let Some(c_name) = &c_name {
c_name.as_ptr()
} else {
ptr::null()
};
let mut dbi: ffi::MDBX_dbi = 0;
mdbx_result(txn_execute(&*txn.txn_mutex(), |txn| unsafe {
ffi::mdbx_dbi_open(txn, name_ptr, flags, &mut dbi)
}))?;
Ok(Self::new_from_ptr(dbi))
}
pub(crate) fn new_from_ptr(dbi: ffi::MDBX_dbi) -> Self {
Self {
dbi,
_marker: PhantomData,
}
}
pub(crate) fn freelist_db() -> Self {
Database {
dbi: 0,
_marker: PhantomData,
}
}
/// Returns the underlying MDBX database handle.
///
/// The caller **must** ensure that the handle is not used after the lifetime of the
/// environment, or after the database has been closed.
pub fn dbi(&self) -> ffi::MDBX_dbi {
self.dbi
}
}
unsafe impl<'txn> Send for Database<'txn> {}
unsafe impl<'txn> Sync for Database<'txn> {}
| 29.328358 | 97 | 0.588804 |
623dcc320bb8dcd83a06b860640f4fb3dbba7733 | 1,813 | // check-pass
//! Test with [Foo::baz], [Bar::foo], ...
//~^ WARNING `[Foo::baz]` cannot be resolved
//~| WARNING `[Bar::foo]` cannot be resolved
//! , [Uniooon::X] and [Qux::Z].
//~^ WARNING `[Uniooon::X]` cannot be resolved
//~| WARNING `[Qux::Z]` cannot be resolved
//!
//! , [Uniooon::X] and [Qux::Z].
//~^ WARNING `[Uniooon::X]` cannot be resolved
//~| WARNING `[Qux::Z]` cannot be resolved
/// [Qux:Y]
//~^ WARNING `[Qux:Y]` cannot be resolved
pub struct Foo {
pub bar: usize,
}
/// Foo
/// bar [BarA] bar //~ WARNING `[BarA]` cannot be resolved
/// baz
pub fn a() {}
/**
* Foo
* bar [BarB] bar //~ WARNING `[BarB]` cannot be resolved
* baz
*/
pub fn b() {}
/** Foo
bar [BarC] bar //~ WARNING `[BarC]` cannot be resolved
baz
let bar_c_1 = 0;
let bar_c_2 = 0;
let g = [bar_c_1];
let h = g[bar_c_2];
*/
pub fn c() {}
#[doc = "Foo\nbar [BarD] bar\nbaz"] //~ WARNING `[BarD]` cannot be resolved
pub fn d() {}
macro_rules! f {
($f:expr) => {
#[doc = $f] //~ WARNING `[BarF]` cannot be resolved
pub fn f() {}
}
}
f!("Foo\nbar [BarF] bar\nbaz");
/** # for example,
*
* time to introduce a link [error]*/ //~ WARNING `[error]` cannot be resolved
pub struct A;
/**
* # for example,
*
* time to introduce a link [error] //~ WARNING `[error]` cannot be resolved
*/
pub struct B;
#[doc = "single line [error]"] //~ WARNING `[error]` cannot be resolved
pub struct C;
#[doc = "single line with \"escaping\" [error]"] //~ WARNING `[error]` cannot be resolved
pub struct D;
/// Item docs. //~ WARNING `[error]` cannot be resolved
#[doc="Hello there!"]
/// [error]
pub struct E;
///
/// docs [error1] //~ WARNING `[error1]` cannot be resolved
/// docs [error2] //~ WARNING `[error2]` cannot be resolved
///
pub struct F;
| 21.329412 | 89 | 0.567016 |
f9aa0397257b8e63de1b1e5575723d8729598509 | 8,879 | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use lint;
use rustc::ty::TyCtxt;
use errors::Applicability;
use syntax::ast;
use syntax_pos::Span;
use rustc::hir::def_id::{DefId, LOCAL_CRATE};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
use rustc::hir::print::visibility_qualified;
use rustc::hir;
use rustc::util::nodemap::DefIdSet;
use rustc_data_structures::fx::FxHashMap;
pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
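    // Union, across every body in the crate, the set of trait imports that
    // type-checking recorded as actually used.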
let mut used_trait_imports = DefIdSet();
for &body_id in tcx.hir.krate().bodies.keys() {
let item_def_id = tcx.hir.body_owner_def_id(body_id);
let imports = tcx.used_trait_imports(item_def_id);
debug!("GatherVisitor: item_def_id={:?} with imports {:#?}", item_def_id, imports);
used_trait_imports.extend(imports.iter());
}
let mut visitor = CheckVisitor { tcx, used_trait_imports };
tcx.hir.krate().visit_all_item_likes(&mut visitor);
unused_crates_lint(tcx);
}
impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for CheckVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &hir::Item) {
if item.vis.node.is_pub() || item.span.is_dummy() {
return;
}
if let hir::ItemKind::Use(ref path, _) = item.node {
self.check_import(item.id, path.span);
}
}
fn visit_trait_item(&mut self, _trait_item: &hir::TraitItem) {
}
fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) {
}
}
struct CheckVisitor<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
used_trait_imports: DefIdSet,
}
impl<'a, 'tcx> CheckVisitor<'a, 'tcx> {
fn check_import(&self, id: ast::NodeId, span: Span) {
let def_id = self.tcx.hir.local_def_id(id);
if !self.tcx.maybe_unused_trait_import(def_id) {
return;
}
let import_def_id = self.tcx.hir.local_def_id(id);
if self.used_trait_imports.contains(&import_def_id) {
return;
}
let msg = if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
format!("unused import: `{}`", snippet)
} else {
"unused import".to_owned()
};
self.tcx.lint_node(lint::builtin::UNUSED_IMPORTS, id, span, &msg);
}
}
fn unused_crates_lint<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>) {
let lint = lint::builtin::UNUSED_EXTERN_CRATES;
// Collect first the crates that are completely unused. These we
// can always suggest removing (no matter which edition we are
// in).
let unused_extern_crates: FxHashMap<DefId, Span> =
tcx.maybe_unused_extern_crates(LOCAL_CRATE)
.iter()
.filter(|&&(def_id, _)| {
// The `def_id` here actually was calculated during resolution (at least
// at the time of this writing) and is being shipped to us via a side
// channel of the tcx. There may have been extra expansion phases,
// however, which ended up removing the `def_id` *after* expansion such
// as the `ReplaceBodyWithLoop` pass (which is a bit of a hack, but hey)
//
// As a result we need to verify that `def_id` is indeed still valid for
// our AST and actually present in the HIR map. If it's not there then
// there's safely nothing to warn about, and otherwise we carry on with
// our execution.
//
// Note that if we carry through to the `extern_mod_stmt_cnum` query
// below it'll cause a panic because `def_id` is actually bogus at this
// point in time otherwise.
if let Some(id) = tcx.hir.as_local_node_id(def_id) {
if tcx.hir.find(id).is_none() {
return false;
}
}
true
})
.filter(|&&(def_id, _)| {
let cnum = tcx.extern_mod_stmt_cnum(def_id).unwrap();
!tcx.is_compiler_builtins(cnum)
&& !tcx.is_panic_runtime(cnum)
&& !tcx.has_global_allocator(cnum)
&& !tcx.has_panic_handler(cnum)
})
.cloned()
.collect();
// Collect all the extern crates (in a reliable order).
let mut crates_to_lint = vec![];
tcx.hir.krate().visit_all_item_likes(&mut CollectExternCrateVisitor {
tcx,
crates_to_lint: &mut crates_to_lint,
});
for extern_crate in &crates_to_lint {
let id = tcx.hir.as_local_node_id(extern_crate.def_id).unwrap();
let item = tcx.hir.expect_item(id);
// If the crate is fully unused, we suggest removing it altogether.
// We do this in any edition.
if extern_crate.warn_if_unused {
if let Some(&span) = unused_extern_crates.get(&extern_crate.def_id) {
let msg = "unused extern crate";
// Removal suggestion span needs to include attributes (Issue #54400)
let span_with_attrs = tcx.get_attrs(extern_crate.def_id).iter()
.map(|attr| attr.span)
.fold(span, |acc, attr_span| acc.to(attr_span));
tcx.struct_span_lint_node(lint, id, span, msg)
.span_suggestion_short_with_applicability(
span_with_attrs,
"remove it",
String::new(),
Applicability::MachineApplicable)
.emit();
continue;
}
}
// If we are not in Rust 2018 edition, then we don't make any further
// suggestions.
if !tcx.sess.rust_2018() {
continue;
}
// If the extern crate isn't in the extern prelude,
// there is no way it can be written as an `use`.
let orig_name = extern_crate.orig_name.unwrap_or(item.name);
if !tcx.extern_prelude.contains(&orig_name) {
continue;
}
// If the extern crate has any attributes, they may have funky
// semantics we can't faithfully represent using `use` (most
// notably `#[macro_use]`). Ignore it.
if !tcx.get_attrs(extern_crate.def_id).is_empty() {
continue;
}
// Otherwise, we can convert it into a `use` of some kind.
let msg = "`extern crate` is not idiomatic in the new edition";
let help = format!(
"convert it to a `{}`",
visibility_qualified(&item.vis, "use")
);
let base_replacement = match extern_crate.orig_name {
Some(orig_name) => format!("use {} as {};", orig_name, item.name),
None => format!("use {};", item.name),
};
let replacement = visibility_qualified(&item.vis, &base_replacement);
tcx.struct_span_lint_node(lint, id, extern_crate.span, msg)
.span_suggestion_short_with_applicability(
extern_crate.span,
&help,
replacement,
Applicability::MachineApplicable,
)
.emit();
}
}
struct CollectExternCrateVisitor<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
crates_to_lint: &'a mut Vec<ExternCrateToLint>,
}
struct ExternCrateToLint {
/// def-id of the extern crate
def_id: DefId,
/// span from the item
span: Span,
/// if `Some`, then this is renamed (`extern crate orig_name as
/// crate_name`), and -- perhaps surprisingly -- this stores the
/// *original* name (`item.name` will contain the new name)
orig_name: Option<ast::Name>,
/// if `false`, the original name started with `_`, so we shouldn't lint
/// about it going unused (but we should still emit idiom lints).
warn_if_unused: bool,
}
impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for CollectExternCrateVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &hir::Item) {
if let hir::ItemKind::ExternCrate(orig_name) = item.node {
let extern_crate_def_id = self.tcx.hir.local_def_id(item.id);
self.crates_to_lint.push(
ExternCrateToLint {
def_id: extern_crate_def_id,
span: item.span,
orig_name,
warn_if_unused: !item.name.as_str().starts_with('_'),
}
);
}
}
fn visit_trait_item(&mut self, _trait_item: &hir::TraitItem) {
}
fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) {
}
}
| 36.539095 | 91 | 0.593535 |
26d8930b7b4edbf1c213b94df08a3a532678ea3e | 3,606 | // Copyright 2020 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use std::sync::Arc;
use common_arrow::arrow::array::Array;
use common_arrow::arrow::array::ArrayRef;
use common_arrow::arrow::compute::cast;
use common_exception::ErrorCode;
use common_exception::Result;
use crate::prelude::*;
use crate::series::IntoSeries;
use crate::series::Series;
pub trait ArrayCast: Debug {
fn cast_with_type(&self, _data_type: &DataType) -> Result<Series> {
Err(ErrorCode::BadDataValueType(format!(
"Unsupported cast_with_type operation for {:?}",
self,
)))
}
}
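/// Casts the raw arrow array to the physical arrow type backing `data_type`
/// and wraps the result back into a `Series`.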
fn cast_ca(ca: &dyn Array, data_type: &DataType) -> Result<Series> {
let arrow_type = data_type.to_arrow();
let arrow_type = get_physical_arrow_type(&arrow_type);
    // we ignore overflow by default: `wrapping_cast` wraps out-of-range
    // values instead of returning an error
let array = cast::wrapping_cast(ca, arrow_type)?;
let array: ArrayRef = Arc::from(array);
Ok(array.into_series())
}
impl<T> ArrayCast for DFPrimitiveArray<T>
where T: DFPrimitiveType
{
fn cast_with_type(&self, data_type: &DataType) -> Result<Series> {
cast_ca(&self.array, data_type)
}
}
impl ArrayCast for DFStringArray {
fn cast_with_type(&self, data_type: &DataType) -> Result<Series> {
cast_ca(&self.array, data_type)
}
}
impl ArrayCast for DFBooleanArray {
fn cast_with_type(&self, data_type: &DataType) -> Result<Series> {
cast_ca(&self.array, data_type)
}
}
impl ArrayCast for DFNullArray {
fn cast_with_type(&self, data_type: &DataType) -> Result<Series> {
match data_type {
DataType::Null => Ok(self.clone().into_series()),
DataType::Boolean => Ok(DFBooleanArray::full_null(self.len()).into_series()),
DataType::UInt8 => Ok(DFUInt8Array::full_null(self.len()).into_series()),
DataType::UInt16 => Ok(DFUInt16Array::full_null(self.len()).into_series()),
DataType::UInt32 => Ok(DFUInt32Array::full_null(self.len()).into_series()),
DataType::UInt64 => Ok(DFUInt64Array::full_null(self.len()).into_series()),
DataType::Int8 => Ok(DFInt8Array::full_null(self.len()).into_series()),
DataType::Int16 => Ok(DFInt16Array::full_null(self.len()).into_series()),
DataType::Int32 => Ok(DFInt32Array::full_null(self.len()).into_series()),
DataType::Int64 => Ok(DFInt64Array::full_null(self.len()).into_series()),
DataType::Float32 => Ok(DFFloat32Array::full_null(self.len()).into_series()),
DataType::Float64 => Ok(DFFloat64Array::full_null(self.len()).into_series()),
DataType::String => Ok(DFStringArray::full_null(self.len()).into_series()),
DataType::List(_) => Ok(DFListArray::full_null(self.len()).into_series()),
_ => Err(ErrorCode::BadDataValueType(format!(
"Unsupported cast_with_type from array: {:?} into data_type: {:?}",
self, data_type,
))),
}
}
}
impl ArrayCast for DFListArray {}
impl ArrayCast for DFStructArray {}
| 38.361702 | 89 | 0.662784 |
4a5ac0c742352a7fb8d66173ac88e9593e62c5e6 | 780 | use std::mem;
use std::slice;
use alga::general::Real;
use na::{Point2, Point3, Vector2, Vector3};
/// Trait that transforms a value to a slice of u8.
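///
/// A sketch of the intended use (assuming `nalgebra`'s `Vector3<f32>`):
///
/// ```rust,ignore
/// let v = Vector3::new(1.0f32, 2.0, 3.0);
/// let bytes: &[u8] = v.as_bytes();
/// assert_eq!(bytes.len(), 3 * std::mem::size_of::<f32>());
/// ```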
pub trait AsBytes {
fn as_bytes<'a>(&'a self) -> &'a [u8];
}
macro_rules! generic_as_bytes_impl(
($t: ident, $dimension: expr) => (
impl<N: Real> AsBytes for $t<N> {
#[inline(always)]
fn as_bytes<'a>(&'a self) -> &'a [u8] {
unsafe {
slice::from_raw_parts(mem::transmute(self), mem::size_of::<N>() * $dimension)
}
}
}
)
);
generic_as_bytes_impl!(Vector2, 2);
generic_as_bytes_impl!(Point2, 2);
generic_as_bytes_impl!(Vector3, 3);
generic_as_bytes_impl!(Point3, 3);
// FIXME: implement for all `T: Copy` insead?
| 25.16129 | 97 | 0.574359 |
e95271c1c56dcbf791ffffe25e60c01c645acbc9 | 3,495 | use crate::{Config, Result};
use log::{info, warn};
use std::{path::Path, time::Instant};
use winit::{
dpi::PhysicalSize,
event::{Event, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
use crate::input::InputState;
use crate::objects::{Cube, Object};
use crate::renderer::Renderer;
pub struct App {
window: Window,
event_loop: EventLoop<()>,
input_state: InputState,
renderer: Renderer,
cube: Cube,
}
impl App {
pub async fn new(config: Config) -> Result<App> {
let input_state = InputState::new();
let init_start = Instant::now();
let (window, event_loop) = {
let width = config.window.width;
let height = config.window.height;
let title = config.application.name;
let event_loop = EventLoop::new();
let size: PhysicalSize<u32> = PhysicalSize::from((width, height));
let window = WindowBuilder::new()
.with_inner_size(size)
.with_title(title)
.build(&event_loop)?;
(window, event_loop)
};
info!("Window and Event Loop Created");
let mut renderer = Renderer::new(&window, config.window.bg_color).await;
renderer.init_clear_screen();
let cube = Cube::new(&renderer)?;
warn!(
"Initialization time: {:#?} sec",
Instant::now().duration_since(init_start).as_secs_f32()
);
Ok(App {
window,
event_loop,
input_state,
renderer,
// objects,
cube,
})
}
pub fn run(self) {
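        // Move all state into the event-loop closure; `event_loop.run` takes
        // over the thread and never returns.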
info!("Event Loop Starting");
let mut input_state = self.input_state;
let window = self.window;
let mut renderer = self.renderer;
// let objects = self.objects;
let cube = self.cube;
self.event_loop.run(move |event, _, control_flow| {
match event {
Event::MainEventsCleared => {
if input_state.is_key_pressed(VirtualKeyCode::Escape) {
info!("Escape Key Pressed.");
*control_flow = ControlFlow::Exit;
}
window.request_redraw();
}
Event::RedrawRequested(_) => {
renderer.render(&cube);
}
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => *control_flow = ControlFlow::Exit,
Event::WindowEvent {
event: WindowEvent::Resized(physical_size),
..
} => renderer.resize(physical_size),
Event::WindowEvent {
event: WindowEvent::ScaleFactorChanged { new_inner_size, .. },
..
} => renderer.resize(*new_inner_size),
Event::LoopDestroyed => {
info!("Loop Destroyed");
}
Event::DeviceEvent { event, .. } => {
input_state.update(&event);
}
// ControlFlow::Poll continuously runs the event loop, even if the OS hasn't
// dispatched any events. This is ideal for games and similar applications.
_ => *control_flow = ControlFlow::Poll,
}
});
}
}
| 31.772727 | 92 | 0.509013 |
7216f59c4cf42da413eb1555185c8e453df502aa | 31,423 |
::bobbin_mcu::periph!( NVIC, Nvic, NVIC_PERIPH, NvicPeriph, NVIC_OWNED, NVIC_REF_COUNT, 0xe000e000, 0x00, 0x00);
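// Usage sketch (hedged: assumes the `bobbin_bits` integer conversions used
// below): external interrupt `n` is enabled by setting bit `n % 32` of
// ISER[n / 32], e.g.
//
//     NVIC.write_iser(n / 32, Iser(0).set_setena(n % 32, 1));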
#[doc="Nested Vectored Interrupt Controller"]
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct NvicPeriph(pub usize);
impl NvicPeriph {
#[doc="Get the ISER Register."]
#[inline] pub fn iser_reg(&self) -> ::bobbin_mcu::register::RegisterArray<Iser, ::bobbin_bits::R8> {
::bobbin_mcu::register::RegisterArray::new(self.0 as *mut Iser, 0x100, 0x4)
}
#[doc="Get the *mut pointer for the ISER register."]
#[inline] pub fn iser_mut<I: Into<::bobbin_bits::R8>>(&self, index: I) -> *mut Iser {
self.iser_reg().ptr(index.into())
}
#[doc="Get the *const pointer for the ISER register."]
#[inline] pub fn iser_ptr<I: Into<::bobbin_bits::R8>>(&self, index: I) -> *const Iser {
self.iser_reg().ptr(index.into())
}
#[doc="Read the ISER register."]
#[inline] pub fn iser<I: Into<::bobbin_bits::R8>>(&self, index: I) -> Iser {
self.iser_reg().read(index.into())
}
#[doc="Write the ISER register."]
#[inline] pub fn write_iser<I: Into<::bobbin_bits::R8>>(&self, index: I, value: Iser) -> &Self {
self.iser_reg().write(index.into(), value);
self
}
#[doc="Set the ISER register."]
#[inline] pub fn set_iser<I: Into<::bobbin_bits::R8>, F: FnOnce(Iser) -> Iser>(&self, index: I, f: F) -> &Self {
self.iser_reg().set(index.into(), f);
self
}
#[doc="Modify the ISER register."]
#[inline] pub fn with_iser<I: Into<::bobbin_bits::R8> + Copy, F: FnOnce(Iser) -> Iser>(&self, index: I, f: F) -> &Self {
self.iser_reg().with(index.into(), f);
self
}
#[doc="Get the ICER Register."]
#[inline] pub fn icer_reg(&self) -> ::bobbin_mcu::register::RegisterArray<Icer, ::bobbin_bits::R8> {
::bobbin_mcu::register::RegisterArray::new(self.0 as *mut Icer, 0x180, 0x4)
}
#[doc="Get the *mut pointer for the ICER register."]
#[inline] pub fn icer_mut<I: Into<::bobbin_bits::R8>>(&self, index: I) -> *mut Icer {
self.icer_reg().ptr(index.into())
}
#[doc="Get the *const pointer for the ICER register."]
#[inline] pub fn icer_ptr<I: Into<::bobbin_bits::R8>>(&self, index: I) -> *const Icer {
self.icer_reg().ptr(index.into())
}
#[doc="Read the ICER register."]
#[inline] pub fn icer<I: Into<::bobbin_bits::R8>>(&self, index: I) -> Icer {
self.icer_reg().read(index.into())
}
#[doc="Write the ICER register."]
#[inline] pub fn write_icer<I: Into<::bobbin_bits::R8>>(&self, index: I, value: Icer) -> &Self {
self.icer_reg().write(index.into(), value);
self
}
#[doc="Set the ICER register."]
#[inline] pub fn set_icer<I: Into<::bobbin_bits::R8>, F: FnOnce(Icer) -> Icer>(&self, index: I, f: F) -> &Self {
self.icer_reg().set(index.into(), f);
self
}
#[doc="Modify the ICER register."]
#[inline] pub fn with_icer<I: Into<::bobbin_bits::R8> + Copy, F: FnOnce(Icer) -> Icer>(&self, index: I, f: F) -> &Self {
self.icer_reg().with(index.into(), f);
self
}
#[doc="Get the ISPR Register."]
#[inline] pub fn ispr_reg(&self) -> ::bobbin_mcu::register::RegisterArray<Ispr, ::bobbin_bits::R8> {
::bobbin_mcu::register::RegisterArray::new(self.0 as *mut Ispr, 0x200, 0x4)
}
#[doc="Get the *mut pointer for the ISPR register."]
#[inline] pub fn ispr_mut<I: Into<::bobbin_bits::R8>>(&self, index: I) -> *mut Ispr {
self.ispr_reg().ptr(index.into())
}
#[doc="Get the *const pointer for the ISPR register."]
#[inline] pub fn ispr_ptr<I: Into<::bobbin_bits::R8>>(&self, index: I) -> *const Ispr {
self.ispr_reg().ptr(index.into())
}
#[doc="Read the ISPR register."]
#[inline] pub fn ispr<I: Into<::bobbin_bits::R8>>(&self, index: I) -> Ispr {
self.ispr_reg().read(index.into())
}
#[doc="Write the ISPR register."]
#[inline] pub fn write_ispr<I: Into<::bobbin_bits::R8>>(&self, index: I, value: Ispr) -> &Self {
self.ispr_reg().write(index.into(), value);
self
}
#[doc="Set the ISPR register."]
#[inline] pub fn set_ispr<I: Into<::bobbin_bits::R8>, F: FnOnce(Ispr) -> Ispr>(&self, index: I, f: F) -> &Self {
self.ispr_reg().set(index.into(), f);
self
}
#[doc="Modify the ISPR register."]
#[inline] pub fn with_ispr<I: Into<::bobbin_bits::R8> + Copy, F: FnOnce(Ispr) -> Ispr>(&self, index: I, f: F) -> &Self {
self.ispr_reg().with(index.into(), f);
self
}
#[doc="Get the ICPR Register."]
#[inline] pub fn icpr_reg(&self) -> ::bobbin_mcu::register::RegisterArray<Icpr, ::bobbin_bits::R8> {
::bobbin_mcu::register::RegisterArray::new(self.0 as *mut Icpr, 0x280, 0x4)
}
#[doc="Get the *mut pointer for the ICPR register."]
#[inline] pub fn icpr_mut<I: Into<::bobbin_bits::R8>>(&self, index: I) -> *mut Icpr {
self.icpr_reg().ptr(index.into())
}
#[doc="Get the *const pointer for the ICPR register."]
#[inline] pub fn icpr_ptr<I: Into<::bobbin_bits::R8>>(&self, index: I) -> *const Icpr {
self.icpr_reg().ptr(index.into())
}
#[doc="Read the ICPR register."]
#[inline] pub fn icpr<I: Into<::bobbin_bits::R8>>(&self, index: I) -> Icpr {
self.icpr_reg().read(index.into())
}
#[doc="Write the ICPR register."]
#[inline] pub fn write_icpr<I: Into<::bobbin_bits::R8>>(&self, index: I, value: Icpr) -> &Self {
self.icpr_reg().write(index.into(), value);
self
}
#[doc="Set the ICPR register."]
#[inline] pub fn set_icpr<I: Into<::bobbin_bits::R8>, F: FnOnce(Icpr) -> Icpr>(&self, index: I, f: F) -> &Self {
self.icpr_reg().set(index.into(), f);
self
}
#[doc="Modify the ICPR register."]
#[inline] pub fn with_icpr<I: Into<::bobbin_bits::R8> + Copy, F: FnOnce(Icpr) -> Icpr>(&self, index: I, f: F) -> &Self {
self.icpr_reg().with(index.into(), f);
self
}
#[doc="Get the IABR Register."]
#[inline] pub fn iabr_reg(&self) -> ::bobbin_mcu::register::RegisterArray<Iabr, ::bobbin_bits::R8> {
        // IABR lives at offset 0x300; 0x280 is ICPR.
        ::bobbin_mcu::register::RegisterArray::new(self.0 as *mut Iabr, 0x300, 0x4)
}
#[doc="Get the *mut pointer for the IABR register."]
#[inline] pub fn iabr_mut<I: Into<::bobbin_bits::R8>>(&self, index: I) -> *mut Iabr {
self.iabr_reg().ptr(index.into())
}
#[doc="Get the *const pointer for the IABR register."]
#[inline] pub fn iabr_ptr<I: Into<::bobbin_bits::R8>>(&self, index: I) -> *const Iabr {
self.iabr_reg().ptr(index.into())
}
#[doc="Read the IABR register."]
#[inline] pub fn iabr<I: Into<::bobbin_bits::R8>>(&self, index: I) -> Iabr {
self.iabr_reg().read(index.into())
}
#[doc="Write the IABR register."]
#[inline] pub fn write_iabr<I: Into<::bobbin_bits::R8>>(&self, index: I, value: Iabr) -> &Self {
self.iabr_reg().write(index.into(), value);
self
}
#[doc="Set the IABR register."]
#[inline] pub fn set_iabr<I: Into<::bobbin_bits::R8>, F: FnOnce(Iabr) -> Iabr>(&self, index: I, f: F) -> &Self {
self.iabr_reg().set(index.into(), f);
self
}
#[doc="Modify the IABR register."]
#[inline] pub fn with_iabr<I: Into<::bobbin_bits::R8> + Copy, F: FnOnce(Iabr) -> Iabr>(&self, index: I, f: F) -> &Self {
self.iabr_reg().with(index.into(), f);
self
}
#[doc="Get the IPR Register."]
#[inline] pub fn ipr_reg(&self) -> ::bobbin_mcu::register::RegisterArray<Ipr, ::bobbin_bits::R8> {
::bobbin_mcu::register::RegisterArray::new(self.0 as *mut Ipr, 0x400, 0x4)
}
#[doc="Get the *mut pointer for the IPR register."]
#[inline] pub fn ipr_mut<I: Into<::bobbin_bits::R8>>(&self, index: I) -> *mut Ipr {
self.ipr_reg().ptr(index.into())
}
#[doc="Get the *const pointer for the IPR register."]
#[inline] pub fn ipr_ptr<I: Into<::bobbin_bits::R8>>(&self, index: I) -> *const Ipr {
self.ipr_reg().ptr(index.into())
}
#[doc="Read the IPR register."]
#[inline] pub fn ipr<I: Into<::bobbin_bits::R8>>(&self, index: I) -> Ipr {
self.ipr_reg().read(index.into())
}
#[doc="Write the IPR register."]
#[inline] pub fn write_ipr<I: Into<::bobbin_bits::R8>>(&self, index: I, value: Ipr) -> &Self {
self.ipr_reg().write(index.into(), value);
self
}
#[doc="Set the IPR register."]
#[inline] pub fn set_ipr<I: Into<::bobbin_bits::R8>, F: FnOnce(Ipr) -> Ipr>(&self, index: I, f: F) -> &Self {
self.ipr_reg().set(index.into(), f);
self
}
#[doc="Modify the IPR register."]
#[inline] pub fn with_ipr<I: Into<::bobbin_bits::R8> + Copy, F: FnOnce(Ipr) -> Ipr>(&self, index: I, f: F) -> &Self {
self.ipr_reg().with(index.into(), f);
self
}
#[doc="Get the STIR Register."]
#[inline] pub fn stir_reg(&self) -> ::bobbin_mcu::register::Register<Stir> {
::bobbin_mcu::register::Register::new(self.0 as *mut Stir, 0xf00)
}
#[doc="Get the *mut pointer for the STIR register."]
#[inline] pub fn stir_mut(&self) -> *mut Stir {
self.stir_reg().ptr()
}
#[doc="Get the *const pointer for the STIR register."]
#[inline] pub fn stir_ptr(&self) -> *const Stir {
self.stir_reg().ptr()
}
#[doc="Read the STIR register."]
#[inline] pub fn stir(&self) -> Stir {
self.stir_reg().read()
}
#[doc="Write the STIR register."]
#[inline] pub fn write_stir(&self, value: Stir) -> &Self {
self.stir_reg().write(value);
self
}
#[doc="Set the STIR register."]
#[inline] pub fn set_stir<F: FnOnce(Stir) -> Stir>(&self, f: F) -> &Self {
self.stir_reg().set(f);
self
}
#[doc="Modify the STIR register."]
#[inline] pub fn with_stir<F: FnOnce(Stir) -> Stir>(&self, f: F) -> &Self {
self.stir_reg().with(f);
self
}
}
#[doc="Interrupt Set-Enable Register"]
#[derive(Default, Clone, Copy, PartialEq, Eq)]
pub struct Iser(pub u32);
impl Iser {
#[doc="Interrupt set-enable bits"]
#[inline] pub fn setena<I: Into<::bobbin_bits::R32>>(&self, index: I) -> ::bobbin_bits::U1 {
let index: usize = index.into().value() as usize;
let shift: usize = 0 + index;
unsafe { ::core::mem::transmute(((self.0 >> shift) & 0x1) as u8) } // [0]
}
#[doc="Returns true if SETENA != 0"]
#[inline] pub fn test_setena<I: Into<::bobbin_bits::R32>>(&self, index: I) -> bool{
self.setena(index) != 0
}
#[doc="Sets the SETENA field."]
#[inline] pub fn set_setena<I: Into<::bobbin_bits::R32>, V: Into<::bobbin_bits::U1>>(mut self, index: I, value: V) -> Self {
let index: usize = index.into().value() as usize;
let value: ::bobbin_bits::U1 = value.into();
let value: u32 = value.into();
let shift: usize = 0 + index;
self.0 &= !(0x1 << shift);
self.0 |= value << shift;
self
}
}
impl From<u32> for Iser {
#[inline]
fn from(other: u32) -> Self {
Iser(other)
}
}
impl ::core::fmt::Display for Iser {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
self.0.fmt(f)
}
}
impl ::core::fmt::Debug for Iser {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
try!(write!(f, "[0x{:08x}", self.0));
if self.setena(0) != 0 { try!(write!(f, " setena[0]"))}
if self.setena(1) != 0 { try!(write!(f, " setena[1]"))}
if self.setena(2) != 0 { try!(write!(f, " setena[2]"))}
if self.setena(3) != 0 { try!(write!(f, " setena[3]"))}
if self.setena(4) != 0 { try!(write!(f, " setena[4]"))}
if self.setena(5) != 0 { try!(write!(f, " setena[5]"))}
if self.setena(6) != 0 { try!(write!(f, " setena[6]"))}
if self.setena(7) != 0 { try!(write!(f, " setena[7]"))}
if self.setena(8) != 0 { try!(write!(f, " setena[8]"))}
if self.setena(9) != 0 { try!(write!(f, " setena[9]"))}
if self.setena(10) != 0 { try!(write!(f, " setena[10]"))}
if self.setena(11) != 0 { try!(write!(f, " setena[11]"))}
if self.setena(12) != 0 { try!(write!(f, " setena[12]"))}
if self.setena(13) != 0 { try!(write!(f, " setena[13]"))}
if self.setena(14) != 0 { try!(write!(f, " setena[14]"))}
if self.setena(15) != 0 { try!(write!(f, " setena[15]"))}
if self.setena(16) != 0 { try!(write!(f, " setena[16]"))}
if self.setena(17) != 0 { try!(write!(f, " setena[17]"))}
if self.setena(18) != 0 { try!(write!(f, " setena[18]"))}
if self.setena(19) != 0 { try!(write!(f, " setena[19]"))}
if self.setena(20) != 0 { try!(write!(f, " setena[20]"))}
if self.setena(21) != 0 { try!(write!(f, " setena[21]"))}
if self.setena(22) != 0 { try!(write!(f, " setena[22]"))}
if self.setena(23) != 0 { try!(write!(f, " setena[23]"))}
if self.setena(24) != 0 { try!(write!(f, " setena[24]"))}
if self.setena(25) != 0 { try!(write!(f, " setena[25]"))}
if self.setena(26) != 0 { try!(write!(f, " setena[26]"))}
if self.setena(27) != 0 { try!(write!(f, " setena[27]"))}
if self.setena(28) != 0 { try!(write!(f, " setena[28]"))}
if self.setena(29) != 0 { try!(write!(f, " setena[29]"))}
if self.setena(30) != 0 { try!(write!(f, " setena[30]"))}
if self.setena(31) != 0 { try!(write!(f, " setena[31]"))}
try!(write!(f, "]"));
Ok(())
}
}
#[doc="Interrupt Clear-Enable Register"]
#[derive(Default, Clone, Copy, PartialEq, Eq)]
pub struct Icer(pub u32);
impl Icer {
#[doc="Interrupt clear-enable bits"]
#[inline] pub fn clrena<I: Into<::bobbin_bits::R32>>(&self, index: I) -> ::bobbin_bits::U1 {
let index: usize = index.into().value() as usize;
let shift: usize = 0 + index;
unsafe { ::core::mem::transmute(((self.0 >> shift) & 0x1) as u8) } // [0]
}
#[doc="Returns true if CLRENA != 0"]
#[inline] pub fn test_clrena<I: Into<::bobbin_bits::R32>>(&self, index: I) -> bool{
self.clrena(index) != 0
}
#[doc="Sets the CLRENA field."]
#[inline] pub fn set_clrena<I: Into<::bobbin_bits::R32>, V: Into<::bobbin_bits::U1>>(mut self, index: I, value: V) -> Self {
let index: usize = index.into().value() as usize;
let value: ::bobbin_bits::U1 = value.into();
let value: u32 = value.into();
let shift: usize = 0 + index;
self.0 &= !(0x1 << shift);
self.0 |= value << shift;
self
}
}
impl From<u32> for Icer {
#[inline]
fn from(other: u32) -> Self {
Icer(other)
}
}
impl ::core::fmt::Display for Icer {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
self.0.fmt(f)
}
}
impl ::core::fmt::Debug for Icer {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
try!(write!(f, "[0x{:08x}", self.0));
if self.clrena(0) != 0 { try!(write!(f, " clrena[0]"))}
if self.clrena(1) != 0 { try!(write!(f, " clrena[1]"))}
if self.clrena(2) != 0 { try!(write!(f, " clrena[2]"))}
if self.clrena(3) != 0 { try!(write!(f, " clrena[3]"))}
if self.clrena(4) != 0 { try!(write!(f, " clrena[4]"))}
if self.clrena(5) != 0 { try!(write!(f, " clrena[5]"))}
if self.clrena(6) != 0 { try!(write!(f, " clrena[6]"))}
if self.clrena(7) != 0 { try!(write!(f, " clrena[7]"))}
if self.clrena(8) != 0 { try!(write!(f, " clrena[8]"))}
if self.clrena(9) != 0 { try!(write!(f, " clrena[9]"))}
if self.clrena(10) != 0 { try!(write!(f, " clrena[10]"))}
if self.clrena(11) != 0 { try!(write!(f, " clrena[11]"))}
if self.clrena(12) != 0 { try!(write!(f, " clrena[12]"))}
if self.clrena(13) != 0 { try!(write!(f, " clrena[13]"))}
if self.clrena(14) != 0 { try!(write!(f, " clrena[14]"))}
if self.clrena(15) != 0 { try!(write!(f, " clrena[15]"))}
if self.clrena(16) != 0 { try!(write!(f, " clrena[16]"))}
if self.clrena(17) != 0 { try!(write!(f, " clrena[17]"))}
if self.clrena(18) != 0 { try!(write!(f, " clrena[18]"))}
if self.clrena(19) != 0 { try!(write!(f, " clrena[19]"))}
if self.clrena(20) != 0 { try!(write!(f, " clrena[20]"))}
if self.clrena(21) != 0 { try!(write!(f, " clrena[21]"))}
if self.clrena(22) != 0 { try!(write!(f, " clrena[22]"))}
if self.clrena(23) != 0 { try!(write!(f, " clrena[23]"))}
if self.clrena(24) != 0 { try!(write!(f, " clrena[24]"))}
if self.clrena(25) != 0 { try!(write!(f, " clrena[25]"))}
if self.clrena(26) != 0 { try!(write!(f, " clrena[26]"))}
if self.clrena(27) != 0 { try!(write!(f, " clrena[27]"))}
if self.clrena(28) != 0 { try!(write!(f, " clrena[28]"))}
if self.clrena(29) != 0 { try!(write!(f, " clrena[29]"))}
if self.clrena(30) != 0 { try!(write!(f, " clrena[30]"))}
if self.clrena(31) != 0 { try!(write!(f, " clrena[31]"))}
try!(write!(f, "]"));
Ok(())
}
}
#[doc="Interrupt Set-Pending Register"]
#[derive(Default, Clone, Copy, PartialEq, Eq)]
pub struct Ispr(pub u32);
impl Ispr {
#[doc="Interrupt set-pending bits"]
#[inline] pub fn setpend<I: Into<::bobbin_bits::R32>>(&self, index: I) -> ::bobbin_bits::U1 {
let index: usize = index.into().value() as usize;
let shift: usize = 0 + index;
unsafe { ::core::mem::transmute(((self.0 >> shift) & 0x1) as u8) } // [0]
}
#[doc="Returns true if SETPEND != 0"]
#[inline] pub fn test_setpend<I: Into<::bobbin_bits::R32>>(&self, index: I) -> bool{
self.setpend(index) != 0
}
#[doc="Sets the SETPEND field."]
#[inline] pub fn set_setpend<I: Into<::bobbin_bits::R32>, V: Into<::bobbin_bits::U1>>(mut self, index: I, value: V) -> Self {
let index: usize = index.into().value() as usize;
let value: ::bobbin_bits::U1 = value.into();
let value: u32 = value.into();
let shift: usize = 0 + index;
self.0 &= !(0x1 << shift);
self.0 |= value << shift;
self
}
}
impl From<u32> for Ispr {
#[inline]
fn from(other: u32) -> Self {
Ispr(other)
}
}
impl ::core::fmt::Display for Ispr {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
self.0.fmt(f)
}
}
impl ::core::fmt::Debug for Ispr {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
try!(write!(f, "[0x{:08x}", self.0));
if self.setpend(0) != 0 { try!(write!(f, " setpend[0]"))}
if self.setpend(1) != 0 { try!(write!(f, " setpend[1]"))}
if self.setpend(2) != 0 { try!(write!(f, " setpend[2]"))}
if self.setpend(3) != 0 { try!(write!(f, " setpend[3]"))}
if self.setpend(4) != 0 { try!(write!(f, " setpend[4]"))}
if self.setpend(5) != 0 { try!(write!(f, " setpend[5]"))}
if self.setpend(6) != 0 { try!(write!(f, " setpend[6]"))}
if self.setpend(7) != 0 { try!(write!(f, " setpend[7]"))}
if self.setpend(8) != 0 { try!(write!(f, " setpend[8]"))}
if self.setpend(9) != 0 { try!(write!(f, " setpend[9]"))}
if self.setpend(10) != 0 { try!(write!(f, " setpend[10]"))}
if self.setpend(11) != 0 { try!(write!(f, " setpend[11]"))}
if self.setpend(12) != 0 { try!(write!(f, " setpend[12]"))}
if self.setpend(13) != 0 { try!(write!(f, " setpend[13]"))}
if self.setpend(14) != 0 { try!(write!(f, " setpend[14]"))}
if self.setpend(15) != 0 { try!(write!(f, " setpend[15]"))}
if self.setpend(16) != 0 { try!(write!(f, " setpend[16]"))}
if self.setpend(17) != 0 { try!(write!(f, " setpend[17]"))}
if self.setpend(18) != 0 { try!(write!(f, " setpend[18]"))}
if self.setpend(19) != 0 { try!(write!(f, " setpend[19]"))}
if self.setpend(20) != 0 { try!(write!(f, " setpend[20]"))}
if self.setpend(21) != 0 { try!(write!(f, " setpend[21]"))}
if self.setpend(22) != 0 { try!(write!(f, " setpend[22]"))}
if self.setpend(23) != 0 { try!(write!(f, " setpend[23]"))}
if self.setpend(24) != 0 { try!(write!(f, " setpend[24]"))}
if self.setpend(25) != 0 { try!(write!(f, " setpend[25]"))}
if self.setpend(26) != 0 { try!(write!(f, " setpend[26]"))}
if self.setpend(27) != 0 { try!(write!(f, " setpend[27]"))}
if self.setpend(28) != 0 { try!(write!(f, " setpend[28]"))}
if self.setpend(29) != 0 { try!(write!(f, " setpend[29]"))}
if self.setpend(30) != 0 { try!(write!(f, " setpend[30]"))}
if self.setpend(31) != 0 { try!(write!(f, " setpend[31]"))}
try!(write!(f, "]"));
Ok(())
}
}
#[doc="Interrupt Clear-Pending Register"]
#[derive(Default, Clone, Copy, PartialEq, Eq)]
pub struct Icpr(pub u32);
impl Icpr {
#[doc="Interrupt clear-pending bits"]
#[inline] pub fn clrpend<I: Into<::bobbin_bits::R32>>(&self, index: I) -> ::bobbin_bits::U1 {
let index: usize = index.into().value() as usize;
let shift: usize = 0 + index;
unsafe { ::core::mem::transmute(((self.0 >> shift) & 0x1) as u8) } // [0]
}
#[doc="Returns true if CLRPEND != 0"]
#[inline] pub fn test_clrpend<I: Into<::bobbin_bits::R32>>(&self, index: I) -> bool{
self.clrpend(index) != 0
}
#[doc="Sets the CLRPEND field."]
#[inline] pub fn set_clrpend<I: Into<::bobbin_bits::R32>, V: Into<::bobbin_bits::U1>>(mut self, index: I, value: V) -> Self {
let index: usize = index.into().value() as usize;
let value: ::bobbin_bits::U1 = value.into();
let value: u32 = value.into();
let shift: usize = 0 + index;
self.0 &= !(0x1 << shift);
self.0 |= value << shift;
self
}
}
impl From<u32> for Icpr {
#[inline]
fn from(other: u32) -> Self {
Icpr(other)
}
}
impl ::core::fmt::Display for Icpr {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
self.0.fmt(f)
}
}
impl ::core::fmt::Debug for Icpr {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
try!(write!(f, "[0x{:08x}", self.0));
if self.clrpend(0) != 0 { try!(write!(f, " clrpend[0]"))}
if self.clrpend(1) != 0 { try!(write!(f, " clrpend[1]"))}
if self.clrpend(2) != 0 { try!(write!(f, " clrpend[2]"))}
if self.clrpend(3) != 0 { try!(write!(f, " clrpend[3]"))}
if self.clrpend(4) != 0 { try!(write!(f, " clrpend[4]"))}
if self.clrpend(5) != 0 { try!(write!(f, " clrpend[5]"))}
if self.clrpend(6) != 0 { try!(write!(f, " clrpend[6]"))}
if self.clrpend(7) != 0 { try!(write!(f, " clrpend[7]"))}
if self.clrpend(8) != 0 { try!(write!(f, " clrpend[8]"))}
if self.clrpend(9) != 0 { try!(write!(f, " clrpend[9]"))}
if self.clrpend(10) != 0 { try!(write!(f, " clrpend[10]"))}
if self.clrpend(11) != 0 { try!(write!(f, " clrpend[11]"))}
if self.clrpend(12) != 0 { try!(write!(f, " clrpend[12]"))}
if self.clrpend(13) != 0 { try!(write!(f, " clrpend[13]"))}
if self.clrpend(14) != 0 { try!(write!(f, " clrpend[14]"))}
if self.clrpend(15) != 0 { try!(write!(f, " clrpend[15]"))}
if self.clrpend(16) != 0 { try!(write!(f, " clrpend[16]"))}
if self.clrpend(17) != 0 { try!(write!(f, " clrpend[17]"))}
if self.clrpend(18) != 0 { try!(write!(f, " clrpend[18]"))}
if self.clrpend(19) != 0 { try!(write!(f, " clrpend[19]"))}
if self.clrpend(20) != 0 { try!(write!(f, " clrpend[20]"))}
if self.clrpend(21) != 0 { try!(write!(f, " clrpend[21]"))}
if self.clrpend(22) != 0 { try!(write!(f, " clrpend[22]"))}
if self.clrpend(23) != 0 { try!(write!(f, " clrpend[23]"))}
if self.clrpend(24) != 0 { try!(write!(f, " clrpend[24]"))}
if self.clrpend(25) != 0 { try!(write!(f, " clrpend[25]"))}
if self.clrpend(26) != 0 { try!(write!(f, " clrpend[26]"))}
if self.clrpend(27) != 0 { try!(write!(f, " clrpend[27]"))}
if self.clrpend(28) != 0 { try!(write!(f, " clrpend[28]"))}
if self.clrpend(29) != 0 { try!(write!(f, " clrpend[29]"))}
if self.clrpend(30) != 0 { try!(write!(f, " clrpend[30]"))}
if self.clrpend(31) != 0 { try!(write!(f, " clrpend[31]"))}
try!(write!(f, "]"));
Ok(())
}
}
#[doc="Interrupt Active Bit Register"]
#[derive(Default, Clone, Copy, PartialEq, Eq)]
pub struct Iabr(pub u32);
impl Iabr {
#[doc="Interrupt clear-pending bits"]
#[inline] pub fn active<I: Into<::bobbin_bits::R32>>(&self, index: I) -> ::bobbin_bits::U1 {
let index: usize = index.into().value() as usize;
let shift: usize = 0 + index;
unsafe { ::core::mem::transmute(((self.0 >> shift) & 0x1) as u8) } // [0]
}
#[doc="Returns true if ACTIVE != 0"]
#[inline] pub fn test_active<I: Into<::bobbin_bits::R32>>(&self, index: I) -> bool{
self.active(index) != 0
}
#[doc="Sets the ACTIVE field."]
#[inline] pub fn set_active<I: Into<::bobbin_bits::R32>, V: Into<::bobbin_bits::U1>>(mut self, index: I, value: V) -> Self {
let index: usize = index.into().value() as usize;
let value: ::bobbin_bits::U1 = value.into();
let value: u32 = value.into();
let shift: usize = 0 + index;
self.0 &= !(0x1 << shift);
self.0 |= value << shift;
self
}
}
impl From<u32> for Iabr {
#[inline]
fn from(other: u32) -> Self {
Iabr(other)
}
}
impl ::core::fmt::Display for Iabr {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
self.0.fmt(f)
}
}
impl ::core::fmt::Debug for Iabr {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
try!(write!(f, "[0x{:08x}", self.0));
if self.active(0) != 0 { try!(write!(f, " active[0]"))}
if self.active(1) != 0 { try!(write!(f, " active[1]"))}
if self.active(2) != 0 { try!(write!(f, " active[2]"))}
if self.active(3) != 0 { try!(write!(f, " active[3]"))}
if self.active(4) != 0 { try!(write!(f, " active[4]"))}
if self.active(5) != 0 { try!(write!(f, " active[5]"))}
if self.active(6) != 0 { try!(write!(f, " active[6]"))}
if self.active(7) != 0 { try!(write!(f, " active[7]"))}
if self.active(8) != 0 { try!(write!(f, " active[8]"))}
if self.active(9) != 0 { try!(write!(f, " active[9]"))}
if self.active(10) != 0 { try!(write!(f, " active[10]"))}
if self.active(11) != 0 { try!(write!(f, " active[11]"))}
if self.active(12) != 0 { try!(write!(f, " active[12]"))}
if self.active(13) != 0 { try!(write!(f, " active[13]"))}
if self.active(14) != 0 { try!(write!(f, " active[14]"))}
if self.active(15) != 0 { try!(write!(f, " active[15]"))}
if self.active(16) != 0 { try!(write!(f, " active[16]"))}
if self.active(17) != 0 { try!(write!(f, " active[17]"))}
if self.active(18) != 0 { try!(write!(f, " active[18]"))}
if self.active(19) != 0 { try!(write!(f, " active[19]"))}
if self.active(20) != 0 { try!(write!(f, " active[20]"))}
if self.active(21) != 0 { try!(write!(f, " active[21]"))}
if self.active(22) != 0 { try!(write!(f, " active[22]"))}
if self.active(23) != 0 { try!(write!(f, " active[23]"))}
if self.active(24) != 0 { try!(write!(f, " active[24]"))}
if self.active(25) != 0 { try!(write!(f, " active[25]"))}
if self.active(26) != 0 { try!(write!(f, " active[26]"))}
if self.active(27) != 0 { try!(write!(f, " active[27]"))}
if self.active(28) != 0 { try!(write!(f, " active[28]"))}
if self.active(29) != 0 { try!(write!(f, " active[29]"))}
if self.active(30) != 0 { try!(write!(f, " active[30]"))}
if self.active(31) != 0 { try!(write!(f, " active[31]"))}
try!(write!(f, "]"));
Ok(())
}
}
#[doc="Interrupt Priority Register x"]
#[derive(Default, Clone, Copy, PartialEq, Eq)]
pub struct Ipr(pub u32);
impl Ipr {
#[doc="Interrupt Priority"]
#[inline] pub fn pri<I: Into<::bobbin_bits::R4>>(&self, index: I) -> ::bobbin_bits::U8 {
let index: usize = index.into().value() as usize;
let shift: usize = 0 + (index << 3);
unsafe { ::core::mem::transmute(((self.0 >> shift) & 0xff) as u8) } // [7:0]
}
#[doc="Returns true if PRI != 0"]
#[inline] pub fn test_pri<I: Into<::bobbin_bits::R4>>(&self, index: I) -> bool{
self.pri(index) != 0
}
#[doc="Sets the PRI field."]
#[inline] pub fn set_pri<I: Into<::bobbin_bits::R4>, V: Into<::bobbin_bits::U8>>(mut self, index: I, value: V) -> Self {
let index: usize = index.into().value() as usize;
let value: ::bobbin_bits::U8 = value.into();
let value: u32 = value.into();
let shift: usize = 0 + (index << 3);
self.0 &= !(0xff << shift);
self.0 |= value << shift;
self
}
}
impl From<u32> for Ipr {
#[inline]
fn from(other: u32) -> Self {
Ipr(other)
}
}
impl ::core::fmt::Display for Ipr {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
self.0.fmt(f)
}
}
impl ::core::fmt::Debug for Ipr {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
try!(write!(f, "[0x{:08x}", self.0));
if self.pri(0) != 0 { try!(write!(f, " pri[0]=0x{:x}", self.pri(0)))}
if self.pri(1) != 0 { try!(write!(f, " pri[1]=0x{:x}", self.pri(1)))}
if self.pri(2) != 0 { try!(write!(f, " pri[2]=0x{:x}", self.pri(2)))}
if self.pri(3) != 0 { try!(write!(f, " pri[3]=0x{:x}", self.pri(3)))}
try!(write!(f, "]"));
Ok(())
}
}
#[doc="Software Trigger Interrupt Register"]
#[derive(Default, Clone, Copy, PartialEq, Eq)]
pub struct Stir(pub u32);
impl Stir {
#[doc="Interrupt ID of the interrupt to trigger, in the range 0-239."]
#[inline] pub fn intid(&self) -> ::bobbin_bits::U8 {
unsafe { ::core::mem::transmute(((self.0 >> 0) & 0xff) as u8) } // [7:0]
}
#[doc="Returns true if INTID != 0"]
#[inline] pub fn test_intid(&self) -> bool {
self.intid() != 0
}
#[doc="Sets the INTID field."]
#[inline] pub fn set_intid<V: Into<::bobbin_bits::U8>>(mut self, value: V) -> Self {
let value: ::bobbin_bits::U8 = value.into();
let value: u32 = value.into();
self.0 &= !(0xff << 0);
self.0 |= value << 0;
self
}
}
impl From<u32> for Stir {
#[inline]
fn from(other: u32) -> Self {
Stir(other)
}
}
impl ::core::fmt::Display for Stir {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
self.0.fmt(f)
}
}
impl ::core::fmt::Debug for Stir {
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
try!(write!(f, "[0x{:08x}", self.0));
if self.intid() != 0 { try!(write!(f, " intid=0x{:x}", self.intid()))}
try!(write!(f, "]"));
Ok(())
}
}
| 39.876904 | 129 | 0.531744 |
4896c8dc8ca0baf0eb971909f2a3f284c62a580b | 76,251 | use std::time::Duration;
use std::{iter::Iterator, sync::Arc, time::Instant};
use druid::TimerToken;
use druid::{
kurbo::{BezPath, Line},
piet::{PietText, PietTextLayout, Text, TextLayout as _, TextLayoutBuilder},
BoxConstraints, Color, Command, Env, Event, EventCtx, FontFamily,
InternalLifeCycle, LayoutCtx, LifeCycle, LifeCycleCtx, MouseButton, MouseEvent,
PaintCtx, Point, Rect, RenderContext, Size, Target, TextLayout, UpdateCtx, Vec2,
Widget, WidgetId,
};
use lapce_data::{
buffer::{matching_pair_direction, BufferContent, DiffLines, LocalBufferKind},
command::{
CommandTarget, LapceCommand, LapceCommandNew, LapceUICommand,
LapceWorkbenchCommand, LAPCE_UI_COMMAND,
},
config::{Config, LapceTheme},
data::{LapceTabData, PanelData, PanelKind},
editor::{EditorLocation, LapceEditorBufferData, Syntax},
menu::MenuItem,
movement::{ColPosition, CursorMode, Movement, Selection},
panel::PanelPosition,
state::{Mode, VisualMode},
};
use lapce_rpc::buffer::BufferId;
use lsp_types::{DiagnosticSeverity, DocumentChanges, TextEdit, Url, WorkspaceEdit};
use strum::EnumMessage;
pub mod container;
pub mod diff_split;
pub mod gutter;
pub mod header;
pub mod tab;
pub mod tab_header;
pub mod tab_header_content;
pub mod view;
pub struct LapceUI {}
#[derive(Copy, Clone)]
pub struct EditorCount(Option<usize>);
#[derive(Copy, Clone)]
pub enum EditorOperator {
Delete(EditorCount),
Yank(EditorCount),
}
#[derive(Clone)]
pub struct EditorUIState {
pub buffer_id: BufferId,
pub cursor: (usize, usize),
pub mode: Mode,
pub visual_mode: VisualMode,
pub selection: Selection,
pub selection_start_line: usize,
pub selection_end_line: usize,
}
#[derive(Clone)]
pub struct EditorState {
pub editor_id: WidgetId,
pub view_id: WidgetId,
pub split_id: WidgetId,
pub tab_id: WidgetId,
pub buffer_id: Option<BufferId>,
pub char_width: f64,
pub width: f64,
pub height: f64,
pub selection: Selection,
pub scroll_offset: Vec2,
pub scroll_size: Size,
pub view_size: Size,
pub gutter_width: f64,
pub header_height: f64,
pub locations: Vec<EditorLocation>,
pub current_location: usize,
pub saved_buffer_id: BufferId,
pub saved_selection: Selection,
pub saved_scroll_offset: Vec2,
#[allow(dead_code)]
last_movement: Movement,
}
// pub enum LapceEditorContainerKind {
// Container(WidgetPod<LapceEditorViewData, LapceEditorContainer>),
// DiffSplit(LapceSplitNew),
// }
#[derive(Clone, Copy)]
enum ClickKind {
Single,
Double,
Triple,
Quadruple,
}
pub struct LapceEditor {
view_id: WidgetId,
placeholder: Option<String>,
#[allow(dead_code)]
commands: Vec<(LapceCommandNew, PietTextLayout, Rect, PietTextLayout)>,
last_left_click: Option<(Instant, ClickKind, Point)>,
mouse_pos: Point,
    /// A timer that fires once the mouse has hovered in place long enough to
    /// trigger showing hover info (if there is any)
mouse_hover_timer: TimerToken,
}
impl LapceEditor {
pub fn new(view_id: WidgetId) -> Self {
Self {
view_id,
placeholder: None,
commands: vec![],
last_left_click: None,
mouse_pos: Point::ZERO,
mouse_hover_timer: TimerToken::INVALID,
}
}
fn mouse_down(
&mut self,
ctx: &mut EventCtx,
mouse_event: &MouseEvent,
editor_data: &mut LapceEditorBufferData,
config: &Config,
) {
ctx.set_handled();
match mouse_event.button {
MouseButton::Left => {
self.left_click(ctx, mouse_event, editor_data, config);
editor_data.cancel_completion();
                // TODO: Don't cancel here, because it would be good to allow
                // the user to select text inside the hover data
editor_data.cancel_hover();
}
MouseButton::Right => {
self.right_click(ctx, editor_data, mouse_event, config);
editor_data.cancel_completion();
editor_data.cancel_hover();
}
MouseButton::Middle => {}
_ => (),
}
}
fn left_click(
&mut self,
ctx: &mut EventCtx,
mouse_event: &MouseEvent,
editor_data: &mut LapceEditorBufferData,
config: &Config,
) {
let mut click_kind = ClickKind::Single;
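        // Same-position clicks within 500ms promote the click kind:
        // single -> double -> triple -> quadruple.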
if let Some((instant, kind, pos)) = self.last_left_click.as_ref() {
if pos == &mouse_event.pos && instant.elapsed().as_millis() < 500 {
click_kind = match kind {
ClickKind::Single => ClickKind::Double,
ClickKind::Double => ClickKind::Triple,
ClickKind::Triple => ClickKind::Quadruple,
ClickKind::Quadruple => ClickKind::Quadruple,
};
}
}
self.last_left_click = Some((Instant::now(), click_kind, mouse_event.pos));
match click_kind {
ClickKind::Single => {
editor_data.single_click(ctx, mouse_event, config);
}
ClickKind::Double => {
editor_data.double_click(ctx, mouse_event, config);
}
ClickKind::Triple => {
editor_data.triple_click(ctx, mouse_event, config);
}
ClickKind::Quadruple => {}
}
}
fn right_click(
&mut self,
ctx: &mut EventCtx,
editor_data: &mut LapceEditorBufferData,
mouse_event: &MouseEvent,
config: &Config,
) {
editor_data.single_click(ctx, mouse_event, config);
let menu_items = vec![
MenuItem {
text: LapceCommand::GotoDefinition
.get_message()
.unwrap()
.to_string(),
command: LapceCommandNew {
cmd: LapceCommand::GotoDefinition.to_string(),
palette_desc: None,
data: None,
target: CommandTarget::Focus,
},
},
MenuItem {
text: "Command Palette".to_string(),
command: LapceCommandNew {
cmd: LapceWorkbenchCommand::PaletteCommand.to_string(),
palette_desc: None,
data: None,
target: CommandTarget::Workbench,
},
},
];
let point = mouse_event.pos + editor_data.editor.window_origin.to_vec2();
ctx.submit_command(Command::new(
LAPCE_UI_COMMAND,
LapceUICommand::ShowMenu(point.round(), Arc::new(menu_items)),
Target::Auto,
));
}
pub fn get_size(
data: &LapceEditorBufferData,
text: &mut PietText,
editor_size: Size,
panels: &im::HashMap<PanelPosition, Arc<PanelData>>,
env: &Env,
) -> Size {
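        // Compute the virtual (scrollable) content size, which depends on the
        // buffer kind and on display mode (code lens, diff compare, or plain).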
let line_height = data.config.editor.line_height as f64;
let width = data.config.editor_text_width(text, "W");
match &data.editor.content {
BufferContent::File(_) => {
if data.editor.code_lens {
if let Some(syntax) = data.buffer.syntax.as_ref() {
let height =
syntax.lens.height_of_line(syntax.lens.len() + 1);
Size::new(
(width * data.buffer.max_len as f64)
.max(editor_size.width),
(height as f64 - line_height).max(0.0)
+ editor_size.height,
)
} else {
let height = data.buffer.num_lines
* data.config.editor.code_lens_font_size;
Size::new(
(width * data.buffer.max_len as f64)
.max(editor_size.width),
(height as f64 - line_height).max(0.0)
+ editor_size.height,
)
}
} else if let Some(compare) = data.editor.compare.as_ref() {
let mut lines = 0;
if let Some(changes) = data.buffer.history_changes.get(compare) {
for change in changes.iter() {
match change {
DiffLines::Left(l) => lines += l.len(),
DiffLines::Both(_l, r) => lines += r.len(),
DiffLines::Skip(_l, _r) => lines += 1,
DiffLines::Right(r) => lines += r.len(),
}
}
}
Size::new(
(width * data.buffer.max_len as f64).max(editor_size.width),
(line_height * lines as f64 - line_height).max(0.0)
+ editor_size.height,
)
} else {
Size::new(
(width * data.buffer.max_len as f64).max(editor_size.width),
(line_height * data.buffer.num_lines as f64 - line_height)
.max(0.0)
+ editor_size.height,
)
}
}
BufferContent::Local(kind) => match kind {
LocalBufferKind::FilePicker
| LocalBufferKind::Search
| LocalBufferKind::Settings
| LocalBufferKind::Keymap => Size::new(
editor_size.width.max(width * data.buffer.rope.len() as f64),
env.get(LapceTheme::INPUT_LINE_HEIGHT)
+ env.get(LapceTheme::INPUT_LINE_PADDING) * 2.0,
),
LocalBufferKind::SourceControl => {
for (pos, panels) in panels.iter() {
for panel_kind in panels.widgets.iter() {
if panel_kind == &PanelKind::SourceControl {
return match pos {
PanelPosition::BottomLeft
| PanelPosition::BottomRight => {
let width = 200.0;
Size::new(width, editor_size.height)
}
_ => {
let height = 100.0f64;
let height = height.max(
line_height
* data.buffer.num_lines() as f64,
);
Size::new(
(width * data.buffer.max_len as f64)
.max(editor_size.width),
height,
)
}
};
}
}
}
Size::ZERO
}
LocalBufferKind::Empty => editor_size,
},
BufferContent::Value(_) => Size::new(
editor_size.width.max(width * data.buffer.rope.len() as f64),
env.get(LapceTheme::INPUT_LINE_HEIGHT)
+ env.get(LapceTheme::INPUT_LINE_PADDING) * 2.0,
),
}
}
pub fn paint_code_lens_content(
data: &LapceEditorBufferData,
ctx: &mut PaintCtx,
is_focused: bool,
) {
let rect = ctx.region().bounding_box();
let ref_text_layout = ctx
.text()
.new_text_layout("W")
.font(
data.config.editor.font_family(),
data.config.editor.font_size as f64,
)
.build()
.unwrap();
let char_width = ref_text_layout.size().width;
let y_shift = (data.config.editor.line_height as f64
- ref_text_layout.size().height)
/ 2.0;
let small_char_width = data
.config
.char_width(ctx.text(), data.config.editor.code_lens_font_size as f64);
let empty_lens = Syntax::lens_from_normal_lines(
data.buffer.len(),
data.config.editor.line_height,
data.config.editor.code_lens_font_size,
&[],
);
let lens = if let Some(syntax) = data.buffer.syntax.as_ref() {
&syntax.lens
} else {
&empty_lens
};
let cursor_line = data
.buffer
.line_of_offset(data.editor.cursor.offset().min(data.buffer.len()));
let last_line = data.buffer.line_of_offset(data.buffer.len());
let start_line =
lens.line_of_height(rect.y0.floor() as usize).min(last_line);
let end_line = lens
.line_of_height(rect.y1.ceil() as usize + data.config.editor.line_height)
.min(last_line);
let start_offset = data.buffer.offset_of_line(start_line);
let end_offset = data.buffer.offset_of_line(end_line + 1);
let mut lines_iter = data.buffer.rope.lines(start_offset..end_offset);
let mut y = lens.height_of_line(start_line) as f64;
for (line, line_height) in lens.iter_chunks(start_line..end_line + 1) {
if let Some(line_content) = lines_iter.next() {
let is_small = line_height < data.config.editor.line_height;
let mut x = 0.0;
if is_small {
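                    // Advance x by the width difference for leading
                    // whitespace so shrunken code-lens lines keep the same
                    // visual indentation as full-size lines.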
for ch in line_content.chars() {
if ch == ' ' {
x += char_width - small_char_width;
} else if ch == '\t' {
x += (char_width - small_char_width)
* data.config.editor.tab_width as f64;
} else {
break;
}
}
}
Self::paint_cursor_on_line(
data,
ctx,
is_focused,
cursor_line,
line,
x,
y,
if is_small {
small_char_width
} else {
char_width
},
line_height as f64,
);
let text_layout = data.buffer.new_text_layout(
ctx,
line,
&line_content,
None,
if is_small {
data.config.editor.code_lens_font_size
} else {
data.config.editor.font_size
},
[rect.x0, rect.x1],
&data.config,
);
ctx.draw_text(
&text_layout,
Point::new(x, if is_small { y } else { y + y_shift }),
);
y += line_height as f64;
}
}
}
fn paint_content(
data: &LapceEditorBufferData,
ctx: &mut PaintCtx,
is_focused: bool,
placeholder: Option<&String>,
env: &Env,
) {
let line_height = Self::line_height(data, env);
let line_padding = Self::line_padding(data, env);
let font_size = if data.editor.content.is_input() {
env.get(LapceTheme::INPUT_FONT_SIZE) as usize
} else {
data.config.editor.font_size
};
let text_layout = ctx
.text()
.new_text_layout("W")
.font(data.config.editor.font_family(), font_size as f64)
.build()
.unwrap();
let char_width = text_layout.size().width;
let y_shift = (line_height - text_layout.size().height) / 2.0;
if data.editor.content.is_input()
|| (data.editor.compare.is_none() && !data.editor.code_lens)
{
Self::paint_cursor(data, ctx, is_focused, placeholder, char_width, env);
Self::paint_find(data, ctx, char_width, env);
}
let self_size = ctx.size();
let rect = ctx.region().bounding_box();
let start_line = (rect.y0 / line_height).floor() as usize;
let end_line = (rect.y1 / line_height).ceil() as usize;
if !data.editor.content.is_input() && data.editor.code_lens {
Self::paint_code_lens_content(data, ctx, is_focused);
} else if let Some(compare) = data.editor.compare.as_ref() {
if let Some(changes) = data.buffer.history_changes.get(compare) {
let cursor_line =
data.buffer.line_of_offset(data.editor.cursor.offset());
let mut line = 0;
for change in changes.iter() {
match change {
DiffLines::Left(range) => {
let len = range.len();
line += len;
if line < start_line {
continue;
}
ctx.fill(
Size::new(self_size.width, line_height * len as f64)
.to_rect()
.with_origin(Point::new(
0.0,
line_height * (line - len) as f64,
)),
data.config.get_color_unchecked(
LapceTheme::SOURCE_CONTROL_REMOVED,
),
);
for l in line - len..line {
if l < start_line {
continue;
}
let actual_line = l - (line - len) + range.start;
if let Some(text_layout) =
data.buffer.history_text_layout(
ctx,
compare,
actual_line,
None,
[rect.x0, rect.x1],
&data.config,
)
{
ctx.draw_text(
&text_layout,
Point::new(
0.0,
line_height * l as f64 + y_shift,
),
);
}
if l > end_line {
break;
}
}
}
DiffLines::Skip(left, right) => {
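                            // Collapsed region of the diff: draw a single
                            // framed separator line carrying a `-n, +n` label
                            // for the two sides.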
let rect = Size::new(self_size.width, line_height)
.to_rect()
.with_origin(Point::new(
0.0,
line_height * line as f64,
));
ctx.fill(
rect,
data.config.get_color_unchecked(
LapceTheme::PANEL_BACKGROUND,
),
);
ctx.stroke(
rect,
data.config.get_color_unchecked(
LapceTheme::EDITOR_FOREGROUND,
),
1.0,
);
let text_layout = ctx
.text()
.new_text_layout(format!(
" -{}, +{}",
left.end + 1,
right.end + 1
))
.font(
data.config.editor.font_family(),
font_size as f64,
)
.text_color(
data.config
.get_color_unchecked(
LapceTheme::EDITOR_FOREGROUND,
)
.clone(),
)
.build_with_info(
true,
data.config.editor.tab_width,
Some([rect.x0, rect.x1]),
);
ctx.draw_text(
&text_layout,
Point::new(0.0, line_height * line as f64 + y_shift),
);
line += 1;
}
DiffLines::Both(_left, right) => {
let len = right.len();
line += len;
if line < start_line {
continue;
}
for l in line - len..line {
if l < start_line {
continue;
}
let rope_line = l - (line - len) + right.start;
Self::paint_cursor_on_line(
data,
ctx,
is_focused,
cursor_line,
rope_line,
0.0,
l as f64 * line_height,
char_width,
line_height,
);
let text_layout = data.buffer.new_text_layout(
ctx,
rope_line,
&data.buffer.line_content(rope_line),
None,
font_size,
[rect.x0, rect.x1],
&data.config,
);
ctx.draw_text(
&text_layout,
Point::new(
0.0,
line_height * l as f64 + y_shift,
),
);
if l > end_line {
break;
}
}
}
DiffLines::Right(range) => {
let len = range.len();
line += len;
if line < start_line {
continue;
}
ctx.fill(
Size::new(
self_size.width,
line_height * range.len() as f64,
)
.to_rect()
.with_origin(
Point::new(
0.0,
line_height * (line - range.len()) as f64,
),
),
data.config.get_color_unchecked(
LapceTheme::SOURCE_CONTROL_ADDED,
),
);
for l in line - len..line {
if l < start_line {
continue;
}
let rope_line = l - (line - len) + range.start;
Self::paint_cursor_on_line(
data,
ctx,
is_focused,
cursor_line,
rope_line,
0.0,
l as f64 * line_height,
char_width,
line_height,
);
let text_layout = data.buffer.new_text_layout(
ctx,
rope_line,
&data.buffer.line_content(rope_line),
None,
font_size,
[rect.x0, rect.x1],
&data.config,
);
ctx.draw_text(
&text_layout,
Point::new(
0.0,
line_height * l as f64 + y_shift,
),
);
if l > end_line {
break;
}
}
}
}
}
}
return;
} else {
let cursor_offset = data.editor.cursor.offset();
let cursor_line = data.buffer.line_of_offset(cursor_offset);
let start_offset = data.buffer.offset_of_line(start_line);
let end_offset = data.buffer.offset_of_line(end_line + 1);
let mode = data.editor.cursor.get_mode();
for (i, line_content) in data
.buffer
.slice_to_cow(start_offset..end_offset)
.split('\n')
.enumerate()
{
let line = i + start_line;
let cursor_index =
if is_focused && mode != Mode::Insert && line == cursor_line {
let cursor_line_start = data
.buffer
.offset_of_line(cursor_line)
.min(data.buffer.len());
let index = data
.buffer
.slice_to_cow(cursor_line_start..cursor_offset)
.len();
Some(index)
} else {
None
};
let text_layout = data.buffer.new_text_layout(
ctx,
line,
line_content,
cursor_index,
font_size,
[rect.x0, rect.x1],
&data.config,
);
ctx.draw_text(
&text_layout,
Point::new(
0.0,
line_height * line as f64 + y_shift + line_padding,
),
);
}
}
Self::paint_snippet(data, ctx);
Self::paint_diagnostics(data, ctx);
if data.buffer.len() == 0 {
if let Some(placeholder) = placeholder {
let text_layout = ctx
.text()
.new_text_layout(placeholder.to_string())
.font(FontFamily::SYSTEM_UI, 13.0)
.text_color(
data.config
.get_color_unchecked(LapceTheme::EDITOR_DIM)
.clone(),
)
.build()
.unwrap();
ctx.draw_text(&text_layout, Point::new(0.0, y_shift));
}
}
}
#[allow(clippy::too_many_arguments)]
fn paint_cursor_on_line(
data: &LapceEditorBufferData,
ctx: &mut PaintCtx,
is_focused: bool,
cursor_line: usize,
actual_line: usize,
x_shift: f64,
y: f64,
char_width: f64,
line_height: f64,
) {
match &data.editor.cursor.mode {
CursorMode::Normal(_) => {}
CursorMode::Visual { start, end, mode } => {
let (start_line, start_col) = data.buffer.offset_to_line_col(
*start.min(end),
data.config.editor.tab_width,
);
let (end_line, end_col) = data.buffer.offset_to_line_col(
*start.max(end),
data.config.editor.tab_width,
);
if actual_line < start_line || actual_line > end_line {
return;
}
let left_col = match mode {
VisualMode::Normal => {
if start_line == actual_line {
start_col
} else {
0
}
}
VisualMode::Linewise => 0,
VisualMode::Blockwise => {
let max_col = data.buffer.line_end_col(
actual_line,
false,
data.config.editor.tab_width,
);
let left = start_col.min(end_col);
if left > max_col {
return;
}
left
}
};
let right_col = match mode {
VisualMode::Normal => {
if actual_line == end_line {
let max_col = data.buffer.line_end_col(
actual_line,
true,
data.config.editor.tab_width,
);
(end_col + 1).min(max_col)
} else {
data.buffer.line_end_col(
actual_line,
true,
data.config.editor.tab_width,
) + 1
}
}
VisualMode::Linewise => {
data.buffer.line_end_col(
actual_line,
true,
data.config.editor.tab_width,
) + 1
}
VisualMode::Blockwise => {
let max_col = data.buffer.line_end_col(
actual_line,
true,
data.config.editor.tab_width,
);
                    match data.editor.cursor.horiz.as_ref() {
                        Some(&ColPosition::End) => max_col,
                        _ => (end_col.max(start_col) + 1).min(max_col),
                    }
}
};
let x0 = left_col as f64 * char_width + x_shift;
let x1 = right_col as f64 * char_width + x_shift;
let y0 = y;
let y1 = y0 + line_height;
ctx.fill(
Rect::new(x0, y0, x1, y1),
data.config
.get_color_unchecked(LapceTheme::EDITOR_SELECTION),
);
}
CursorMode::Insert(selection) => {
let start_offset = data.buffer.offset_of_line(actual_line);
let end_offset = data.buffer.offset_of_line(actual_line + 1);
let regions = selection.regions_in_range(start_offset, end_offset);
for region in regions {
if region.is_caret() {
let caret_actual_line =
data.buffer.line_of_offset(region.end());
if caret_actual_line == actual_line {
let size = ctx.size();
ctx.fill(
Rect::ZERO
.with_origin(Point::new(0.0, y))
.with_size(Size::new(size.width, line_height)),
data.config.get_color_unchecked(
LapceTheme::EDITOR_CURRENT_LINE,
),
);
}
} else {
let start = region.start();
let end = region.end();
let (start_line, start_col) =
data.buffer.offset_to_line_col(
start.min(end),
data.config.editor.tab_width,
);
let (end_line, end_col) = data.buffer.offset_to_line_col(
start.max(end),
data.config.editor.tab_width,
);
let left_col = match actual_line {
_ if actual_line == start_line => start_col,
_ => 0,
};
let right_col = match actual_line {
_ if actual_line == end_line => {
let max_col = data.buffer.line_end_col(
actual_line,
true,
data.config.editor.tab_width,
);
end_col.min(max_col)
}
_ => data.buffer.line_end_col(
actual_line,
true,
data.config.editor.tab_width,
),
};
let x0 = left_col as f64 * char_width + x_shift;
let x1 = right_col as f64 * char_width + x_shift;
let y0 = y;
let y1 = y0 + line_height;
ctx.fill(
Rect::new(x0, y0, x1, y1),
data.config
.get_color_unchecked(LapceTheme::EDITOR_SELECTION),
);
}
}
for region in regions {
if is_focused {
let (caret_actual_line, col) =
data.buffer.offset_to_line_col(
region.end(),
data.config.editor.tab_width,
);
if caret_actual_line == actual_line {
let x = col as f64 * char_width + x_shift;
ctx.stroke(
Line::new(
Point::new(x, y),
Point::new(x, y + line_height),
),
data.config
.get_color_unchecked(LapceTheme::EDITOR_CARET),
2.0,
)
}
}
}
}
}
if cursor_line == actual_line {
if let CursorMode::Normal(_) = &data.editor.cursor.mode {
let size = ctx.size();
ctx.fill(
Rect::ZERO
.with_origin(Point::new(0.0, y))
.with_size(Size::new(size.width, line_height)),
data.config
.get_color_unchecked(LapceTheme::EDITOR_CURRENT_LINE),
);
}
match &data.editor.cursor.mode {
CursorMode::Normal(_) | CursorMode::Visual { .. } => {
if is_focused {
let (x0, x1) = data.editor.cursor.current_char(
&data.buffer,
char_width,
&data.config,
);
let cursor_width =
if x1 > x0 { x1 - x0 } else { char_width };
ctx.fill(
Rect::ZERO
.with_origin(Point::new(x0 + x_shift, y))
.with_size(Size::new(cursor_width, line_height)),
data.config
.get_color_unchecked(LapceTheme::EDITOR_CARET),
);
}
}
CursorMode::Insert(_) => {}
}
}
}
fn paint_cursor(
data: &LapceEditorBufferData,
ctx: &mut PaintCtx,
is_focused: bool,
placeholder: Option<&String>,
width: f64,
env: &Env,
) {
let line_height = Self::line_height(data, env);
let line_padding = Self::line_padding(data, env);
let start_line =
(data.editor.scroll_offset.y / line_height).floor() as usize;
let end_line = ((data.editor.size.borrow().height
+ data.editor.scroll_offset.y)
/ line_height)
.ceil() as usize;
match &data.editor.cursor.mode {
CursorMode::Normal(offset) => {
let line = data.buffer.line_of_offset(*offset);
Self::paint_cursor_line(data, ctx, line, is_focused, placeholder);
if is_focused {
let (x0, x1) = data.editor.cursor.current_char(
&data.buffer,
width,
&data.config,
);
let char_width = if x1 > x0 { x1 - x0 } else { width };
ctx.fill(
Rect::ZERO
.with_origin(Point::new(
x0,
line as f64 * line_height + line_padding,
))
.with_size(Size::new(char_width, line_height)),
data.config.get_color_unchecked(LapceTheme::EDITOR_CARET),
);
}
}
CursorMode::Visual { start, end, mode } => {
let paint_start_line = start_line;
let paint_end_line = end_line;
let (start_line, start_col) = data.buffer.offset_to_line_col(
*start.min(end),
data.config.editor.tab_width,
);
let (end_line, end_col) = data.buffer.offset_to_line_col(
*start.max(end),
data.config.editor.tab_width,
);
for line in paint_start_line..paint_end_line {
if line < start_line || line > end_line {
continue;
}
let line_content = data.buffer.line_content(line);
let left_col = match mode {
VisualMode::Normal => match line {
_ if line == start_line => start_col,
_ => 0,
},
VisualMode::Linewise => 0,
VisualMode::Blockwise => {
let max_col = data.buffer.line_end_col(
line,
false,
data.config.editor.tab_width,
);
let left = start_col.min(end_col);
if left > max_col {
continue;
}
left
}
};
let x0 = left_col as f64 * width;
let right_col = match mode {
VisualMode::Normal => match line {
_ if line == end_line => {
let max_col = data.buffer.line_end_col(
line,
true,
data.config.editor.tab_width,
);
(end_col + 1).min(max_col)
}
_ => {
data.buffer.line_end_col(
line,
true,
data.config.editor.tab_width,
) + 1
}
},
VisualMode::Linewise => {
data.buffer.line_end_col(
line,
true,
data.config.editor.tab_width,
) + 1
}
VisualMode::Blockwise => {
let max_col = data.buffer.line_end_col(
line,
true,
data.config.editor.tab_width,
);
                        match data.editor.cursor.horiz.as_ref() {
                            Some(&ColPosition::End) => max_col,
                            _ => (end_col.max(start_col) + 1).min(max_col),
                        }
}
};
if !line_content.is_empty() {
let x1 = right_col as f64 * width;
let y0 = line as f64 * line_height + line_padding;
let y1 = y0 + line_height;
ctx.fill(
Rect::new(x0, y0, x1, y1),
data.config
.get_color_unchecked(LapceTheme::EDITOR_SELECTION),
);
}
if is_focused {
let line = data.buffer.line_of_offset(*end);
let (x0, x1) = data.editor.cursor.current_char(
&data.buffer,
width,
&data.config,
);
let char_width = if x1 > x0 { x1 - x0 } else { width };
ctx.fill(
Rect::ZERO
.with_origin(Point::new(
x0,
line as f64 * line_height + line_padding,
))
.with_size(Size::new(char_width, line_height)),
data.config
.get_color_unchecked(LapceTheme::EDITOR_CARET),
);
}
}
}
CursorMode::Insert(selection) => {
let offset = selection.get_cursor_offset();
let _line = data.buffer.line_of_offset(offset);
let last_line = data.buffer.last_line();
let end_line = if end_line > last_line {
last_line
} else {
end_line
};
let start = data.buffer.offset_of_line(start_line);
let end = data.buffer.offset_of_line(end_line + 1);
let regions = selection.regions_in_range(start, end);
for region in regions {
if region.start() == region.end() {
let line = data.buffer.line_of_offset(region.start());
Self::paint_cursor_line(
data,
ctx,
line,
is_focused,
placeholder,
);
} else {
let start = region.start();
let end = region.end();
let paint_start_line = start_line;
let paint_end_line = end_line;
let (start_line, start_col) =
data.buffer.offset_to_line_col(
start.min(end),
data.config.editor.tab_width,
);
let (end_line, end_col) = data.buffer.offset_to_line_col(
start.max(end),
data.config.editor.tab_width,
);
for line in paint_start_line..paint_end_line + 1 {
if line < start_line || line > end_line {
continue;
}
let line_content = data.buffer.line_content(line);
let left_col = match line {
_ if line == start_line => start_col,
_ => 0,
};
let x0 = left_col as f64 * width;
let right_col = match line {
_ if line == end_line => {
let max_col = data.buffer.line_end_col(
line,
true,
data.config.editor.tab_width,
);
end_col.min(max_col)
}
_ => data.buffer.line_end_col(
line,
true,
data.config.editor.tab_width,
),
};
if !line_content.is_empty() {
let x1 = right_col as f64 * width;
let y0 = line as f64 * line_height + line_padding;
let y1 = y0 + line_height;
ctx.fill(
Rect::new(x0, y0, x1, y1),
data.config.get_color_unchecked(
LapceTheme::EDITOR_SELECTION,
),
);
}
}
}
}
for region in regions {
if is_focused {
let (line, col) = data.buffer.offset_to_line_col(
region.end(),
data.config.editor.tab_width,
);
let x = col as f64 * width;
let y = line as f64 * line_height + line_padding;
ctx.stroke(
Line::new(
Point::new(x, y),
Point::new(x, y + line_height),
),
data.config
.get_color_unchecked(LapceTheme::EDITOR_CARET),
2.0,
)
}
}
}
}
}
fn paint_cursor_line(
data: &LapceEditorBufferData,
ctx: &mut PaintCtx,
line: usize,
is_focused: bool,
placeholder: Option<&String>,
) {
if !is_focused && data.buffer.len() == 0 && placeholder.is_some() {
return;
}
if data.editor.content.is_input() {
return;
}
let line_height = data.config.editor.line_height as f64;
let size = ctx.size();
ctx.fill(
Rect::ZERO
.with_origin(Point::new(0.0, line as f64 * line_height))
.with_size(Size::new(size.width, line_height)),
data.config
.get_color_unchecked(LapceTheme::EDITOR_CURRENT_LINE),
);
}
fn paint_find(
data: &LapceEditorBufferData,
ctx: &mut PaintCtx,
char_width: f64,
env: &Env,
) {
if data.editor.content.is_search() {
return;
}
if !data.find.visual {
return;
}
let line_height = Self::line_height(data, env);
let start_line =
(data.editor.scroll_offset.y / line_height).floor() as usize;
let end_line = ((data.editor.size.borrow().height
+ data.editor.scroll_offset.y)
/ line_height)
.ceil() as usize;
let start_offset = data.buffer.offset_of_line(start_line);
let end_offset = data.buffer.offset_of_line(end_line + 1);
let cursor_offset = data.editor.cursor.offset();
data.buffer.update_find(&data.find, start_line, end_line);
if data.find.search_string.is_some() {
for region in data
.buffer
.find
.borrow()
.occurrences()
.regions_in_range(start_offset, end_offset)
{
let start = region.min();
let end = region.max();
let active = start <= cursor_offset && cursor_offset <= end;
let (start_line, start_col) = data
.buffer
.offset_to_line_col(start, data.config.editor.tab_width);
let (end_line, end_col) = data
.buffer
.offset_to_line_col(end, data.config.editor.tab_width);
for line in start_line..end_line + 1 {
let left_col = if line == start_line { start_col } else { 0 };
let right_col = if line == end_line {
end_col
} else {
data.buffer.line_end_col(
line,
true,
data.config.editor.tab_width,
) + 1
};
let x0 = left_col as f64 * char_width;
let x1 = right_col as f64 * char_width;
let y0 = line as f64 * line_height;
let y1 = y0 + line_height;
let rect = Rect::new(x0, y0, x1, y1);
if active {
ctx.fill(
rect,
&data
.config
.get_color_unchecked(LapceTheme::EDITOR_CARET)
.clone()
.with_alpha(0.5),
);
}
ctx.stroke(
rect,
data.config
.get_color_unchecked(LapceTheme::EDITOR_FOREGROUND),
1.0,
);
}
}
}
}
fn paint_snippet(data: &LapceEditorBufferData, ctx: &mut PaintCtx) {
let line_height = data.config.editor.line_height as f64;
let start_line =
(data.editor.scroll_offset.y / line_height).floor() as usize;
let end_line = ((data.editor.size.borrow().height
+ data.editor.scroll_offset.y)
/ line_height)
.ceil() as usize;
let width = data.config.editor_text_width(ctx.text(), "W");
if let Some(snippet) = data.editor.snippet.as_ref() {
for (_, (start, end)) in snippet {
let paint_start_line = start_line;
let paint_end_line = end_line;
let (start_line, start_col) = data.buffer.offset_to_line_col(
*start.min(end),
data.config.editor.tab_width,
);
let (end_line, end_col) = data.buffer.offset_to_line_col(
*start.max(end),
data.config.editor.tab_width,
);
for line in paint_start_line..paint_end_line {
if line < start_line || line > end_line {
continue;
}
let line_content = data.buffer.line_content(line);
let left_col = match line {
_ if line == start_line => start_col,
_ => 0,
};
let x0 = left_col as f64 * width;
let right_col = match line {
_ if line == end_line => {
let max_col = data.buffer.line_end_col(
line,
true,
data.config.editor.tab_width,
);
end_col.min(max_col)
}
_ => data.buffer.line_end_col(
line,
true,
data.config.editor.tab_width,
),
};
if !line_content.is_empty() {
let x1 = right_col as f64 * width;
let y0 = line as f64 * line_height;
let y1 = y0 + line_height;
ctx.stroke(
Rect::new(x0, y0, x1, y1).inflate(1.0, -0.5),
data.config
.get_color_unchecked(LapceTheme::EDITOR_FOREGROUND),
1.0,
);
}
}
}
}
}
fn paint_diagnostics(data: &LapceEditorBufferData, ctx: &mut PaintCtx) {
let line_height = data.config.editor.line_height as f64;
let start_line =
(data.editor.scroll_offset.y / line_height).floor() as usize;
let end_line = ((data.editor.size.borrow().height
+ data.editor.scroll_offset.y)
/ line_height)
.ceil() as usize;
let width = data.config.editor_text_width(ctx.text(), "W");
let mut current = None;
let cursor_offset = data.editor.cursor.offset();
if let Some(diagnostics) = data.diagnostics() {
for diagnostic in diagnostics.iter() {
let start = diagnostic.diagnositc.range.start;
let end = diagnostic.diagnositc.range.end;
if (start.line as usize) <= end_line
&& (end.line as usize) >= start_line
{
let start_offset = if let Some(range) = diagnostic.range {
range.0
} else {
data.buffer
.offset_of_position(&start, data.config.editor.tab_width)
};
if start_offset == cursor_offset {
current = Some(diagnostic.clone());
}
for line in start.line as usize..end.line as usize + 1 {
if line < start_line {
continue;
}
if line > end_line {
break;
}
let x0 = if line == start.line as usize {
start.character as f64 * width
} else {
let (_, col) = data.buffer.offset_to_line_col(
data.buffer.first_non_blank_character_on_line(line),
data.config.editor.tab_width,
);
col as f64 * width
};
let x1 = if line == end.line as usize {
end.character as f64 * width
} else {
(data.buffer.line_end_col(
line,
false,
data.config.editor.tab_width,
) + 1) as f64
* width
};
let _y1 = (line + 1) as f64 * line_height;
let y0 = (line + 1) as f64 * line_height - 4.0;
let severity = diagnostic
.diagnositc
.severity
.as_ref()
.unwrap_or(&DiagnosticSeverity::Information);
let color = match severity {
DiagnosticSeverity::Error => data
.config
.get_color_unchecked(LapceTheme::LAPCE_ERROR),
DiagnosticSeverity::Warning => data
.config
.get_color_unchecked(LapceTheme::LAPCE_WARN),
_ => data
.config
.get_color_unchecked(LapceTheme::LAPCE_WARN),
};
Self::paint_wave_line(
ctx,
Point::new(x0, y0),
x1 - x0,
color,
);
}
}
}
}
if let Some(diagnostic) = current {
if data.editor.cursor.is_normal() {
let text_layout = ctx
.text()
.new_text_layout(diagnostic.diagnositc.message.clone())
.font(FontFamily::SYSTEM_UI, 14.0)
.text_color(
data.config
.get_color_unchecked(LapceTheme::EDITOR_FOREGROUND)
.clone(),
)
.max_width(data.editor.size.borrow().width - 20.0)
.build()
.unwrap();
let text_size = text_layout.size();
let mut text_height = text_size.height;
let related = diagnostic
.diagnositc
.related_information
.map(|related| {
related
.iter()
.map(|i| {
let text_layout = ctx
.text()
.new_text_layout(i.message.clone())
.font(FontFamily::SYSTEM_UI, 14.0)
.text_color(
data.config
.get_color_unchecked(
LapceTheme::EDITOR_FOREGROUND,
)
.clone(),
)
.max_width(
data.editor.size.borrow().width - 20.0,
)
.build()
.unwrap();
text_height += 10.0 + text_layout.size().height;
text_layout
})
.collect::<Vec<PietTextLayout>>()
})
.unwrap_or_else(Vec::new);
let start = diagnostic.diagnositc.range.start;
let rect = Rect::ZERO
.with_origin(Point::new(
0.0,
(start.line + 1) as f64 * line_height,
))
.with_size(Size::new(
data.editor.size.borrow().width,
text_height + 20.0,
));
ctx.fill(
rect,
data.config
.get_color_unchecked(LapceTheme::EDITOR_SELECTION),
);
let severity = diagnostic
.diagnositc
.severity
.as_ref()
.unwrap_or(&DiagnosticSeverity::Information);
let color = match severity {
DiagnosticSeverity::Error => {
data.config.get_color_unchecked(LapceTheme::LAPCE_ERROR)
}
DiagnosticSeverity::Warning => {
data.config.get_color_unchecked(LapceTheme::LAPCE_WARN)
}
_ => data.config.get_color_unchecked(LapceTheme::LAPCE_WARN),
};
ctx.stroke(rect, color, 1.0);
ctx.draw_text(
&text_layout,
Point::new(
10.0 + data.editor.scroll_offset.x,
(start.line + 1) as f64 * line_height + 10.0,
),
);
let mut text_height = text_size.height;
for text in related {
text_height += 10.0;
ctx.draw_text(
&text,
Point::new(
10.0 + data.editor.scroll_offset.x,
(start.line + 1) as f64 * line_height
+ 10.0
+ text_height,
),
);
text_height += text.size().height;
}
}
}
}
fn line_height(data: &LapceEditorBufferData, env: &Env) -> f64 {
if data.editor.content.is_input() {
env.get(LapceTheme::INPUT_LINE_HEIGHT)
} else {
data.config.editor.line_height as f64
}
}
fn line_padding(data: &LapceEditorBufferData, env: &Env) -> f64 {
if data.editor.content.is_input() {
env.get(LapceTheme::INPUT_LINE_PADDING)
} else {
0.0
}
}
fn paint_wave_line(
ctx: &mut PaintCtx,
origin: Point,
max_width: f64,
color: &Color,
) {
let mut path = BezPath::new();
let mut x = 0.0;
let width = 3.5;
let height = 4.0;
path.move_to(origin + (0.0, height / 2.0));
let mut direction = 1.0;
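        // Alternate the quadratic control point above and below the baseline
        // so consecutive segments join into a continuous squiggle.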
while x < max_width {
let point = origin + (x, height / 2.0);
let p1 = point + (width / 2.0, -height / 2.0 * direction);
let p2 = point + (width, 0.0);
path.quad_to(p1, p2);
x += width;
direction *= -1.0;
}
ctx.stroke(path, color, 1.4);
}
}
impl Widget<LapceTabData> for LapceEditor {
fn event(
&mut self,
ctx: &mut EventCtx,
event: &Event,
data: &mut LapceTabData,
_env: &Env,
) {
match event {
Event::MouseMove(mouse_event) => {
ctx.set_cursor(&druid::Cursor::IBeam);
if mouse_event.pos != self.mouse_pos {
self.mouse_pos = mouse_event.pos;
                    // Request a fresh hover timer; the superseded one is
                    // simply ignored when it fires.
self.mouse_hover_timer = ctx.request_timer(
Duration::from_millis(data.config.editor.hover_delay),
);
if ctx.is_active() {
let editor_data = data.editor_view_content(self.view_id);
let new_offset = editor_data.offset_of_mouse(
ctx.text(),
mouse_event.pos,
&data.config,
);
let editor =
data.main_split.editors.get_mut(&self.view_id).unwrap();
let editor = Arc::make_mut(editor);
editor.cursor = editor.cursor.set_offset(
new_offset,
true,
mouse_event.mods.alt(),
);
}
}
}
Event::MouseUp(_mouse_event) => {
ctx.set_active(false);
}
Event::MouseDown(mouse_event) => {
let buffer = data.main_split.editor_buffer(self.view_id);
let editor =
data.main_split.editors.get(&self.view_id).unwrap().clone();
let mut editor_data = data.editor_view_content(self.view_id);
self.mouse_down(ctx, mouse_event, &mut editor_data, &data.config);
data.update_from_editor_buffer_data(editor_data, &editor, &buffer);
// match mouse_event.button {
// druid::MouseButton::Right => {
// let menu_items = vec![
// MenuItem {
// text: LapceCommand::GotoDefinition
// .get_message()
// .unwrap()
// .to_string(),
// command: LapceCommandNew {
// cmd: LapceCommand::GotoDefinition.to_string(),
// palette_desc: None,
// data: None,
// target: CommandTarget::Focus,
// },
// },
// MenuItem {
// text: "Command Palette".to_string(),
// command: LapceCommandNew {
// cmd: LapceWorkbenchCommand::PaletteCommand
// .to_string(),
// palette_desc: None,
// data: None,
// target: CommandTarget::Workbench,
// },
// },
// ];
// let point = mouse_event.pos + editor.window_origin.to_vec2();
// ctx.submit_command(Command::new(
// LAPCE_UI_COMMAND,
// LapceUICommand::ShowMenu(point, Arc::new(menu_items)),
// Target::Auto,
// ));
// }
// _ => {}
// }
}
Event::Timer(id) => {
if self.mouse_hover_timer == *id {
let editor =
data.main_split.editors.get(&self.view_id).unwrap().clone();
let mut editor_data = data.editor_view_content(self.view_id);
let buffer = editor_data.buffer.clone();
let offset = editor_data.offset_of_mouse(
ctx.text(),
self.mouse_pos,
&data.config,
);
editor_data.update_hover(ctx, offset);
data.update_from_editor_buffer_data(
editor_data,
&editor,
&buffer,
);
}
}
Event::Command(cmd) if cmd.is(LAPCE_UI_COMMAND) => {
let command = cmd.get_unchecked(LAPCE_UI_COMMAND);
if let LapceUICommand::UpdateWindowOrigin = command {
let window_origin = ctx.window_origin();
let editor =
data.main_split.editors.get_mut(&self.view_id).unwrap();
if editor.window_origin != window_origin {
Arc::make_mut(editor).window_origin = window_origin;
}
}
}
_ => (),
}
}
fn lifecycle(
&mut self,
ctx: &mut LifeCycleCtx,
event: &LifeCycle,
data: &LapceTabData,
_env: &Env,
) {
if let LifeCycle::Internal(InternalLifeCycle::ParentWindowOrigin) = event {
let editor = data.main_split.editors.get(&self.view_id).unwrap();
if ctx.window_origin() != editor.window_origin {
ctx.submit_command(Command::new(
LAPCE_UI_COMMAND,
LapceUICommand::UpdateWindowOrigin,
Target::Widget(editor.view_id),
))
}
}
}
fn update(
&mut self,
_ctx: &mut UpdateCtx,
_old_data: &LapceTabData,
_data: &LapceTabData,
_env: &Env,
) {
// let buffer = &data.buffer;
// let old_buffer = &old_data.buffer;
// let line_height = data.config.editor.line_height as f64;
// if data.editor.size != old_data.editor.size {
// ctx.request_paint();
// return;
// }
// if !old_buffer.same(buffer) {
// if buffer.max_len != old_buffer.max_len
// || buffer.num_lines != old_buffer.num_lines
// {
// ctx.request_layout();
// ctx.request_paint();
// return;
// }
// if !buffer.styles.same(&old_buffer.styles) {
// ctx.request_paint();
// }
// if buffer.rev != old_buffer.rev {
// ctx.request_paint();
// }
// }
// if old_data.editor.cursor != data.editor.cursor {
// ctx.request_paint();
// }
// if old_data.current_code_actions().is_some()
// != data.current_code_actions().is_some()
// {
// ctx.request_paint();
// }
// if old_data.on_diagnostic() != data.on_diagnostic() {
// ctx.request_paint();
// }
// if old_data.diagnostics.len() != data.diagnostics.len() {
// ctx.request_paint();
// }
// if (*old_data.main_split.active == self.view_id
// && *data.main_split.active != self.view_id)
// || (*old_data.main_split.active != self.view_id
// && *data.main_split.active == self.view_id)
// {
// ctx.request_paint();
// }
}
fn layout(
&mut self,
ctx: &mut LayoutCtx,
bc: &BoxConstraints,
data: &LapceTabData,
env: &Env,
) -> Size {
let editor_data = data.editor_view_content(self.view_id);
Self::get_size(&editor_data, ctx.text(), bc.max(), &data.panels, env)
}
fn paint(&mut self, ctx: &mut PaintCtx, data: &LapceTabData, env: &Env) {
let is_focused = data.focus == self.view_id;
let data = data.editor_view_content(self.view_id);
Self::paint_content(&data, ctx, is_focused, self.placeholder.as_ref(), env);
}
}
#[derive(Clone)]
pub struct RegisterContent {
#[allow(dead_code)]
kind: VisualMode,
#[allow(dead_code)]
content: Vec<String>,
}
#[allow(dead_code)]
struct EditorTextLayout {
layout: TextLayout<String>,
text: String,
}
#[derive(Clone)]
pub struct HighlightTextLayout {
pub layout: PietTextLayout,
pub text: String,
pub highlights: Vec<(usize, usize, String)>,
}
#[allow(dead_code)]
fn get_workspace_edit_edits<'a>(
url: &Url,
workspace_edit: &'a WorkspaceEdit,
) -> Option<Vec<&'a TextEdit>> {
match get_workspace_edit_changes_edits(url, workspace_edit) {
Some(x) => Some(x),
None => get_workspace_edit_document_changes_edits(url, workspace_edit),
}
}
fn get_workspace_edit_changes_edits<'a>(
url: &Url,
workspace_edit: &'a WorkspaceEdit,
) -> Option<Vec<&'a TextEdit>> {
let changes = workspace_edit.changes.as_ref()?;
changes.get(url).map(|c| c.iter().collect())
}
fn get_workspace_edit_document_changes_edits<'a>(
url: &Url,
workspace_edit: &'a WorkspaceEdit,
) -> Option<Vec<&'a TextEdit>> {
let changes = workspace_edit.document_changes.as_ref()?;
match changes {
DocumentChanges::Edits(edits) => {
for edit in edits {
if &edit.text_document.uri == url {
let e = edit
.edits
.iter()
.filter_map(|e| match e {
lsp_types::OneOf::Left(edit) => Some(edit),
lsp_types::OneOf::Right(_) => None,
})
.collect();
return Some(e);
}
}
None
}
DocumentChanges::Operations(_) => None,
}
}
#[allow(dead_code)]
fn str_is_pair_right(c: &str) -> bool {
if c.chars().count() == 1 {
let c = c.chars().next().unwrap();
return !matching_pair_direction(c).unwrap_or(true);
}
false
}
| 40.153239 | 95 | 0.390093 |
877c03d2a7318cf429b523bc69be8c041559a1e2 | 456 | struct Context<'s>(&'s str);
// lifetime bound `'s: 'c` declares that 's outlives 'c, i.e. 's is a
// lifetime subtype of 'c; the bound is needed for the `&'c Context<'s>`
// reference below to be valid
struct Parser<'c, 's: 'c> {
    context: &'c Context<'s>,
}
impl<'c, 's> Parser<'c, 's> {
fn parse(&self) -> Result<(), &'s str> {
Err(&self.context.0[1..])
}
}
fn parse_context(context: Context) -> Result<(), &str> {
Parser { context: &context }.parse()
}
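// `parse_context` compiles because `parse` returns `&'s str`, borrowed from
// the string slice inside `Context` (which outlives this call), rather than
// from the short-lived `&'c Context` reference dropped when `parse` returns.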
fn main() {
} | 19.826087 | 56 | 0.506579 |
508500c4315609d982155fab078af63ba06b448d | 2,180 |
use std::old_io::{File, IoResult, Reader, Writer};
use std::old_path::Path;
use Mesh;
use mesh::Facet;
use vector::Vector3D;
pub struct POV;
impl POV {
pub fn write(mesh: &Mesh, file_name: &str) -> IoResult<()> {
let out_file_name = file_name.replace("stl", "inc");
let path = Path::new(out_file_name.clone());
let mut file = match File::create(&path) {
Err(why) => { return Err(why); },
Ok(file) => file
};
//let modelname = path.file_name();
let modelname = "m_model";
try!(file.write_str(&format!("// Source file: {}\n", file_name)));
try!(file.write_str(&format!("# declare {} = mesh {{\n", modelname)));
for facet in mesh.facets.iter() {
match file.write_str(&POV::facet_to_povstring(&mesh, facet)) {
Err(why) => { return Err(why); },
Ok(_) => print!(".")
}
}
println!("done.");
try!(file.write_str("}\n"));
let template = POV::read_template();
let first_pass = template.replace("FILE_NAME", &out_file_name);
let second_pass = first_pass.replace("MODEL_NAME", modelname);
let modelfilename = file_name.replace("stl", "pov");
let modelpath = Path::new(modelfilename);
let mut modelfile = match File::create(&modelpath) {
Err(why) => { return Err(why); },
Ok(file) => file
};
try!(modelfile.write_str(&second_pass));
Ok(())
}
fn facet_to_povstring(mesh: &Mesh, facet: &Facet) -> String {
let v1 = mesh.vertices[facet.v1];
let v2 = mesh.vertices[facet.v2];
let v3 = mesh.vertices[facet.v3];
format!(" triangle {{\n {},\n {},\n {}\n }}\n",
POV::vertex_to_povstring(v1),
POV::vertex_to_povstring(v2),
POV::vertex_to_povstring(v3))
}
fn vertex_to_povstring(vector: Vector3D) -> String {
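        // Note: the (y, x, z) axis order is swapped on purpose, presumably to
        // map the mesh into POV-Ray's coordinate frame.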
format!(" <{}, {}, {}>", vector.y, vector.x, vector.z)
}
fn read_template() -> String {
File::open(&Path::new("templates/model.pov")).read_to_string().unwrap()
}
}
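
// Illustrative usage (untested sketch; `Mesh::from_stl` below is a
// hypothetical loader standing in for however the mesh is created):
//
//     let mesh = Mesh::from_stl("model.stl");
//     POV::write(&mesh, "model.stl").unwrap(); // writes model.inc and model.pov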
| 31.142857 | 82 | 0.538991 |
895a8b95a045a1818ff74fbcaf28ab93f4975f2c | 8,753 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Defines physical expressions for `first_value`, `last_value`, and `nth_value`
//! that can be evaluated at runtime during query execution
use crate::error::{DataFusionError, Result};
use crate::physical_plan::PhysicalExpr;
use crate::scalar::ScalarValue;
use arrow::array::{new_null_array, ArrayRef};
use arrow::compute::kernels::window::shift;
use arrow::datatypes::{DataType, Field};
use arrow::record_batch::RecordBatch;
use datafusion_physical_expr::window::BuiltInWindowFunctionExpr;
use datafusion_physical_expr::window::PartitionEvaluator;
use std::any::Any;
use std::iter;
use std::ops::Range;
use std::sync::Arc;
/// nth_value kind
#[derive(Debug, Copy, Clone)]
enum NthValueKind {
First,
Last,
Nth(u32),
}
/// nth_value expression
#[derive(Debug)]
pub struct NthValue {
name: String,
expr: Arc<dyn PhysicalExpr>,
data_type: DataType,
kind: NthValueKind,
}
impl NthValue {
/// Create a new FIRST_VALUE window aggregate function
pub fn first(
name: impl Into<String>,
expr: Arc<dyn PhysicalExpr>,
data_type: DataType,
) -> Self {
Self {
name: name.into(),
expr,
data_type,
kind: NthValueKind::First,
}
}
/// Create a new LAST_VALUE window aggregate function
pub fn last(
name: impl Into<String>,
expr: Arc<dyn PhysicalExpr>,
data_type: DataType,
) -> Self {
Self {
name: name.into(),
expr,
data_type,
kind: NthValueKind::Last,
}
}
/// Create a new NTH_VALUE window aggregate function
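    ///
    /// `n` is 1-based; `n == 0` is rejected with an execution error.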
pub fn nth(
name: impl Into<String>,
expr: Arc<dyn PhysicalExpr>,
data_type: DataType,
n: u32,
) -> Result<Self> {
match n {
0 => Err(DataFusionError::Execution(
"nth_value expect n to be > 0".to_owned(),
)),
_ => Ok(Self {
name: name.into(),
expr,
data_type,
kind: NthValueKind::Nth(n),
}),
}
}
}
impl BuiltInWindowFunctionExpr for NthValue {
/// Return a reference to Any that can be used for downcasting
fn as_any(&self) -> &dyn Any {
self
}
fn field(&self) -> Result<Field> {
let nullable = true;
Ok(Field::new(&self.name, self.data_type.clone(), nullable))
}
fn expressions(&self) -> Vec<Arc<dyn PhysicalExpr>> {
vec![self.expr.clone()]
}
fn name(&self) -> &str {
&self.name
}
fn create_evaluator(
&self,
batch: &RecordBatch,
) -> Result<Box<dyn PartitionEvaluator>> {
let values = self
.expressions()
.iter()
.map(|e| e.evaluate(batch))
.map(|r| r.map(|v| v.into_array(batch.num_rows())))
.collect::<Result<Vec<_>>>()?;
Ok(Box::new(NthValueEvaluator {
kind: self.kind,
values,
}))
}
}
/// Value evaluator for nth_value functions
pub(crate) struct NthValueEvaluator {
kind: NthValueKind,
values: Vec<ArrayRef>,
}
impl PartitionEvaluator for NthValueEvaluator {
fn include_rank(&self) -> bool {
true
}
fn evaluate_partition(&self, _partition: Range<usize>) -> Result<ArrayRef> {
unreachable!("first, last, and nth_value evaluation must be called with evaluate_partition_with_rank")
}
fn evaluate_partition_with_rank(
&self,
partition: Range<usize>,
ranks_in_partition: &[Range<usize>],
) -> Result<ArrayRef> {
let arr = &self.values[0];
let num_rows = partition.end - partition.start;
match self.kind {
NthValueKind::First => {
let value = ScalarValue::try_from_array(arr, partition.start)?;
Ok(value.to_array_of_size(num_rows))
}
NthValueKind::Last => {
                // Because the default window frame is between unbounded preceding and the
                // current row with peer evaluation, the last value extends to the end of its peers.
let values = ranks_in_partition
.iter()
.map(|range| {
let len = range.end - range.start;
let value = ScalarValue::try_from_array(arr, range.end - 1)?;
Ok(iter::repeat(value).take(len))
})
.collect::<Result<Vec<_>>>()?
.into_iter()
.flatten();
ScalarValue::iter_to_array(values)
}
NthValueKind::Nth(n) => {
let index = (n as usize) - 1;
if index >= num_rows {
Ok(new_null_array(arr.data_type(), num_rows))
} else {
let value =
ScalarValue::try_from_array(arr, partition.start + index)?;
let arr = value.to_array_of_size(num_rows);
                    // Because the default window frame is between unbounded preceding and the
                    // current row, values at indices < `index` must be null, hence the shift.
                    // This changes once window frames other than the default are implemented.
shift(arr.as_ref(), index as i64).map_err(DataFusionError::ArrowError)
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::error::Result;
use crate::from_slice::FromSlice;
use crate::physical_plan::expressions::Column;
use arrow::record_batch::RecordBatch;
use arrow::{array::*, datatypes::*};
fn test_i32_result(expr: NthValue, expected: Int32Array) -> Result<()> {
let arr: ArrayRef =
Arc::new(Int32Array::from_slice(&[1, -2, 3, -4, 5, -6, 7, 8]));
let values = vec![arr];
let schema = Schema::new(vec![Field::new("arr", DataType::Int32, false)]);
let batch = RecordBatch::try_new(Arc::new(schema), values.clone())?;
let result = expr
.create_evaluator(&batch)?
.evaluate_with_rank(vec![0..8], vec![0..8])?;
assert_eq!(1, result.len());
let result = result[0].as_any().downcast_ref::<Int32Array>().unwrap();
assert_eq!(expected, *result);
Ok(())
}
#[test]
fn first_value() -> Result<()> {
let first_value = NthValue::first(
"first_value".to_owned(),
Arc::new(Column::new("arr", 0)),
DataType::Int32,
);
test_i32_result(first_value, Int32Array::from_iter_values(vec![1; 8]))?;
Ok(())
}
#[test]
fn last_value() -> Result<()> {
let last_value = NthValue::last(
"last_value".to_owned(),
Arc::new(Column::new("arr", 0)),
DataType::Int32,
);
test_i32_result(last_value, Int32Array::from_iter_values(vec![8; 8]))?;
Ok(())
}
#[test]
fn nth_value_1() -> Result<()> {
let nth_value = NthValue::nth(
"nth_value".to_owned(),
Arc::new(Column::new("arr", 0)),
DataType::Int32,
1,
)?;
test_i32_result(nth_value, Int32Array::from_iter_values(vec![1; 8]))?;
Ok(())
}
#[test]
fn nth_value_2() -> Result<()> {
let nth_value = NthValue::nth(
"nth_value".to_owned(),
Arc::new(Column::new("arr", 0)),
DataType::Int32,
2,
)?;
test_i32_result(
nth_value,
Int32Array::from(vec![
None,
Some(-2),
Some(-2),
Some(-2),
Some(-2),
Some(-2),
Some(-2),
Some(-2),
]),
)?;
Ok(())
}
}
| 31.260714 | 110 | 0.55124 |
647edfe244a6fff335ee7e4a03c4fed0f73e49c2 | 5,486 |
///
/// Declares [eager!](macro.eager.html)-enabled macros.
///
/// # Usage
///
/// Wraps the usual `macro_rules!` syntax. First an identifier must be given, preceded by '$'.
/// Then any number of macro declarations can be given using the usual `macro_rules!` syntax.
/// Documentation and attributes are also given in the
/// usual way just before each `macro_rules!`, i.e. inside `eager_macro_rules!`.
///
/// Some restrictions apply to the `macro_rules!` declarations:
///
/// * The identifier given at the beginning must not collide with any macro variable name
/// used in any rule in any macro to be declared.
/// * No rules should accept `@eager` as the first token, as this could conflict with the
/// implementation of `eager!`. Wildcards are acceptable, as `eager_macro_rules!` will automatically
/// resolve the ambiguity with the `eager!` implementation.
///
/// # `eager!`-enabling example
///
/// [eager!](macro.eager.html)-enabling the following macro:
/// ```
/// /// Some documentation
/// #[macro_export]
/// macro_rules! some_macro{
/// ()=>{};
/// }
/// ```
/// is done by wrapping it in `eager_macro_rules!` as follows:
/// ```
/// #[macro_use] extern crate dmutil;
/// eager_macro_rules!{ $eager_1
/// /// Some documentation
/// #[macro_export]
/// macro_rules! some_macro{
/// ()=>{};
/// }
/// }
/// ```
/// where `()=>{};` is the list of rules that comprise the macro, and no macro variable is called
/// `$eager_1`.
///
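/// # Using the declared macro eagerly
///
/// A macro declared this way can then be expanded with
/// [eager!](macro.eager.html); the `plus_1` macro below is an illustrative
/// example:
///
/// ```ignore
/// #[macro_use] extern crate dmutil;
/// eager_macro_rules!{ $eager_1
///     macro_rules! plus_1{
///         ()=>{+ 1};
///     }
/// }
///
/// fn main(){
///     assert_eq!(3, eager!{2 plus_1!()});
/// }
/// ```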
#[macro_export]
macro_rules! eager_macro_rules{
// Start by decoding the initial values
(
$dollar1:tt $id_1:ident
$(
$(#[$($metas:tt)*])*
macro_rules! $macro_name:ident {
$($rules:tt => $expansions:tt);* $(;)*
}
)+
)=>{
$(
eager_macro_rules_internal!{
@first[
$(#[$($metas)*])*
$macro_name $dollar1 $id_1
]
$($rules => $expansions)*
}
)+
};
}
#[macro_export]
#[doc(hidden)]
macro_rules! eager_macro_rules_internal{
// If there are no more rules, finish
(
@first[
$(#[$($metas:tt)*])*
$macro_name:ident $dollar1:tt $id_1:ident
$($prev_grammar:tt => $prev_expansion:tt)*
]
) => {
eager_macro_rules_internal!{
@final[
$(#[$($metas)*])*
$macro_name$dollar1 $id_1
$($prev_grammar => $prev_expansion)*
]
}
};
    // Handle the 3 different block types before the '=>'
(
@first[
$(#[$($metas:tt)*])*
$macro_name:ident $dollar1:tt $id_1:ident
$($prev_grammar:tt => $prev_expansion:tt)*
]
{$($next_grammar:tt)*} $($rest:tt)+
) => {
eager_macro_rules_internal!{
@expansion[
$(#[$($metas)*])*
$macro_name$dollar1 $id_1
$($prev_grammar => $prev_expansion)*
[$($next_grammar)*]
]
$($rest)+
}
};
(
@first[
$(#[$($metas:tt)*])*
$macro_name:ident $dollar1:tt $id_1:ident
$($prev_grammar:tt => $prev_expansion:tt)*
]
($($next_grammar:tt)*) $($rest:tt)+
) => {
eager_macro_rules_internal!{
@expansion[
$(#[$($metas)*])*
$macro_name$dollar1 $id_1
$($prev_grammar => $prev_expansion)*
[$($next_grammar)*]
]
$($rest)+
}
};
(
@first[
$(#[$($metas:tt)*])*
$macro_name:ident $dollar1:tt $id_1:ident
$($prev_grammar:tt => $prev_expansion:tt)*
]
[$($next_grammar:tt)*] $($rest:tt)+
) => {
eager_macro_rules_internal!{
@expansion[
$(#[$($metas)*])*
$macro_name$dollar1 $id_1
$($prev_grammar => $prev_expansion)*
[$($next_grammar)*]
]
$($rest)+
}
};
// Handle the 3 different block types after the '=>'
(
@expansion[
$(#[$($metas:tt)*])*
$macro_name:ident $dollar1:tt $id_1:ident
$({$($prev_grammar:tt)*} => $prev_expansion:tt)*
[$($next_grammar:tt)*]
]
=> {$($next_expansion:tt)*} $($rest:tt)*
) => {
eager_macro_rules_internal!{
@first[
$(#[$($metas)*])*
$macro_name$dollar1 $id_1
$({$($prev_grammar)*} => $prev_expansion)*
{$($next_grammar)*} => {$($next_expansion)*}
]
$($rest)*
}
};
(
@expansion[
$(#[$($metas:tt)*])*
$macro_name:ident $dollar1:tt $id_1:ident
$({$($prev_grammar:tt)*} => $prev_expansion:tt)*
[$($next_grammar:tt)*]
]
=> ($($next_expansion:tt)*) $($rest:tt)*
) => {
eager_macro_rules_internal!{
@first[
$(#[$($metas)*])*
$macro_name$dollar1 $id_1
$({$($prev_grammar)*} => $prev_expansion)*
{$($next_grammar)*} => {$($next_expansion)*}
]
$($rest)*
}
};
(
@expansion[
$(#[$($metas:tt)*])*
$macro_name:ident $dollar1:tt $id_1:ident
$({$($prev_grammar:tt)*} => $prev_expansion:tt)*
[$($next_grammar:tt)*]
]
=> [$($next_expansion:tt)*] $($rest:tt)*
) => {
eager_macro_rules_internal!{
@first[
$(#[$($metas)*])*
$macro_name$dollar1 $id_1
$({$($prev_grammar)*} => $prev_expansion)*
{$($next_grammar)*} => {$($next_expansion)*}
]
$($rest)*
}
};
// Output
( @final[
$(#[$($metas:tt)*])*
$macro_name:ident $dollar1:tt $id_1:ident
$({$($rules_grammar:tt)*} => {$($rules_expansion:tt)*})+
]
)=>{
$(#[$($metas)*])*
macro_rules! $macro_name{
$(
// First the eager supporting version
{
@eager[$dollar1($dollar1 $id_1:tt)*]
$($rules_grammar)*
} => {
eager_internal!{
@from_macro[$dollar1($dollar1 $id_1)*]
$($rules_expansion)*
}
};
)+
$(
            // Then the pure versions. We put them last so that, if a macro
            // contains a '$($all:tt)*' rule, the pure version will not
            // catch an eager call.
{$($rules_grammar)*} => {$($rules_expansion)*};
)+
}
};
}
| 23.147679 | 100 | 0.572731 |
ede3c75e5c13a56a12039b9d8e94ac96529b4275 | 594 | #![no_std]
pub extern crate atmega168_hal as hal;
/// See [`avr_device::entry`](https://docs.rs/avr-device/latest/avr_device/attr.entry.html).
#[cfg(feature = "rt")]
pub use hal::entry;
mod pins;
pub use crate::atmega168::Peripherals;
pub use crate::pins::*;
pub use atmega168_hal::adc;
pub use atmega168_hal::atmega168;
pub use atmega168_hal::prelude;
pub use atmega168_hal::pwm;
pub use atmega168_hal::spi;
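
// The aliases below assume a 16 MHz system clock (hal::clock::MHz16).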
pub type Delay = hal::delay::Delay<hal::clock::MHz16>;
pub type Serial<IMODE> = hal::usart::Usart0<hal::clock::MHz16, IMODE>;
pub type I2c<M> = hal::i2c::I2c<hal::clock::MHz16, M>;
| 28.285714 | 92 | 0.717172 |
142a6fac3035d2c0b31ee0e7711c6a09a32806db | 51,106 | #[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::AUX1 {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
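
// Illustrative usage of the generated read/modify/write API (the field
// accessors and writer proxies, e.g. `tmrb1tinv()`, live in the truncated
// remainder of this generated file and are assumed to follow the usual
// svd2rust pattern; untested sketch):
//
//     let inverted = periph.aux1.read().tmrb1pol23().is_inv();
//     periph.aux1.modify(|_, w| w.tmrb1tinv().en());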
#[doc = "Possible values of the field `TMRB1EN23`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TMRB1EN23R {
#[doc = "Disable enhanced functions. value."]
DIS,
#[doc = "Enable enhanced functions. value."]
EN,
}
impl TMRB1EN23R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
TMRB1EN23R::DIS => true,
TMRB1EN23R::EN => false,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> TMRB1EN23R {
match value {
true => TMRB1EN23R::DIS,
false => TMRB1EN23R::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == TMRB1EN23R::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == TMRB1EN23R::EN
}
}
#[doc = "Possible values of the field `TMRB1POL23`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TMRB1POL23R {
#[doc = "Upper output normal polarity value."]
NORM,
#[doc = "Upper output inverted polarity. value."]
INV,
}
impl TMRB1POL23R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
TMRB1POL23R::NORM => false,
TMRB1POL23R::INV => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> TMRB1POL23R {
match value {
false => TMRB1POL23R::NORM,
true => TMRB1POL23R::INV,
}
}
#[doc = "Checks if the value of the field is `NORM`"]
#[inline]
pub fn is_norm(&self) -> bool {
*self == TMRB1POL23R::NORM
}
#[doc = "Checks if the value of the field is `INV`"]
#[inline]
pub fn is_inv(&self) -> bool {
*self == TMRB1POL23R::INV
}
}
#[doc = "Possible values of the field `TMRB1TINV`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TMRB1TINVR {
#[doc = "Disable invert on trigger value."]
DIS,
#[doc = "Enable invert on trigger value."]
EN,
}
impl TMRB1TINVR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
TMRB1TINVR::DIS => false,
TMRB1TINVR::EN => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> TMRB1TINVR {
match value {
false => TMRB1TINVR::DIS,
true => TMRB1TINVR::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == TMRB1TINVR::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == TMRB1TINVR::EN
}
}
#[doc = "Possible values of the field `TMRB1NOSYNC`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TMRB1NOSYNCR {
#[doc = "Synchronization on source clock value."]
DIS,
#[doc = "No synchronization on source clock value."]
NOSYNC,
}
impl TMRB1NOSYNCR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
TMRB1NOSYNCR::DIS => false,
TMRB1NOSYNCR::NOSYNC => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> TMRB1NOSYNCR {
match value {
false => TMRB1NOSYNCR::DIS,
true => TMRB1NOSYNCR::NOSYNC,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == TMRB1NOSYNCR::DIS
}
#[doc = "Checks if the value of the field is `NOSYNC`"]
#[inline]
pub fn is_nosync(&self) -> bool {
*self == TMRB1NOSYNCR::NOSYNC
}
}
#[doc = "Possible values of the field `TMRB1TRIG`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TMRB1TRIGR {
#[doc = "Trigger source is disabled. value."]
DIS,
#[doc = "Trigger source is CTIMERA1 OUT. value."]
A1OUT,
#[doc = "Trigger source is CTIMERB3 OUT. value."]
B3OUT,
#[doc = "Trigger source is CTIMERA3 OUT. value."]
A3OUT,
#[doc = "Trigger source is CTIMERA6 OUT. value."]
A6OUT,
#[doc = "Trigger source is CTIMERB6 OUT. value."]
B6OUT,
#[doc = "Trigger source is CTIMERA0 OUT. value."]
A0OUT,
#[doc = "Trigger source is CTIMERB0 OUT. value."]
B0OUT,
#[doc = "Trigger source is CTIMERB3 OUT2. value."]
B3OUT2,
#[doc = "Trigger source is CTIMERA3 OUT2. value."]
A3OUT2,
#[doc = "Trigger source is CTIMERA4 OUT2. value."]
A4OUT2,
#[doc = "Trigger source is CTIMERB4 OUT2. value."]
B4OUT2,
#[doc = "Trigger source is CTIMERA6 OUT2, dual edge. value."]
A6OUT2DUAL,
#[doc = "Trigger source is CTIMERA7 OUT2, dual edge. value."]
A7OUT2DUAL,
#[doc = "Trigger source is CTIMERB5 OUT2, dual edge. value."]
B5OUT2DUAL,
#[doc = "Trigger source is CTIMERA5 OUT2, dual edge. value."]
A5OUT2DUAL,
}
impl TMRB1TRIGR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
match *self {
TMRB1TRIGR::DIS => 0,
TMRB1TRIGR::A1OUT => 1,
TMRB1TRIGR::B3OUT => 2,
TMRB1TRIGR::A3OUT => 3,
TMRB1TRIGR::A6OUT => 4,
TMRB1TRIGR::B6OUT => 5,
TMRB1TRIGR::A0OUT => 6,
TMRB1TRIGR::B0OUT => 7,
TMRB1TRIGR::B3OUT2 => 8,
TMRB1TRIGR::A3OUT2 => 9,
TMRB1TRIGR::A4OUT2 => 10,
TMRB1TRIGR::B4OUT2 => 11,
TMRB1TRIGR::A6OUT2DUAL => 12,
TMRB1TRIGR::A7OUT2DUAL => 13,
TMRB1TRIGR::B5OUT2DUAL => 14,
TMRB1TRIGR::A5OUT2DUAL => 15,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: u8) -> TMRB1TRIGR {
match value {
0 => TMRB1TRIGR::DIS,
1 => TMRB1TRIGR::A1OUT,
2 => TMRB1TRIGR::B3OUT,
3 => TMRB1TRIGR::A3OUT,
4 => TMRB1TRIGR::A6OUT,
5 => TMRB1TRIGR::B6OUT,
6 => TMRB1TRIGR::A0OUT,
7 => TMRB1TRIGR::B0OUT,
8 => TMRB1TRIGR::B3OUT2,
9 => TMRB1TRIGR::A3OUT2,
10 => TMRB1TRIGR::A4OUT2,
11 => TMRB1TRIGR::B4OUT2,
12 => TMRB1TRIGR::A6OUT2DUAL,
13 => TMRB1TRIGR::A7OUT2DUAL,
14 => TMRB1TRIGR::B5OUT2DUAL,
15 => TMRB1TRIGR::A5OUT2DUAL,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == TMRB1TRIGR::DIS
}
#[doc = "Checks if the value of the field is `A1OUT`"]
#[inline]
pub fn is_a1out(&self) -> bool {
*self == TMRB1TRIGR::A1OUT
}
#[doc = "Checks if the value of the field is `B3OUT`"]
#[inline]
pub fn is_b3out(&self) -> bool {
*self == TMRB1TRIGR::B3OUT
}
#[doc = "Checks if the value of the field is `A3OUT`"]
#[inline]
pub fn is_a3out(&self) -> bool {
*self == TMRB1TRIGR::A3OUT
}
#[doc = "Checks if the value of the field is `A6OUT`"]
#[inline]
pub fn is_a6out(&self) -> bool {
*self == TMRB1TRIGR::A6OUT
}
#[doc = "Checks if the value of the field is `B6OUT`"]
#[inline]
pub fn is_b6out(&self) -> bool {
*self == TMRB1TRIGR::B6OUT
}
#[doc = "Checks if the value of the field is `A0OUT`"]
#[inline]
pub fn is_a0out(&self) -> bool {
*self == TMRB1TRIGR::A0OUT
}
#[doc = "Checks if the value of the field is `B0OUT`"]
#[inline]
pub fn is_b0out(&self) -> bool {
*self == TMRB1TRIGR::B0OUT
}
#[doc = "Checks if the value of the field is `B3OUT2`"]
#[inline]
pub fn is_b3out2(&self) -> bool {
*self == TMRB1TRIGR::B3OUT2
}
#[doc = "Checks if the value of the field is `A3OUT2`"]
#[inline]
pub fn is_a3out2(&self) -> bool {
*self == TMRB1TRIGR::A3OUT2
}
#[doc = "Checks if the value of the field is `A4OUT2`"]
#[inline]
pub fn is_a4out2(&self) -> bool {
*self == TMRB1TRIGR::A4OUT2
}
#[doc = "Checks if the value of the field is `B4OUT2`"]
#[inline]
pub fn is_b4out2(&self) -> bool {
*self == TMRB1TRIGR::B4OUT2
}
#[doc = "Checks if the value of the field is `A6OUT2DUAL`"]
#[inline]
pub fn is_a6out2dual(&self) -> bool {
*self == TMRB1TRIGR::A6OUT2DUAL
}
#[doc = "Checks if the value of the field is `A7OUT2DUAL`"]
#[inline]
pub fn is_a7out2dual(&self) -> bool {
*self == TMRB1TRIGR::A7OUT2DUAL
}
#[doc = "Checks if the value of the field is `B5OUT2DUAL`"]
#[inline]
pub fn is_b5out2dual(&self) -> bool {
*self == TMRB1TRIGR::B5OUT2DUAL
}
#[doc = "Checks if the value of the field is `A5OUT2DUAL`"]
#[inline]
pub fn is_a5out2dual(&self) -> bool {
*self == TMRB1TRIGR::A5OUT2DUAL
}
}
#[doc = r" Value of the field"]
pub struct TMRB1LMTR {
bits: u8,
}
impl TMRB1LMTR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = "Possible values of the field `TMRA1EN23`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TMRA1EN23R {
#[doc = "Disable enhanced functions. value."]
DIS,
#[doc = "Enable enhanced functions. value."]
EN,
}
impl TMRA1EN23R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
TMRA1EN23R::DIS => true,
TMRA1EN23R::EN => false,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> TMRA1EN23R {
match value {
true => TMRA1EN23R::DIS,
false => TMRA1EN23R::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == TMRA1EN23R::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == TMRA1EN23R::EN
}
}
#[doc = "Possible values of the field `TMRA1POL23`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TMRA1POL23R {
#[doc = "Upper output normal polarity value."]
NORMAL,
#[doc = "Upper output inverted polarity. value."]
INV,
}
impl TMRA1POL23R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
TMRA1POL23R::NORMAL => false,
TMRA1POL23R::INV => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> TMRA1POL23R {
match value {
false => TMRA1POL23R::NORMAL,
true => TMRA1POL23R::INV,
}
}
#[doc = "Checks if the value of the field is `NORMAL`"]
#[inline]
pub fn is_normal(&self) -> bool {
*self == TMRA1POL23R::NORMAL
}
#[doc = "Checks if the value of the field is `INV`"]
#[inline]
pub fn is_inv(&self) -> bool {
*self == TMRA1POL23R::INV
}
}
#[doc = "Possible values of the field `TMRA1TINV`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TMRA1TINVR {
#[doc = "Disable invert on trigger value."]
DIS,
#[doc = "Enable invert on trigger value."]
EN,
}
impl TMRA1TINVR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
TMRA1TINVR::DIS => false,
TMRA1TINVR::EN => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> TMRA1TINVR {
match value {
false => TMRA1TINVR::DIS,
true => TMRA1TINVR::EN,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == TMRA1TINVR::DIS
}
#[doc = "Checks if the value of the field is `EN`"]
#[inline]
pub fn is_en(&self) -> bool {
*self == TMRA1TINVR::EN
}
}
#[doc = "Possible values of the field `TMRA1NOSYNC`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TMRA1NOSYNCR {
#[doc = "Synchronization on source clock value."]
DIS,
#[doc = "No synchronization on source clock value."]
NOSYNC,
}
impl TMRA1NOSYNCR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
TMRA1NOSYNCR::DIS => false,
TMRA1NOSYNCR::NOSYNC => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> TMRA1NOSYNCR {
match value {
false => TMRA1NOSYNCR::DIS,
true => TMRA1NOSYNCR::NOSYNC,
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == TMRA1NOSYNCR::DIS
}
#[doc = "Checks if the value of the field is `NOSYNC`"]
#[inline]
pub fn is_nosync(&self) -> bool {
*self == TMRA1NOSYNCR::NOSYNC
}
}
#[doc = "Possible values of the field `TMRA1TRIG`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TMRA1TRIGR {
#[doc = "Trigger source is disabled. value."]
DIS,
#[doc = "Trigger source is CTIMERB1 OUT. value."]
B1OUT,
#[doc = "Trigger source is CTIMERB3 OUT. value."]
B3OUT,
#[doc = "Trigger source is CTIMERA3 OUT. value."]
A3OUT,
#[doc = "Trigger source is CTIMERA0 OUT. value."]
A0OUT,
#[doc = "Trigger source is CTIMERB0 OUT. value."]
B0OUT,
#[doc = "Trigger source is CTIMERA5 OUT. value."]
A5OUT,
#[doc = "Trigger source is CTIMERB5 OUT. value."]
B5OUT,
#[doc = "Trigger source is CTIMERB3 OUT2. value."]
B3OUT2,
#[doc = "Trigger source is CTIMERA3 OUT2. value."]
A3OUT2,
#[doc = "Trigger source is CTIMERA4 OUT2. value."]
A4OUT2,
#[doc = "Trigger source is CTIMERB4 OUT2. value."]
B4OUT2,
#[doc = "Trigger source is CTIMERA6 OUT2, dual edge. value."]
A6OUT2DUAL,
#[doc = "Trigger source is CTIMERA7 OUT2, dual edge. value."]
A7OUT2DUAL,
#[doc = "Trigger source is CTIMERB5 OUT2, dual edge. value."]
B5OUT2DUAL,
#[doc = "Trigger source is CTIMERA5 OUT2, dual edge. value."]
A5OUT2DUAL,
}
impl TMRA1TRIGR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
match *self {
TMRA1TRIGR::DIS => 0,
TMRA1TRIGR::B1OUT => 1,
TMRA1TRIGR::B3OUT => 2,
TMRA1TRIGR::A3OUT => 3,
TMRA1TRIGR::A0OUT => 4,
TMRA1TRIGR::B0OUT => 5,
TMRA1TRIGR::A5OUT => 6,
TMRA1TRIGR::B5OUT => 7,
TMRA1TRIGR::B3OUT2 => 8,
TMRA1TRIGR::A3OUT2 => 9,
TMRA1TRIGR::A4OUT2 => 10,
TMRA1TRIGR::B4OUT2 => 11,
TMRA1TRIGR::A6OUT2DUAL => 12,
TMRA1TRIGR::A7OUT2DUAL => 13,
TMRA1TRIGR::B5OUT2DUAL => 14,
TMRA1TRIGR::A5OUT2DUAL => 15,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: u8) -> TMRA1TRIGR {
match value {
0 => TMRA1TRIGR::DIS,
1 => TMRA1TRIGR::B1OUT,
2 => TMRA1TRIGR::B3OUT,
3 => TMRA1TRIGR::A3OUT,
4 => TMRA1TRIGR::A0OUT,
5 => TMRA1TRIGR::B0OUT,
6 => TMRA1TRIGR::A5OUT,
7 => TMRA1TRIGR::B5OUT,
8 => TMRA1TRIGR::B3OUT2,
9 => TMRA1TRIGR::A3OUT2,
10 => TMRA1TRIGR::A4OUT2,
11 => TMRA1TRIGR::B4OUT2,
12 => TMRA1TRIGR::A6OUT2DUAL,
13 => TMRA1TRIGR::A7OUT2DUAL,
14 => TMRA1TRIGR::B5OUT2DUAL,
15 => TMRA1TRIGR::A5OUT2DUAL,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `DIS`"]
#[inline]
pub fn is_dis(&self) -> bool {
*self == TMRA1TRIGR::DIS
}
#[doc = "Checks if the value of the field is `B1OUT`"]
#[inline]
pub fn is_b1out(&self) -> bool {
*self == TMRA1TRIGR::B1OUT
}
#[doc = "Checks if the value of the field is `B3OUT`"]
#[inline]
pub fn is_b3out(&self) -> bool {
*self == TMRA1TRIGR::B3OUT
}
#[doc = "Checks if the value of the field is `A3OUT`"]
#[inline]
pub fn is_a3out(&self) -> bool {
*self == TMRA1TRIGR::A3OUT
}
#[doc = "Checks if the value of the field is `A0OUT`"]
#[inline]
pub fn is_a0out(&self) -> bool {
*self == TMRA1TRIGR::A0OUT
}
#[doc = "Checks if the value of the field is `B0OUT`"]
#[inline]
pub fn is_b0out(&self) -> bool {
*self == TMRA1TRIGR::B0OUT
}
#[doc = "Checks if the value of the field is `A5OUT`"]
#[inline]
pub fn is_a5out(&self) -> bool {
*self == TMRA1TRIGR::A5OUT
}
#[doc = "Checks if the value of the field is `B5OUT`"]
#[inline]
pub fn is_b5out(&self) -> bool {
*self == TMRA1TRIGR::B5OUT
}
#[doc = "Checks if the value of the field is `B3OUT2`"]
#[inline]
pub fn is_b3out2(&self) -> bool {
*self == TMRA1TRIGR::B3OUT2
}
#[doc = "Checks if the value of the field is `A3OUT2`"]
#[inline]
pub fn is_a3out2(&self) -> bool {
*self == TMRA1TRIGR::A3OUT2
}
#[doc = "Checks if the value of the field is `A4OUT2`"]
#[inline]
pub fn is_a4out2(&self) -> bool {
*self == TMRA1TRIGR::A4OUT2
}
#[doc = "Checks if the value of the field is `B4OUT2`"]
#[inline]
pub fn is_b4out2(&self) -> bool {
*self == TMRA1TRIGR::B4OUT2
}
#[doc = "Checks if the value of the field is `A6OUT2DUAL`"]
#[inline]
pub fn is_a6out2dual(&self) -> bool {
*self == TMRA1TRIGR::A6OUT2DUAL
}
#[doc = "Checks if the value of the field is `A7OUT2DUAL`"]
#[inline]
pub fn is_a7out2dual(&self) -> bool {
*self == TMRA1TRIGR::A7OUT2DUAL
}
#[doc = "Checks if the value of the field is `B5OUT2DUAL`"]
#[inline]
pub fn is_b5out2dual(&self) -> bool {
*self == TMRA1TRIGR::B5OUT2DUAL
}
#[doc = "Checks if the value of the field is `A5OUT2DUAL`"]
#[inline]
pub fn is_a5out2dual(&self) -> bool {
*self == TMRA1TRIGR::A5OUT2DUAL
}
}
#[doc = r" Value of the field"]
pub struct TMRA1LMTR {
bits: u8,
}
impl TMRA1LMTR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = "Values that can be written to the field `TMRB1EN23`"]
pub enum TMRB1EN23W {
#[doc = "Disable enhanced functions. value."]
DIS,
#[doc = "Enable enhanced functions. value."]
EN,
}
impl TMRB1EN23W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
TMRB1EN23W::DIS => true,
TMRB1EN23W::EN => false,
}
}
}
#[doc = r" Proxy"]
pub struct _TMRB1EN23W<'a> {
w: &'a mut W,
}
impl<'a> _TMRB1EN23W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: TMRB1EN23W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Disable enhanced functions. value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(TMRB1EN23W::DIS)
}
#[doc = "Enable enhanced functions. value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(TMRB1EN23W::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 30;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `TMRB1POL23`"]
pub enum TMRB1POL23W {
#[doc = "Upper output normal polarity value."]
NORM,
#[doc = "Upper output inverted polarity. value."]
INV,
}
impl TMRB1POL23W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
TMRB1POL23W::NORM => false,
TMRB1POL23W::INV => true,
}
}
}
#[doc = r" Proxy"]
pub struct _TMRB1POL23W<'a> {
w: &'a mut W,
}
impl<'a> _TMRB1POL23W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: TMRB1POL23W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Upper output normal polarity value."]
#[inline]
pub fn norm(self) -> &'a mut W {
self.variant(TMRB1POL23W::NORM)
}
#[doc = "Upper output inverted polarity. value."]
#[inline]
pub fn inv(self) -> &'a mut W {
self.variant(TMRB1POL23W::INV)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 29;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `TMRB1TINV`"]
pub enum TMRB1TINVW {
#[doc = "Disable invert on trigger value."]
DIS,
#[doc = "Enable invert on trigger value."]
EN,
}
impl TMRB1TINVW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
TMRB1TINVW::DIS => false,
TMRB1TINVW::EN => true,
}
}
}
#[doc = r" Proxy"]
pub struct _TMRB1TINVW<'a> {
w: &'a mut W,
}
impl<'a> _TMRB1TINVW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: TMRB1TINVW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Disable invert on trigger value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(TMRB1TINVW::DIS)
}
#[doc = "Enable invert on trigger value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(TMRB1TINVW::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 28;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `TMRB1NOSYNC`"]
pub enum TMRB1NOSYNCW {
#[doc = "Synchronization on source clock value."]
DIS,
#[doc = "No synchronization on source clock value."]
NOSYNC,
}
impl TMRB1NOSYNCW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
TMRB1NOSYNCW::DIS => false,
TMRB1NOSYNCW::NOSYNC => true,
}
}
}
#[doc = r" Proxy"]
pub struct _TMRB1NOSYNCW<'a> {
w: &'a mut W,
}
impl<'a> _TMRB1NOSYNCW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: TMRB1NOSYNCW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Synchronization on source clock value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(TMRB1NOSYNCW::DIS)
}
#[doc = "No synchronization on source clock value."]
#[inline]
pub fn nosync(self) -> &'a mut W {
self.variant(TMRB1NOSYNCW::NOSYNC)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 27;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `TMRB1TRIG`"]
pub enum TMRB1TRIGW {
#[doc = "Trigger source is disabled. value."]
DIS,
#[doc = "Trigger source is CTIMERA1 OUT. value."]
A1OUT,
#[doc = "Trigger source is CTIMERB3 OUT. value."]
B3OUT,
#[doc = "Trigger source is CTIMERA3 OUT. value."]
A3OUT,
#[doc = "Trigger source is CTIMERA6 OUT. value."]
A6OUT,
#[doc = "Trigger source is CTIMERB6 OUT. value."]
B6OUT,
#[doc = "Trigger source is CTIMERA0 OUT. value."]
A0OUT,
#[doc = "Trigger source is CTIMERB0 OUT. value."]
B0OUT,
#[doc = "Trigger source is CTIMERB3 OUT2. value."]
B3OUT2,
#[doc = "Trigger source is CTIMERA3 OUT2. value."]
A3OUT2,
#[doc = "Trigger source is CTIMERA4 OUT2. value."]
A4OUT2,
#[doc = "Trigger source is CTIMERB4 OUT2. value."]
B4OUT2,
#[doc = "Trigger source is CTIMERA6 OUT2, dual edge. value."]
A6OUT2DUAL,
#[doc = "Trigger source is CTIMERA7 OUT2, dual edge. value."]
A7OUT2DUAL,
#[doc = "Trigger source is CTIMERB5 OUT2, dual edge. value."]
B5OUT2DUAL,
#[doc = "Trigger source is CTIMERA5 OUT2, dual edge. value."]
A5OUT2DUAL,
}
impl TMRB1TRIGW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> u8 {
match *self {
TMRB1TRIGW::DIS => 0,
TMRB1TRIGW::A1OUT => 1,
TMRB1TRIGW::B3OUT => 2,
TMRB1TRIGW::A3OUT => 3,
TMRB1TRIGW::A6OUT => 4,
TMRB1TRIGW::B6OUT => 5,
TMRB1TRIGW::A0OUT => 6,
TMRB1TRIGW::B0OUT => 7,
TMRB1TRIGW::B3OUT2 => 8,
TMRB1TRIGW::A3OUT2 => 9,
TMRB1TRIGW::A4OUT2 => 10,
TMRB1TRIGW::B4OUT2 => 11,
TMRB1TRIGW::A6OUT2DUAL => 12,
TMRB1TRIGW::A7OUT2DUAL => 13,
TMRB1TRIGW::B5OUT2DUAL => 14,
TMRB1TRIGW::A5OUT2DUAL => 15,
}
}
}
#[doc = r" Proxy"]
pub struct _TMRB1TRIGW<'a> {
w: &'a mut W,
}
impl<'a> _TMRB1TRIGW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: TMRB1TRIGW) -> &'a mut W {
{
self.bits(variant._bits())
}
}
#[doc = "Trigger source is disabled. value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(TMRB1TRIGW::DIS)
}
#[doc = "Trigger source is CTIMERA1 OUT. value."]
#[inline]
pub fn a1out(self) -> &'a mut W {
self.variant(TMRB1TRIGW::A1OUT)
}
#[doc = "Trigger source is CTIMERB3 OUT. value."]
#[inline]
pub fn b3out(self) -> &'a mut W {
self.variant(TMRB1TRIGW::B3OUT)
}
#[doc = "Trigger source is CTIMERA3 OUT. value."]
#[inline]
pub fn a3out(self) -> &'a mut W {
self.variant(TMRB1TRIGW::A3OUT)
}
#[doc = "Trigger source is CTIMERA6 OUT. value."]
#[inline]
pub fn a6out(self) -> &'a mut W {
self.variant(TMRB1TRIGW::A6OUT)
}
#[doc = "Trigger source is CTIMERB6 OUT. value."]
#[inline]
pub fn b6out(self) -> &'a mut W {
self.variant(TMRB1TRIGW::B6OUT)
}
#[doc = "Trigger source is CTIMERA0 OUT. value."]
#[inline]
pub fn a0out(self) -> &'a mut W {
self.variant(TMRB1TRIGW::A0OUT)
}
#[doc = "Trigger source is CTIMERB0 OUT. value."]
#[inline]
pub fn b0out(self) -> &'a mut W {
self.variant(TMRB1TRIGW::B0OUT)
}
#[doc = "Trigger source is CTIMERB3 OUT2. value."]
#[inline]
pub fn b3out2(self) -> &'a mut W {
self.variant(TMRB1TRIGW::B3OUT2)
}
#[doc = "Trigger source is CTIMERA3 OUT2. value."]
#[inline]
pub fn a3out2(self) -> &'a mut W {
self.variant(TMRB1TRIGW::A3OUT2)
}
#[doc = "Trigger source is CTIMERA4 OUT2. value."]
#[inline]
pub fn a4out2(self) -> &'a mut W {
self.variant(TMRB1TRIGW::A4OUT2)
}
#[doc = "Trigger source is CTIMERB4 OUT2. value."]
#[inline]
pub fn b4out2(self) -> &'a mut W {
self.variant(TMRB1TRIGW::B4OUT2)
}
#[doc = "Trigger source is CTIMERA6 OUT2, dual edge. value."]
#[inline]
pub fn a6out2dual(self) -> &'a mut W {
self.variant(TMRB1TRIGW::A6OUT2DUAL)
}
#[doc = "Trigger source is CTIMERA7 OUT2, dual edge. value."]
#[inline]
pub fn a7out2dual(self) -> &'a mut W {
self.variant(TMRB1TRIGW::A7OUT2DUAL)
}
#[doc = "Trigger source is CTIMERB5 OUT2, dual edge. value."]
#[inline]
pub fn b5out2dual(self) -> &'a mut W {
self.variant(TMRB1TRIGW::B5OUT2DUAL)
}
#[doc = "Trigger source is CTIMERA5 OUT2, dual edge. value."]
#[inline]
pub fn a5out2dual(self) -> &'a mut W {
self.variant(TMRB1TRIGW::A5OUT2DUAL)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 15;
const OFFSET: u8 = 23;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _TMRB1LMTW<'a> {
w: &'a mut W,
}
impl<'a> _TMRB1LMTW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 63;
const OFFSET: u8 = 16;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `TMRA1EN23`"]
pub enum TMRA1EN23W {
#[doc = "Disable enhanced functions. value."]
DIS,
#[doc = "Enable enhanced functions. value."]
EN,
}
impl TMRA1EN23W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
TMRA1EN23W::DIS => true,
TMRA1EN23W::EN => false,
}
}
}
#[doc = r" Proxy"]
pub struct _TMRA1EN23W<'a> {
w: &'a mut W,
}
impl<'a> _TMRA1EN23W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: TMRA1EN23W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Disable enhanced functions. value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(TMRA1EN23W::DIS)
}
#[doc = "Enable enhanced functions. value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(TMRA1EN23W::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 14;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `TMRA1POL23`"]
pub enum TMRA1POL23W {
#[doc = "Upper output normal polarity value."]
NORMAL,
#[doc = "Upper output inverted polarity. value."]
INV,
}
impl TMRA1POL23W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
TMRA1POL23W::NORMAL => false,
TMRA1POL23W::INV => true,
}
}
}
#[doc = r" Proxy"]
pub struct _TMRA1POL23W<'a> {
w: &'a mut W,
}
impl<'a> _TMRA1POL23W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: TMRA1POL23W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Upper output normal polarity value."]
#[inline]
pub fn normal(self) -> &'a mut W {
self.variant(TMRA1POL23W::NORMAL)
}
#[doc = "Upper output inverted polarity. value."]
#[inline]
pub fn inv(self) -> &'a mut W {
self.variant(TMRA1POL23W::INV)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 13;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `TMRA1TINV`"]
pub enum TMRA1TINVW {
#[doc = "Disable invert on trigger value."]
DIS,
#[doc = "Enable invert on trigger value."]
EN,
}
impl TMRA1TINVW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
TMRA1TINVW::DIS => false,
TMRA1TINVW::EN => true,
}
}
}
#[doc = r" Proxy"]
pub struct _TMRA1TINVW<'a> {
w: &'a mut W,
}
impl<'a> _TMRA1TINVW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: TMRA1TINVW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Disable invert on trigger value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(TMRA1TINVW::DIS)
}
#[doc = "Enable invert on trigger value."]
#[inline]
pub fn en(self) -> &'a mut W {
self.variant(TMRA1TINVW::EN)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 12;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `TMRA1NOSYNC`"]
pub enum TMRA1NOSYNCW {
#[doc = "Synchronization on source clock value."]
DIS,
#[doc = "No synchronization on source clock value."]
NOSYNC,
}
impl TMRA1NOSYNCW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
TMRA1NOSYNCW::DIS => false,
TMRA1NOSYNCW::NOSYNC => true,
}
}
}
#[doc = r" Proxy"]
pub struct _TMRA1NOSYNCW<'a> {
w: &'a mut W,
}
impl<'a> _TMRA1NOSYNCW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: TMRA1NOSYNCW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Synchronization on source clock value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(TMRA1NOSYNCW::DIS)
}
#[doc = "No synchronization on source clock value."]
#[inline]
pub fn nosync(self) -> &'a mut W {
self.variant(TMRA1NOSYNCW::NOSYNC)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 11;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `TMRA1TRIG`"]
pub enum TMRA1TRIGW {
#[doc = "Trigger source is disabled. value."]
DIS,
#[doc = "Trigger source is CTIMERB1 OUT. value."]
B1OUT,
#[doc = "Trigger source is CTIMERB3 OUT. value."]
B3OUT,
#[doc = "Trigger source is CTIMERA3 OUT. value."]
A3OUT,
#[doc = "Trigger source is CTIMERA0 OUT. value."]
A0OUT,
#[doc = "Trigger source is CTIMERB0 OUT. value."]
B0OUT,
#[doc = "Trigger source is CTIMERA5 OUT. value."]
A5OUT,
#[doc = "Trigger source is CTIMERB5 OUT. value."]
B5OUT,
#[doc = "Trigger source is CTIMERB3 OUT2. value."]
B3OUT2,
#[doc = "Trigger source is CTIMERA3 OUT2. value."]
A3OUT2,
#[doc = "Trigger source is CTIMERA4 OUT2. value."]
A4OUT2,
#[doc = "Trigger source is CTIMERB4 OUT2. value."]
B4OUT2,
#[doc = "Trigger source is CTIMERA6 OUT2, dual edge. value."]
A6OUT2DUAL,
#[doc = "Trigger source is CTIMERA7 OUT2, dual edge. value."]
A7OUT2DUAL,
#[doc = "Trigger source is CTIMERB5 OUT2, dual edge. value."]
B5OUT2DUAL,
#[doc = "Trigger source is CTIMERA5 OUT2, dual edge. value."]
A5OUT2DUAL,
}
impl TMRA1TRIGW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> u8 {
match *self {
TMRA1TRIGW::DIS => 0,
TMRA1TRIGW::B1OUT => 1,
TMRA1TRIGW::B3OUT => 2,
TMRA1TRIGW::A3OUT => 3,
TMRA1TRIGW::A0OUT => 4,
TMRA1TRIGW::B0OUT => 5,
TMRA1TRIGW::A5OUT => 6,
TMRA1TRIGW::B5OUT => 7,
TMRA1TRIGW::B3OUT2 => 8,
TMRA1TRIGW::A3OUT2 => 9,
TMRA1TRIGW::A4OUT2 => 10,
TMRA1TRIGW::B4OUT2 => 11,
TMRA1TRIGW::A6OUT2DUAL => 12,
TMRA1TRIGW::A7OUT2DUAL => 13,
TMRA1TRIGW::B5OUT2DUAL => 14,
TMRA1TRIGW::A5OUT2DUAL => 15,
}
}
}
#[doc = r" Proxy"]
pub struct _TMRA1TRIGW<'a> {
w: &'a mut W,
}
impl<'a> _TMRA1TRIGW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: TMRA1TRIGW) -> &'a mut W {
{
self.bits(variant._bits())
}
}
#[doc = "Trigger source is disabled. value."]
#[inline]
pub fn dis(self) -> &'a mut W {
self.variant(TMRA1TRIGW::DIS)
}
#[doc = "Trigger source is CTIMERB1 OUT. value."]
#[inline]
pub fn b1out(self) -> &'a mut W {
self.variant(TMRA1TRIGW::B1OUT)
}
#[doc = "Trigger source is CTIMERB3 OUT. value."]
#[inline]
pub fn b3out(self) -> &'a mut W {
self.variant(TMRA1TRIGW::B3OUT)
}
#[doc = "Trigger source is CTIMERA3 OUT. value."]
#[inline]
pub fn a3out(self) -> &'a mut W {
self.variant(TMRA1TRIGW::A3OUT)
}
#[doc = "Trigger source is CTIMERA0 OUT. value."]
#[inline]
pub fn a0out(self) -> &'a mut W {
self.variant(TMRA1TRIGW::A0OUT)
}
#[doc = "Trigger source is CTIMERB0 OUT. value."]
#[inline]
pub fn b0out(self) -> &'a mut W {
self.variant(TMRA1TRIGW::B0OUT)
}
#[doc = "Trigger source is CTIMERA5 OUT. value."]
#[inline]
pub fn a5out(self) -> &'a mut W {
self.variant(TMRA1TRIGW::A5OUT)
}
#[doc = "Trigger source is CTIMERB5 OUT. value."]
#[inline]
pub fn b5out(self) -> &'a mut W {
self.variant(TMRA1TRIGW::B5OUT)
}
#[doc = "Trigger source is CTIMERB3 OUT2. value."]
#[inline]
pub fn b3out2(self) -> &'a mut W {
self.variant(TMRA1TRIGW::B3OUT2)
}
#[doc = "Trigger source is CTIMERA3 OUT2. value."]
#[inline]
pub fn a3out2(self) -> &'a mut W {
self.variant(TMRA1TRIGW::A3OUT2)
}
#[doc = "Trigger source is CTIMERA4 OUT2. value."]
#[inline]
pub fn a4out2(self) -> &'a mut W {
self.variant(TMRA1TRIGW::A4OUT2)
}
#[doc = "Trigger source is CTIMERB4 OUT2. value."]
#[inline]
pub fn b4out2(self) -> &'a mut W {
self.variant(TMRA1TRIGW::B4OUT2)
}
#[doc = "Trigger source is CTIMERA6 OUT2, dual edge. value."]
#[inline]
pub fn a6out2dual(self) -> &'a mut W {
self.variant(TMRA1TRIGW::A6OUT2DUAL)
}
#[doc = "Trigger source is CTIMERA7 OUT2, dual edge. value."]
#[inline]
pub fn a7out2dual(self) -> &'a mut W {
self.variant(TMRA1TRIGW::A7OUT2DUAL)
}
#[doc = "Trigger source is CTIMERB5 OUT2, dual edge. value."]
#[inline]
pub fn b5out2dual(self) -> &'a mut W {
self.variant(TMRA1TRIGW::B5OUT2DUAL)
}
#[doc = "Trigger source is CTIMERA5 OUT2, dual edge. value."]
#[inline]
pub fn a5out2dual(self) -> &'a mut W {
self.variant(TMRA1TRIGW::A5OUT2DUAL)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 15;
const OFFSET: u8 = 7;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _TMRA1LMTW<'a> {
w: &'a mut W,
}
impl<'a> _TMRA1LMTW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 127;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 30 - Counter/Timer B1 Upper compare enable."]
#[inline]
pub fn tmrb1en23(&self) -> TMRB1EN23R {
TMRB1EN23R::_from({
const MASK: bool = true;
const OFFSET: u8 = 30;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 29 - Upper output polarity"]
#[inline]
pub fn tmrb1pol23(&self) -> TMRB1POL23R {
TMRB1POL23R::_from({
const MASK: bool = true;
const OFFSET: u8 = 29;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 28 - Counter/Timer B1 Invert on trigger."]
#[inline]
pub fn tmrb1tinv(&self) -> TMRB1TINVR {
TMRB1TINVR::_from({
const MASK: bool = true;
const OFFSET: u8 = 28;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 27 - Source clock synchronization control."]
#[inline]
pub fn tmrb1nosync(&self) -> TMRB1NOSYNCR {
TMRB1NOSYNCR::_from({
const MASK: bool = true;
const OFFSET: u8 = 27;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bits 23:26 - Counter/Timer B1 Trigger Select."]
#[inline]
pub fn tmrb1trig(&self) -> TMRB1TRIGR {
TMRB1TRIGR::_from({
const MASK: u8 = 15;
const OFFSET: u8 = 23;
((self.bits >> OFFSET) & MASK as u32) as u8
})
}
#[doc = "Bits 16:21 - Counter/Timer B1 Pattern Limit Count."]
#[inline]
pub fn tmrb1lmt(&self) -> TMRB1LMTR {
let bits = {
const MASK: u8 = 63;
const OFFSET: u8 = 16;
((self.bits >> OFFSET) & MASK as u32) as u8
};
TMRB1LMTR { bits }
}
#[doc = "Bit 14 - Counter/Timer A1 Upper compare enable."]
#[inline]
pub fn tmra1en23(&self) -> TMRA1EN23R {
TMRA1EN23R::_from({
const MASK: bool = true;
const OFFSET: u8 = 14;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 13 - Counter/Timer A1 Upper output polarity"]
#[inline]
pub fn tmra1pol23(&self) -> TMRA1POL23R {
TMRA1POL23R::_from({
const MASK: bool = true;
const OFFSET: u8 = 13;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 12 - Counter/Timer A1 Invert on trigger."]
#[inline]
pub fn tmra1tinv(&self) -> TMRA1TINVR {
TMRA1TINVR::_from({
const MASK: bool = true;
const OFFSET: u8 = 12;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 11 - Source clock synchronization control."]
#[inline]
pub fn tmra1nosync(&self) -> TMRA1NOSYNCR {
TMRA1NOSYNCR::_from({
const MASK: bool = true;
const OFFSET: u8 = 11;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bits 7:10 - Counter/Timer A1 Trigger Select."]
#[inline]
pub fn tmra1trig(&self) -> TMRA1TRIGR {
TMRA1TRIGR::_from({
const MASK: u8 = 15;
const OFFSET: u8 = 7;
((self.bits >> OFFSET) & MASK as u32) as u8
})
}
#[doc = "Bits 0:6 - Counter/Timer A1 Pattern Limit Count."]
#[inline]
pub fn tmra1lmt(&self) -> TMRA1LMTR {
let bits = {
const MASK: u8 = 127;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u8
};
TMRA1LMTR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 30 - Counter/Timer B1 Upper compare enable."]
#[inline]
pub fn tmrb1en23(&mut self) -> _TMRB1EN23W {
_TMRB1EN23W { w: self }
}
#[doc = "Bit 29 - Upper output polarity"]
#[inline]
pub fn tmrb1pol23(&mut self) -> _TMRB1POL23W {
_TMRB1POL23W { w: self }
}
#[doc = "Bit 28 - Counter/Timer B1 Invert on trigger."]
#[inline]
pub fn tmrb1tinv(&mut self) -> _TMRB1TINVW {
_TMRB1TINVW { w: self }
}
#[doc = "Bit 27 - Source clock synchronization control."]
#[inline]
pub fn tmrb1nosync(&mut self) -> _TMRB1NOSYNCW {
_TMRB1NOSYNCW { w: self }
}
#[doc = "Bits 23:26 - Counter/Timer B1 Trigger Select."]
#[inline]
pub fn tmrb1trig(&mut self) -> _TMRB1TRIGW {
_TMRB1TRIGW { w: self }
}
#[doc = "Bits 16:21 - Counter/Timer B1 Pattern Limit Count."]
#[inline]
pub fn tmrb1lmt(&mut self) -> _TMRB1LMTW {
_TMRB1LMTW { w: self }
}
#[doc = "Bit 14 - Counter/Timer A1 Upper compare enable."]
#[inline]
pub fn tmra1en23(&mut self) -> _TMRA1EN23W {
_TMRA1EN23W { w: self }
}
#[doc = "Bit 13 - Counter/Timer A1 Upper output polarity"]
#[inline]
pub fn tmra1pol23(&mut self) -> _TMRA1POL23W {
_TMRA1POL23W { w: self }
}
#[doc = "Bit 12 - Counter/Timer A1 Invert on trigger."]
#[inline]
pub fn tmra1tinv(&mut self) -> _TMRA1TINVW {
_TMRA1TINVW { w: self }
}
#[doc = "Bit 11 - Source clock synchronization control."]
#[inline]
pub fn tmra1nosync(&mut self) -> _TMRA1NOSYNCW {
_TMRA1NOSYNCW { w: self }
}
#[doc = "Bits 7:10 - Counter/Timer A1 Trigger Select."]
#[inline]
pub fn tmra1trig(&mut self) -> _TMRA1TRIGW {
_TMRA1TRIGW { w: self }
}
#[doc = "Bits 0:6 - Counter/Timer A1 Pattern Limit Count."]
#[inline]
pub fn tmra1lmt(&mut self) -> _TMRA1LMTW {
_TMRA1LMTW { w: self }
}
}
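// Illustrative usage sketch (not part of the generated code): the R/W proxies
// above are normally driven through svd2rust's `read`/`modify` register API.
// The peripheral and register paths (`CTIMER`, `aux1`) are assumptions based
// on the field names; consult the device crate for the actual names.
//
// let ctimer = unsafe { &*CTIMER::ptr() };
// // Route CTIMERB1 OUT to timer A1's trigger input and enable invert-on-trigger:
// ctimer.aux1.modify(|_, w| w.tmra1trig().b1out().tmra1tinv().en());
// // Read back and check the configured trigger source:
// assert!(ctimer.aux1.read().tmra1trig().is_b1out());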
| 28.727375 | 65 | 0.536473 |
0117cc7db6ffd1b5aff300e826e17280ace061f1 | 1,649 | // Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::prelude_internal::*;
/// Methods from the [OpenThread "Radio" Module][1].
///
/// [1]: https://openthread.io/reference/group/radio-operation
pub trait Radio {
/// Functional equivalent of
/// [`otsys::otPlatRadioGetRssi`](crate::otsys::otPlatRadioGetRssi).
fn get_rssi(&self) -> Decibels;
/// Functional equivalent of
/// [`otsys::otPlatRadioGetRegion`](crate::otsys::otPlatRadioGetRegion).
fn get_region(&self) -> Result<RadioRegion>;
/// Functional equivalent of
/// [`otsys::otPlatRadioSetRegion`](crate::otsys::otPlatRadioSetRegion).
fn set_region(&self, region: RadioRegion) -> Result;
}
impl<T: Radio + Boxable> Radio for ot::Box<T> {
fn get_rssi(&self) -> Decibels {
self.as_ref().get_rssi()
}
fn get_region(&self) -> Result<RadioRegion> {
self.as_ref().get_region()
}
fn set_region(&self, region: RadioRegion) -> Result {
self.as_ref().set_region(region)
}
}
impl Radio for Instance {
fn get_rssi(&self) -> Decibels {
unsafe { otPlatRadioGetRssi(self.as_ot_ptr()) }
}
fn get_region(&self) -> Result<RadioRegion> {
let mut ret = 0u16;
Error::from(unsafe { otPlatRadioGetRegion(self.as_ot_ptr(), &mut ret as *mut u16) })
.into_result()?;
Ok(ret.into())
}
fn set_region(&self, region: RadioRegion) -> Result {
Error::from(unsafe { otPlatRadioSetRegion(self.as_ot_ptr(), region.into()) }).into()
}
}
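// Illustrative usage sketch (assumption, not part of this file): a generic
// helper exercising the `Radio` trait. How the OpenThread instance is obtained
// is platform glue and elided here.
//
// fn check_radio<R: Radio>(radio: &R) -> Result {
//     let _rssi = radio.get_rssi();
//     let region = radio.get_region()?;
//     // Round-trip the region through the setter.
//     radio.set_region(region)?;
//     assert_eq!(radio.get_region()?, region);
//     Ok(())
// }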
| 30.537037 | 92 | 0.64342 |
146c7e8186d9fce1a68738e8b9055b8a16119e98 | 32,833 | use crate::{
mock_time,
types::{
arbitrary,
ids::{canister_test_id, message_test_id, subnet_test_id, user_test_id},
messages::SignedIngressBuilder,
},
};
use ic_base_types::NumSeconds;
use ic_btc_types_internal::BitcoinAdapterRequestWrapper;
use ic_ic00_types::CanisterStatusType;
use ic_registry_routing_table::{CanisterIdRange, RoutingTable};
use ic_registry_subnet_features::SubnetFeatures;
use ic_registry_subnet_type::SubnetType;
use ic_replicated_state::{
bitcoin_state::BitcoinState,
canister_state::{
execution_state::{CustomSection, CustomSectionType, WasmBinary, WasmMetadata},
testing::new_canister_queues_for_test,
QUEUE_INDEX_NONE,
},
metadata_state::Stream,
page_map::PageMap,
testing::{CanisterQueuesTesting, ReplicatedStateTesting, SystemStateTesting},
CallContext, CallOrigin, CanisterState, CanisterStatus, ExecutionState, ExportedFunctions,
InputQueueType, Memory, NumWasmPages, ReplicatedState, SchedulerState, SystemState,
};
use ic_types::messages::CallbackId;
use ic_types::methods::{Callback, WasmClosure};
use ic_types::{
messages::{Ingress, Request, RequestOrResponse},
xnet::{QueueId, StreamHeader, StreamIndex, StreamIndexedQueue},
CanisterId, ComputeAllocation, Cycles, ExecutionRound, MemoryAllocation, NumBytes, PrincipalId,
QueueIndex, SubnetId, Time,
};
use ic_wasm_types::CanisterModule;
use proptest::prelude::*;
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::convert::TryFrom;
use std::sync::Arc;
const WASM_PAGE_SIZE_BYTES: usize = 65536;
const DEFAULT_FREEZE_THRESHOLD: NumSeconds = NumSeconds::new(1 << 30);
const INITIAL_CYCLES: Cycles = Cycles::new(5_000_000_000_000);
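/// Builder for `ReplicatedState` fixtures used in tests; defaults are
/// provided for the subnet, time and Bitcoin-related fields (see `Default`).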
pub struct ReplicatedStateBuilder {
canisters: Vec<CanisterState>,
subnet_type: SubnetType,
subnet_id: SubnetId,
batch_time: Time,
time_of_last_allocation_charge: Time,
subnet_features: SubnetFeatures,
bitcoin_state: BitcoinState,
bitcoin_adapter_requests: Vec<BitcoinAdapterRequestWrapper>,
}
impl ReplicatedStateBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn with_subnet_id(mut self, subnet_id: SubnetId) -> Self {
self.subnet_id = subnet_id;
self
}
pub fn with_canister(mut self, canister: CanisterState) -> Self {
self.canisters.push(canister);
self
}
pub fn with_subnet_type(mut self, subnet_type: SubnetType) -> Self {
self.subnet_type = subnet_type;
self
}
pub fn with_time(mut self, time: Time) -> Self {
self.batch_time = time;
self
}
pub fn with_time_of_last_allocation(mut self, time: Time) -> Self {
self.time_of_last_allocation_charge = time;
self
}
pub fn with_subnet_features(mut self, subnet_features: SubnetFeatures) -> Self {
self.subnet_features = subnet_features;
self
}
pub fn with_bitcoin_adapter_requests(
mut self,
bitcoin_adapter_requests: Vec<BitcoinAdapterRequestWrapper>,
) -> Self {
self.bitcoin_adapter_requests = bitcoin_adapter_requests;
self
}
pub fn with_bitcoin_state(mut self, state: BitcoinState) -> Self {
self.bitcoin_state = state;
self
}
pub fn build(self) -> ReplicatedState {
let mut state =
ReplicatedState::new_rooted_at(self.subnet_id, self.subnet_type, "Initial".into());
for canister in self.canisters {
state.put_canister_state(canister);
}
let mut routing_table = RoutingTable::new();
routing_table
.insert(
CanisterIdRange {
start: CanisterId::from(0),
end: CanisterId::from(u64::MAX),
},
self.subnet_id,
)
.unwrap();
state.metadata.network_topology.routing_table = Arc::new(routing_table);
state.metadata.batch_time = self.batch_time;
state.metadata.time_of_last_allocation_charge = self.time_of_last_allocation_charge;
state.metadata.own_subnet_features = self.subnet_features;
state.put_bitcoin_state(self.bitcoin_state);
for request in self.bitcoin_adapter_requests.into_iter() {
state.push_request_bitcoin(request).unwrap();
}
state
}
}
impl Default for ReplicatedStateBuilder {
fn default() -> Self {
Self {
canisters: Vec::new(),
subnet_type: SubnetType::Application,
subnet_id: subnet_test_id(1),
batch_time: mock_time(),
time_of_last_allocation_charge: mock_time(),
subnet_features: SubnetFeatures::default(),
bitcoin_state: BitcoinState::default(),
bitcoin_adapter_requests: Vec::new(),
}
}
}
pub struct CanisterStateBuilder {
canister_id: CanisterId,
controller: PrincipalId,
cycles: Cycles,
stable_memory: Option<Vec<u8>>,
wasm: Option<Vec<u8>>,
memory_allocation: MemoryAllocation,
compute_allocation: ComputeAllocation,
ingress_queue: Vec<Ingress>,
status: CanisterStatusType,
freeze_threshold: NumSeconds,
call_contexts: Vec<CallContext>,
inputs: Vec<RequestOrResponse>,
}
impl CanisterStateBuilder {
pub fn new() -> Self {
// Initialize with sensible defaults.
Self::default()
}
pub fn with_canister_id(mut self, canister_id: CanisterId) -> Self {
self.canister_id = canister_id;
self
}
pub fn with_controller<P: Into<PrincipalId>>(mut self, controller: P) -> Self {
self.controller = controller.into();
self
}
pub fn with_stable_memory(mut self, data: Vec<u8>) -> Self {
self.stable_memory = Some(data);
self
}
pub fn with_cycles<C: Into<Cycles>>(mut self, cycles: C) -> Self {
self.cycles = cycles.into();
self
}
pub fn with_wasm(mut self, wasm: Vec<u8>) -> Self {
self.wasm = Some(wasm);
self
}
pub fn with_memory_allocation<B: Into<NumBytes>>(mut self, num_bytes: B) -> Self {
self.memory_allocation = MemoryAllocation::try_from(num_bytes.into()).unwrap();
self
}
pub fn with_compute_allocation(mut self, allocation: ComputeAllocation) -> Self {
self.compute_allocation = allocation;
self
}
pub fn with_ingress(mut self, ingress: Ingress) -> Self {
self.ingress_queue.push(ingress);
self
}
pub fn with_status(mut self, status: CanisterStatusType) -> Self {
self.status = status;
self
}
pub fn with_freezing_threshold<S: Into<NumSeconds>>(mut self, ft: S) -> Self {
self.freeze_threshold = ft.into();
self
}
pub fn with_call_context(mut self, call_context: CallContext) -> Self {
self.call_contexts.push(call_context);
self
}
pub fn with_input(mut self, input: RequestOrResponse) -> Self {
self.inputs.push(input);
self
}
pub fn with_canister_request(mut self, request: Request) -> Self {
self.inputs.push(RequestOrResponse::Request(request));
self
}
pub fn build(self) -> CanisterState {
let mut system_state = match self.status {
CanisterStatusType::Running => SystemState::new_running(
self.canister_id,
self.controller,
self.cycles,
self.freeze_threshold,
),
CanisterStatusType::Stopping => SystemState::new_stopping(
self.canister_id,
self.controller,
self.cycles,
self.freeze_threshold,
),
CanisterStatusType::Stopped => SystemState::new_stopped(
self.canister_id,
self.controller,
self.cycles,
self.freeze_threshold,
),
};
system_state.memory_allocation = self.memory_allocation;
// Add ingress messages to the canister's queues.
for ingress in self.ingress_queue.into_iter() {
system_state.queues_mut().push_ingress(ingress)
}
        // Set call contexts. Because there is no way to pass in a `CallContext`
// object to `CallContextManager`, we have to construct them in this
// bizarre way.
for call_context in self.call_contexts.into_iter() {
let call_context_manager = system_state.call_context_manager_mut().unwrap();
let call_context_id = call_context_manager.new_call_context(
call_context.call_origin().clone(),
call_context.available_cycles(),
call_context.time().unwrap(),
);
let call_context_in_call_context_manager = call_context_manager
.call_context_mut(call_context_id)
.unwrap();
if call_context.has_responded() {
call_context_in_call_context_manager.mark_responded();
}
if call_context.is_deleted() {
call_context_in_call_context_manager.mark_deleted();
}
}
// Add inputs to the input queue.
for input in self.inputs {
system_state
.queues_mut()
.push_input(QUEUE_INDEX_NONE, input, InputQueueType::RemoteSubnet)
.unwrap();
}
let stable_memory = if let Some(data) = self.stable_memory {
Memory::new(
PageMap::from(&data[..]),
NumWasmPages::new((data.len() / WASM_PAGE_SIZE_BYTES) as usize + 1),
)
} else {
Memory::default()
};
let execution_state = match self.wasm {
Some(wasm_binary) => {
let mut ee = initial_execution_state();
ee.wasm_binary = WasmBinary::new(CanisterModule::new(wasm_binary));
ee.stable_memory = stable_memory;
Some(ee)
}
None => None,
};
CanisterState {
system_state,
execution_state,
scheduler_state: SchedulerState {
compute_allocation: self.compute_allocation,
..SchedulerState::default()
},
}
}
}
impl Default for CanisterStateBuilder {
fn default() -> Self {
Self {
canister_id: canister_test_id(0),
controller: user_test_id(0).get(),
cycles: INITIAL_CYCLES,
stable_memory: None,
wasm: None,
memory_allocation: MemoryAllocation::BestEffort,
compute_allocation: ComputeAllocation::zero(),
ingress_queue: Vec::default(),
status: CanisterStatusType::Running,
freeze_threshold: DEFAULT_FREEZE_THRESHOLD,
call_contexts: Vec::default(),
inputs: Vec::default(),
}
}
}
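// Illustrative usage sketch (assumption, not part of this file): composing the
// two builders above into a single-canister test state.
//
// let canister = CanisterStateBuilder::new()
//     .with_canister_id(canister_test_id(7))
//     .with_cycles(Cycles::new(1_000_000))
//     .with_status(CanisterStatusType::Running)
//     .build();
// let state = ReplicatedStateBuilder::new()
//     .with_subnet_id(subnet_test_id(1))
//     .with_canister(canister)
//     .build();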
pub struct SystemStateBuilder {
system_state: SystemState,
}
impl Default for SystemStateBuilder {
fn default() -> Self {
Self {
system_state: SystemState::new_running(
canister_test_id(42),
user_test_id(24).get(),
INITIAL_CYCLES,
DEFAULT_FREEZE_THRESHOLD,
),
}
}
}
impl SystemStateBuilder {
pub fn new() -> Self {
Self {
system_state: SystemState::new_running(
canister_test_id(42),
user_test_id(24).get(),
INITIAL_CYCLES,
DEFAULT_FREEZE_THRESHOLD,
),
}
}
pub fn initial_cycles(mut self, cycles: Cycles) -> Self {
*self.system_state.balance_mut() = cycles;
self
}
pub fn canister_id(mut self, canister_id: CanisterId) -> Self {
self.system_state.set_canister_id(canister_id);
self
}
pub fn memory_allocation(mut self, memory_allocation: NumBytes) -> Self {
self.system_state.memory_allocation =
MemoryAllocation::try_from(memory_allocation).unwrap();
self
}
pub fn freeze_threshold(mut self, threshold: NumSeconds) -> Self {
self.system_state.freeze_threshold = threshold;
self
}
pub fn build(self) -> SystemState {
self.system_state
}
}
pub struct CallContextBuilder {
call_origin: CallOrigin,
responded: bool,
time: Time,
}
impl CallContextBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn with_call_origin(mut self, call_origin: CallOrigin) -> Self {
self.call_origin = call_origin;
self
}
pub fn with_responded(mut self, responded: bool) -> Self {
self.responded = responded;
self
}
pub fn with_time(mut self, time: Time) -> Self {
self.time = time;
self
}
pub fn build(self) -> CallContext {
CallContext::new(
self.call_origin,
self.responded,
false,
Cycles::from(0),
self.time,
)
}
}
impl Default for CallContextBuilder {
fn default() -> Self {
Self {
call_origin: CallOrigin::Ingress(user_test_id(0), message_test_id(0)),
responded: false,
time: Time::from_nanos_since_unix_epoch(0),
}
}
}
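// Illustrative usage sketch (assumption): building an already-responded
// ingress call context.
//
// let ctx = CallContextBuilder::new()
//     .with_call_origin(CallOrigin::Ingress(user_test_id(1), message_test_id(1)))
//     .with_responded(true)
//     .with_time(mock_time())
//     .build();
// assert!(ctx.has_responded());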
pub fn initial_execution_state() -> ExecutionState {
let mut metadata: BTreeMap<String, CustomSection> = BTreeMap::new();
metadata.insert(
String::from("candid"),
CustomSection {
visibility: CustomSectionType::Private,
content: vec![0, 2],
},
);
metadata.insert(
String::from("dummy"),
CustomSection {
visibility: CustomSectionType::Public,
content: vec![2, 1],
},
);
let wasm_metadata = WasmMetadata::new(metadata);
ExecutionState {
canister_root: "NOT_USED".into(),
session_nonce: None,
wasm_binary: WasmBinary::new(CanisterModule::new(vec![])),
wasm_memory: Memory::default(),
stable_memory: Memory::default(),
exported_globals: vec![],
exports: ExportedFunctions::new(BTreeSet::new()),
metadata: wasm_metadata,
last_executed_round: ExecutionRound::from(0),
}
}
pub fn canister_from_exec_state(
execution_state: ExecutionState,
canister_id: CanisterId,
) -> CanisterState {
CanisterState {
system_state: SystemStateBuilder::new()
.memory_allocation(NumBytes::new(8 * 1024 * 1024 * 1024)) // 8GiB
.canister_id(canister_id)
.initial_cycles(INITIAL_CYCLES)
.build(),
execution_state: Some(execution_state),
scheduler_state: Default::default(),
}
}
pub fn get_running_canister_with_balance(
canister_id: CanisterId,
initial_balance: Cycles,
) -> CanisterState {
get_running_canister_with_args(canister_id, user_test_id(1).get(), initial_balance)
}
pub fn get_running_canister(canister_id: CanisterId) -> CanisterState {
get_running_canister_with_balance(canister_id, INITIAL_CYCLES)
}
pub fn get_running_canister_with_args(
canister_id: CanisterId,
controller: PrincipalId,
initial_cycles: Cycles,
) -> CanisterState {
CanisterState {
system_state: SystemState::new_running(
canister_id,
controller,
initial_cycles,
DEFAULT_FREEZE_THRESHOLD,
),
execution_state: None,
scheduler_state: Default::default(),
}
}
pub fn get_stopping_canister(canister_id: CanisterId) -> CanisterState {
get_stopping_canister_with_controller(canister_id, user_test_id(1).get())
}
pub fn get_stopping_canister_on_nns(canister_id: CanisterId) -> CanisterState {
get_stopping_canister_with_controller(canister_id, user_test_id(1).get())
}
pub fn get_stopping_canister_with_controller(
canister_id: CanisterId,
controller: PrincipalId,
) -> CanisterState {
CanisterState {
system_state: SystemState::new_stopping(
canister_id,
controller,
INITIAL_CYCLES,
DEFAULT_FREEZE_THRESHOLD,
),
execution_state: None,
scheduler_state: Default::default(),
}
}
pub fn get_stopped_canister_on_system_subnet(canister_id: CanisterId) -> CanisterState {
get_stopped_canister_with_controller(canister_id, user_test_id(1).get())
}
pub fn get_stopped_canister(canister_id: CanisterId) -> CanisterState {
get_stopped_canister_with_controller(canister_id, user_test_id(1).get())
}
pub fn get_stopped_canister_with_controller(
canister_id: CanisterId,
controller: PrincipalId,
) -> CanisterState {
CanisterState {
system_state: SystemState::new_stopped(
canister_id,
controller,
INITIAL_CYCLES,
DEFAULT_FREEZE_THRESHOLD,
),
execution_state: None,
scheduler_state: Default::default(),
}
}
/// Convert a running canister into a stopped canister. This functionality
/// is added here since it is only allowed in tests.
pub fn running_canister_into_stopped(mut canister: CanisterState) -> CanisterState {
canister.system_state.status = CanisterStatus::Stopped;
canister
}
/// Returns a `ReplicatedState` with `SubnetType::Application`, a variable number of
/// canisters and input messages per canister, and methods that are to be called.
pub fn get_initial_state(canister_num: u64, message_num_per_canister: u64) -> ReplicatedState {
get_initial_state_with_balance(
canister_num,
message_num_per_canister,
INITIAL_CYCLES,
SubnetType::Application,
)
}
/// Returns a `ReplicatedState` with `SubnetType::System`, a variable number of
/// canisters and input messages per canister, and methods that are to be called.
pub fn get_initial_system_subnet_state(
canister_num: u64,
message_num_per_canister: u64,
) -> ReplicatedState {
get_initial_state_with_balance(
canister_num,
message_num_per_canister,
INITIAL_CYCLES,
SubnetType::System,
)
}
pub fn get_initial_state_with_balance(
canister_num: u64,
message_num_per_canister: u64,
initial_cycles: Cycles,
own_subnet_type: SubnetType,
) -> ReplicatedState {
let mut state =
ReplicatedState::new_rooted_at(subnet_test_id(1), own_subnet_type, "Initial".into());
for canister_id in 0..canister_num {
let mut canister_state_builder = CanisterStateBuilder::new()
.with_canister_id(canister_test_id(canister_id))
.with_cycles(initial_cycles)
.with_wasm(vec![]);
for i in 0..message_num_per_canister {
canister_state_builder = canister_state_builder.with_ingress(
SignedIngressBuilder::new()
.canister_id(canister_test_id(canister_id))
.nonce(i)
.build()
.into(),
);
}
state.put_canister_state(canister_state_builder.build());
}
state.metadata.network_topology.routing_table = Arc::new({
let mut rt = ic_registry_routing_table::RoutingTable::new();
rt.insert(
ic_registry_routing_table::CanisterIdRange {
start: CanisterId::from(0),
end: CanisterId::from(u64::MAX),
},
subnet_test_id(1),
)
.unwrap();
rt
});
state
}
/// Returns the ordered IDs of the canisters contained within `state`.
pub fn canister_ids(state: &ReplicatedState) -> Vec<CanisterId> {
state
.canisters_iter()
.map(|canister_state| canister_state.canister_id())
.collect()
}
pub fn new_canister_state(
canister_id: CanisterId,
controller: PrincipalId,
initial_cycles: Cycles,
freeze_threshold: NumSeconds,
) -> CanisterState {
let scheduler_state = SchedulerState::default();
let system_state =
SystemState::new_running(canister_id, controller, initial_cycles, freeze_threshold);
CanisterState::new(system_state, None, scheduler_state)
}
/// Helper function to register a callback.
pub fn register_callback(
canister_state: &mut CanisterState,
originator: CanisterId,
respondent: CanisterId,
callback_id: CallbackId,
) {
let call_context_manager = canister_state
.system_state
.call_context_manager_mut()
.unwrap();
let call_context_id = call_context_manager.new_call_context(
CallOrigin::CanisterUpdate(originator, callback_id),
Cycles::zero(),
Time::from_nanos_since_unix_epoch(0),
);
call_context_manager.register_callback(Callback::new(
call_context_id,
Some(originator),
Some(respondent),
Cycles::zero(),
WasmClosure::new(0, 2),
WasmClosure::new(0, 2),
None,
));
}
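// Illustrative usage sketch (assumption): registering callback id 1 on a
// running canister so that a response from `canister_test_id(2)` can later be
// inducted.
//
// let mut canister = get_running_canister(canister_test_id(1));
// register_callback(
//     &mut canister,
//     canister_test_id(1),
//     canister_test_id(2),
//     CallbackId::from(1),
// );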
/// Helper function to insert a canister in the provided `ReplicatedState`.
pub fn insert_dummy_canister(
state: &mut ReplicatedState,
canister_id: CanisterId,
controller: PrincipalId,
) {
let wasm = CanisterModule::new(vec![]);
let mut canister_state = new_canister_state(
canister_id,
controller,
INITIAL_CYCLES,
NumSeconds::from(100_000),
);
let mut execution_state = initial_execution_state();
execution_state.wasm_binary = WasmBinary::new(wasm);
canister_state.execution_state = Some(execution_state);
state.put_canister_state(canister_state);
}
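// Illustrative usage sketch (assumption): seeding a fresh state with a dummy
// canister controlled by `user_test_id(1)`.
//
// let mut state = ReplicatedStateBuilder::new().build();
// insert_dummy_canister(&mut state, canister_test_id(3), user_test_id(1).get());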
prop_compose! {
    /// Creates a `ReplicatedState` with a variable number of canisters and input messages
/// per canister based on a uniform distribution of the input parameters.
/// Each canister has a variable `allocation` and
/// `last_full_execution_round` and a minimal Wasm module.
///
/// Example:
///
/// ```no_run
/// use ic_test_utilities::state::arb_replicated_state;
/// use proptest::prelude::*;
///
/// proptest! {
/// #[test]
/// fn dummy_test(state in arb_replicated_state(10, 10, 5)) {
/// println!("{:?}", state);
/// }
/// }
/// ```
pub fn arb_replicated_state(
canister_num_max: u64,
message_num_per_canister_max: u64,
last_round_max: u64,
)
(
canister_states in prop::collection::vec(
arb_canister_state(last_round_max), 1..canister_num_max as usize
),
message_num_per_canister in 1..message_num_per_canister_max,
) -> ReplicatedState {
let mut state = ReplicatedStateBuilder::new().build();
        for mut canister_state in canister_states {
let canister_id = canister_state.canister_id();
for i in 0..message_num_per_canister {
canister_state.push_ingress(
SignedIngressBuilder::new()
.canister_id(canister_id)
.nonce(i)
.build()
.into()
);
}
state.put_canister_state(canister_state);
}
state
}
}
prop_compose! {
fn arb_canister_state(
last_round_max: u64,
)
(
(allocation, round) in arb_compute_allocation_and_last_round(last_round_max)
) -> CanisterState {
let mut execution_state = initial_execution_state();
execution_state.wasm_binary = WasmBinary::new(CanisterModule::new(wabt::wat2wasm(r#"(module)"#).unwrap()));
let scheduler_state = SchedulerState::default();
let system_state = SystemState::new_running(
canister_test_id(0),
user_test_id(24).get(),
INITIAL_CYCLES,
DEFAULT_FREEZE_THRESHOLD,
);
let mut canister_state = CanisterState::new(
system_state,
Some(execution_state),
scheduler_state
);
canister_state.scheduler_state.compute_allocation = allocation;
canister_state.scheduler_state.last_full_execution_round = round;
canister_state
}
}
prop_compose! {
fn arb_compute_allocation_and_last_round(
last_round_max: u64
)
(
a in -100..120,
round in 0..last_round_max,
) -> (ComputeAllocation, ExecutionRound) {
        // Clamp `a` to [0, 100], but with high probability for 0 and somewhat
        // higher probability for 100.
        let a = a.clamp(0, 100);
(
ComputeAllocation::try_from(a as u64).unwrap(),
ExecutionRound::from(round),
)
}
}
prop_compose! {
/// Produces a strategy that generates an arbitrary `signals_end` and between
    /// `[sig_min_size, sig_max_size]` reject signals.
pub fn arb_reject_signals(sig_min_size: usize, sig_max_size: usize)(
sig_start in 0..10000u64,
sigs in prop::collection::btree_set(arbitrary::stream_index(100 + sig_max_size as u64), sig_min_size..=sig_max_size),
sig_end_delta in 0..10u64,
) -> (StreamIndex, VecDeque<StreamIndex>) {
let mut reject_signals = VecDeque::with_capacity(sigs.len());
let sig_start = sig_start.into();
for s in sigs {
reject_signals.push_back(s + sig_start);
}
let sig_end = sig_start + reject_signals.back().unwrap_or(&0.into()).increment() + sig_end_delta.into();
(sig_end, reject_signals)
}
}
prop_compose! {
/// Produces a strategy that generates a stream with between
/// `[min_size, max_size]` messages and between `[sig_min_size, sig_max_size]`
/// reject signals.
pub fn arb_stream(min_size: usize, max_size: usize, sig_min_size: usize, sig_max_size: usize)(
msg_start in 0..10000u64,
reqs in prop::collection::vec(arbitrary::request(), min_size..=max_size),
(signals_end, reject_signals) in arb_reject_signals(sig_min_size, sig_max_size),
) -> Stream {
let mut messages = StreamIndexedQueue::with_begin(StreamIndex::from(msg_start));
for r in reqs {
messages.push(r.into())
}
Stream::with_signals(messages, signals_end, reject_signals)
}
}
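// Illustrative usage sketch (assumption): driving `arb_stream` from a proptest.
//
// proptest! {
//     #[test]
//     fn generated_stream_respects_bounds(stream in arb_stream(1, 10, 0, 5)) {
//         let n = stream.messages().len();
//         prop_assert!((1..=10).contains(&n));
//     }
// }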
prop_compose! {
/// Produces a strategy consisting of an arbitrary stream and valid slice begin and message
/// count values for extracting a slice from the stream.
pub fn arb_stream_slice(min_size: usize, max_size: usize, sig_min_size: usize, sig_max_size: usize)(
stream in arb_stream(min_size, max_size, sig_min_size, sig_max_size),
from_percent in -20..120i64,
percent_above_min_size in 0..120i64,
) -> (Stream, StreamIndex, usize) {
        let from_percent = from_percent.clamp(0, 100) as usize;
        let percent_above_min_size = percent_above_min_size.clamp(0, 100) as usize;
let msg_count = min_size +
(stream.messages().len() - min_size) * percent_above_min_size / 100;
let from = stream.messages_begin() +
(((stream.messages().len() - msg_count) * from_percent / 100) as u64).into();
(stream, from, msg_count)
}
}
prop_compose! {
pub fn arb_stream_header(sig_min_size: usize, sig_max_size: usize)(
msg_start in 0..10000u64,
msg_len in 0..10000u64,
(signals_end, reject_signals) in arb_reject_signals(sig_min_size, sig_max_size),
) -> StreamHeader {
let begin = StreamIndex::from(msg_start);
let end = StreamIndex::from(msg_start + msg_len);
StreamHeader {
begin,
end,
signals_end,
reject_signals,
}
}
}
prop_compose! {
/// Strategy that generates an arbitrary number (of receivers) between 1 and the
/// provided value, if `Some`; or else `usize::MAX` (standing for unlimited
/// receivers).
pub fn arb_num_receivers(max_receivers: Option<usize>) (
random in 0..usize::MAX,
) -> usize {
match max_receivers {
Some(max_receivers) if max_receivers <= 1 => 1,
Some(max_receivers) => 1 + random % (max_receivers - 1),
None => usize::MAX,
}
}
}
/// Produces a `ReplicatedState` with the given subnet ID and the given output
/// requests. The first group of requests is enqueued into the subnet queues; a
/// canister is created for each following group. Each group's requests are
/// routed round-robin to one of `num_receivers`.
///
/// Returns the generated `ReplicatedState`; the requests grouped by canister,
/// in expected iteration order; and the total number of requests.
fn new_replicated_state_for_test(
own_subnet_id: SubnetId,
mut output_requests: Vec<Vec<Request>>,
num_receivers: usize,
) -> (
ReplicatedState,
VecDeque<VecDeque<RequestOrResponse>>,
usize,
) {
let mut total_requests = 0;
let mut requests = VecDeque::new();
let subnet_queues = if let Some(reqs) = output_requests.pop() {
let (queues, raw_requests) =
new_canister_queues_for_test(reqs, CanisterId::from(own_subnet_id), num_receivers);
total_requests += raw_requests.len();
requests.push_back(raw_requests);
Some(queues)
} else {
None
};
let canister_states: BTreeMap<_, _> = output_requests
.into_iter()
.enumerate()
.map(|(i, reqs)| {
let canister_id = CanisterId::from_u64(i as u64);
let mut canister = CanisterStateBuilder::new()
.with_canister_id(canister_id)
.build();
let (queues, raw_requests) =
new_canister_queues_for_test(reqs, canister_test_id(i as u64), num_receivers);
canister.system_state.put_queues(queues);
total_requests += raw_requests.len();
requests.push_back(raw_requests);
(canister_id, canister)
})
.collect();
let mut replicated_state = ReplicatedStateBuilder::new().build();
let mut routing_table = RoutingTable::new();
routing_table
.insert(
CanisterIdRange {
start: CanisterId::from(0),
end: CanisterId::from(u64::MAX),
},
own_subnet_id,
)
.unwrap();
replicated_state.metadata.network_topology.routing_table = Arc::new(routing_table);
replicated_state.put_canister_states(canister_states);
if let Some(subnet_queues) = subnet_queues {
replicated_state.put_subnet_queues(subnet_queues);
}
(replicated_state, requests, total_requests)
}
prop_compose! {
pub fn arb_replicated_state_with_queues(
own_subnet_id: SubnetId,
max_canisters: usize,
max_requests_per_canister: usize,
max_receivers: Option<usize>,
) (
time in 1..1000_u64,
request_queues in prop::collection::vec(prop::collection::vec(arbitrary::request(), 0..=max_requests_per_canister), 0..=max_canisters),
num_receivers in arb_num_receivers(max_receivers)
) -> (ReplicatedState, VecDeque<VecDeque<RequestOrResponse>>, usize) {
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
let (mut replicated_state, mut raw_requests, total_requests) = new_replicated_state_for_test(own_subnet_id, request_queues, num_receivers);
// We pseudorandomly rotate the queues to match the rotation applied by the iterator.
// Note that subnet queues are always at the front which is why we need to pop them
// before the rotation and push them to the front afterwards.
let subnet_queue_requests = raw_requests.pop_front();
let mut raw_requests : VecDeque<_> = raw_requests.into_iter().filter(|requests| !requests.is_empty()).collect();
replicated_state.metadata.batch_time = Time::from_nanos_since_unix_epoch(time);
let mut rng = ChaChaRng::seed_from_u64(time);
let rotation = rng.gen_range(0, raw_requests.len().max(1));
raw_requests.rotate_left(rotation);
if let Some(requests) = subnet_queue_requests {
raw_requests.push_front(requests);
}
(replicated_state, raw_requests, total_requests)
}
}
/// Asserts that the values returned by `next()` match the ones returned by
/// `peek()` before.
pub fn assert_next_eq(
peek: (QueueId, QueueIndex, Arc<RequestOrResponse>),
next: Option<(QueueId, QueueIndex, RequestOrResponse)>,
) {
let next =
next.unwrap_or_else(|| panic!("Peek returned a message {:?}, while pop didn't", peek));
assert_eq!((peek.0, peek.1, peek.2.as_ref()), (next.0, next.1, &next.2));
}
| 32.443676 | 147 | 0.632321 |
f5228c247b0305039507d4dca8b5b1e2abd03e65 | 1,960 | mod machine_run;
#[test]
#[cfg_attr(miri, ignore)] // takes at least 9 hours
pub fn test_b_extension() {
machine_run::int_v1_imcb("tests/programs/b_extension");
#[cfg(has_asm)]
machine_run::asm_v1_imcb("tests/programs/b_extension");
#[cfg(has_asm)]
machine_run::aot_v1_imcb("tests/programs/b_extension");
}
#[test]
pub fn test_clzw_bug() {
machine_run::int_v1_imcb("tests/programs/clzw_bug");
#[cfg(has_asm)]
machine_run::asm_v1_imcb("tests/programs/clzw_bug");
#[cfg(has_asm)]
machine_run::aot_v1_imcb("tests/programs/clzw_bug");
}
#[test]
pub fn test_packw_signextend() {
machine_run::int_v1_imcb("tests/programs/packw_signextend");
#[cfg(has_asm)]
machine_run::asm_v1_imcb("tests/programs/packw_signextend");
#[cfg(has_asm)]
machine_run::aot_v1_imcb("tests/programs/packw_signextend");
}
#[test]
pub fn test_single_bit_signextend() {
machine_run::int_v1_imcb("tests/programs/single_bit_signextend");
#[cfg(has_asm)]
machine_run::asm_v1_imcb("tests/programs/single_bit_signextend");
#[cfg(has_asm)]
machine_run::aot_v1_imcb("tests/programs/single_bit_signextend");
}
#[test]
pub fn test_sbinvi_aot_load_imm_bug() {
machine_run::int_v1_imcb("tests/programs/sbinvi_aot_load_imm_bug");
#[cfg(has_asm)]
machine_run::asm_v1_imcb("tests/programs/sbinvi_aot_load_imm_bug");
#[cfg(has_asm)]
machine_run::aot_v1_imcb("tests/programs/sbinvi_aot_load_imm_bug");
}
#[test]
pub fn test_rorw_in_end_of_aot_block() {
// The 1024th instruction will use one more temporary register than normal.
#[cfg(has_asm)]
machine_run::aot_v1_imcb("tests/programs/rorw_in_end_of_aot_block");
}
#[test]
pub fn test_fsri_decode_bug() {
machine_run::int_v1_imcb("tests/programs/fsri_decode_bug");
#[cfg(has_asm)]
machine_run::asm_v1_imcb("tests/programs/fsri_decode_bug");
#[cfg(has_asm)]
machine_run::aot_v1_imcb("tests/programs/fsri_decode_bug");
}
| 30.625 | 79 | 0.728571 |
fb8cd6563c7f5d12f8db33826665b859079455ec | 6,690 | use std::ops::{Add, Mul, MulAssign, Sub};
// f64 complex
#[derive(Clone, Copy, Debug)]
pub struct Complex64 {
pub re: f64,
pub im: f64,
}
impl Complex64 {
#[inline]
pub fn new(re: f64, im: f64) -> Self {
Self { re, im }
}
#[inline]
pub fn square_norm(&self) -> f64 {
self.re * self.re + self.im * self.im
}
#[inline]
pub fn norm(&self) -> f64 {
self.square_norm().sqrt()
}
#[inline]
pub fn inverse(&self) -> Complex64 {
let nrm = self.square_norm();
Complex64 {
re: self.re / nrm,
im: -self.im / nrm,
}
}
}
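// Worked equation for `inverse` above: for z = a + bi,
// 1/z = conj(z) / |z|^2 = (a - bi) / (a^2 + b^2);
// e.g. z = 3 + 4i gives |z|^2 = 25, so 1/z = 0.12 - 0.16i.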
impl Default for Complex64 {
#[inline]
fn default() -> Self {
Self { re: 0.0, im: 0.0 }
}
}
impl Add<Complex64> for Complex64 {
type Output = Complex64;
#[inline]
fn add(self, other: Complex64) -> Complex64 {
Complex64 {
re: self.re + other.re,
im: self.im + other.im,
}
}
}
impl Sub<Complex64> for Complex64 {
type Output = Complex64;
#[inline]
fn sub(self, other: Complex64) -> Complex64 {
Complex64 {
re: self.re - other.re,
im: self.im - other.im,
}
}
}
impl Mul<Complex64> for Complex64 {
type Output = Complex64;
#[inline]
fn mul(self, other: Complex64) -> Complex64 {
Complex64 {
re: self.re * other.re - self.im * other.im,
im: self.re * other.im + self.im * other.re,
}
}
}
impl MulAssign<Complex64> for Complex64 {
#[inline]
fn mul_assign(&mut self, other: Complex64) {
let tmp = self.re * other.im + self.im * other.re;
self.re = self.re * other.re - self.im * other.im;
self.im = tmp;
}
}
pub fn fast_fourier_transform_input_permutation(length: usize) -> Vec<usize> {
    let mut result: Vec<usize> = (0..length).collect();
let mut reverse = 0_usize;
let mut position = 1_usize;
while position < length {
let mut bit = length >> 1;
while bit & reverse != 0 {
reverse ^= bit;
bit >>= 1;
}
reverse ^= bit;
// This is equivalent to adding 1 to a reversed number
if position < reverse {
// Only swap each element once
result.swap(position, reverse);
}
position += 1;
}
result
}
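// A worked example of the bit-reversal permutation above: for length 8
// (3 bits per index), each index i maps to i with its bits reversed, so the
// returned permutation is [0, 4, 2, 6, 1, 5, 3, 7]
// (e.g. 1 = 0b001 -> 0b100 = 4, and 3 = 0b011 -> 0b110 = 6).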
pub fn fast_fourier_transform(input: &[f64], input_permutation: &[usize]) -> Vec<Complex64> {
let n = input.len();
    let mut result = Vec::with_capacity(n);
for position in input_permutation {
result.push(Complex64::new(input[*position], 0.0));
}
let mut segment_length = 1_usize;
while segment_length < n {
segment_length <<= 1;
let angle: f64 = std::f64::consts::TAU / segment_length as f64;
let w_len = Complex64::new(angle.cos(), angle.sin());
for segment_start in (0..n).step_by(segment_length) {
let mut w = Complex64::new(1.0, 0.0);
for position in segment_start..(segment_start + segment_length / 2) {
let a = result[position];
let b = result[position + segment_length / 2] * w;
result[position] = a + b;
result[position + segment_length / 2] = a - b;
w *= w_len;
}
}
}
result
}
pub fn inverse_fast_fourier_transform(
input: &[Complex64],
input_permutation: &[usize],
) -> Vec<f64> {
let n = input.len();
    let mut result = Vec::with_capacity(n);
for position in input_permutation {
result.push(input[*position]);
}
let mut segment_length = 1_usize;
while segment_length < n {
segment_length <<= 1;
let angle: f64 = -std::f64::consts::TAU / segment_length as f64;
let w_len = Complex64::new(angle.cos(), angle.sin());
for segment_start in (0..n).step_by(segment_length) {
let mut w = Complex64::new(1.0, 0.0);
for position in segment_start..(segment_start + segment_length / 2) {
let a = result[position];
let b = result[position + segment_length / 2] * w;
result[position] = a + b;
result[position + segment_length / 2] = a - b;
w *= w_len;
}
}
}
let scale = 1.0 / n as f64;
result.iter().map(|x| x.re * scale).collect()
}
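// A minimal end-to-end sketch (assuming power-of-two input lengths, which the
// radix-2 butterflies above require): multiply two polynomials by transforming
// both, multiplying pointwise, then inverting.
//
// let a = vec![1.0, 2.0, 0.0, 0.0]; // 1 + 2x
// let b = vec![3.0, 4.0, 0.0, 0.0]; // 3 + 4x
// let perm = fast_fourier_transform_input_permutation(a.len());
// let fa = fast_fourier_transform(&a, &perm);
// let fb = fast_fourier_transform(&b, &perm);
// let prod: Vec<Complex64> = fa.iter().zip(&fb).map(|(x, y)| *x * *y).collect();
// let c = inverse_fast_fourier_transform(&prod, &perm); // ~[3.0, 10.0, 8.0, 0.0]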
#[cfg(test)]
mod tests {
use super::*;
fn almost_equal(a: f64, b: f64, epsilon: f64) -> bool {
(a - b).abs() < epsilon
}
const EPSILON: f64 = 1e-6;
#[test]
fn small_polynomial_returns_self() {
let polynomial = vec![1.0f64, 1.0, 0.0, 2.5];
        let permutation = fast_fourier_transform_input_permutation(polynomial.len());
let fft = fast_fourier_transform(&polynomial, &permutation);
let ifft = inverse_fast_fourier_transform(&fft, &permutation);
for (x, y) in ifft.iter().zip(polynomial.iter()) {
assert!(almost_equal(*x, *y, EPSILON));
}
}
#[test]
fn square_small_polynomial() {
let mut polynomial = vec![1.0f64, 1.0, 0.0, 2.0];
polynomial.append(&mut vec![0.0; 4]);
        let permutation = fast_fourier_transform_input_permutation(polynomial.len());
let mut fft = fast_fourier_transform(&polynomial, &permutation);
fft.iter_mut().for_each(|num| *num *= *num);
let ifft = inverse_fast_fourier_transform(&fft, &permutation);
let expected = vec![1.0, 2.0, 1.0, 4.0, 4.0, 0.0, 4.0, 0.0, 0.0];
for (x, y) in ifft.iter().zip(expected.iter()) {
assert!(almost_equal(*x, *y, EPSILON));
}
}
#[test]
#[ignore]
fn square_big_polynomial() {
// This test case takes ~1050ms on my machine in unoptimized mode,
// but it takes ~70ms in release mode.
        let n = 1 << 17; // 131_072
let mut polynomial = vec![1.0f64; n];
polynomial.append(&mut vec![0.0f64; n]);
        let permutation = fast_fourier_transform_input_permutation(polynomial.len());
let mut fft = fast_fourier_transform(&polynomial, &permutation);
fft.iter_mut().for_each(|num| *num *= *num);
let ifft = inverse_fast_fourier_transform(&fft, &permutation);
let mut expected = vec![0.0; n << 1];
for i in 0..((n << 1) - 1) {
expected[i] = std::cmp::min(i + 1, (n << 1) - 1 - i) as f64;
}
for (x, y) in ifft.iter().zip(expected.iter()) {
assert!(almost_equal(*x, *y, EPSILON));
}
}
}
| 29.866071 | 93 | 0.549776 |
d59a407f040fada4baf2c3a85a6592311625923f | 12,616 | #![recursion_limit = "2048"]
extern crate proc_macro;
#[macro_use]
extern crate quote;
use std::convert::TryFrom;
use syn::{Ident, Item, ItemEnum, spanned::Spanned, parse_macro_input};
use proc_macro2::{TokenStream, Span};
use proc_macro::TokenTree;
struct Flag {
name: Ident,
span: Span,
value: FlagValue,
}
enum FlagValue {
Literal(u128),
Deferred,
Inferred,
}
#[proc_macro_attribute]
pub fn bitflags_internal(
attr: proc_macro::TokenStream,
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
    let defaults = if attr.is_empty() { vec![] } else { parse_defaults(attr) };
let ast = parse_macro_input!(input as Item);
let output = match ast {
Item::Enum(ref item_enum) => gen_enumflags(item_enum, defaults),
_ => Err(syn::Error::new_spanned(&ast,
"#[bitflags] requires an enum")),
};
output.unwrap_or_else(|err| {
let error = err.to_compile_error();
quote! {
#ast
#error
}
}).into()
}
fn parse_defaults(attr: proc_macro::TokenStream) -> Vec<proc_macro::Ident> {
let mut attr = attr.into_iter();
    // this unwrap is fine because the stream is not empty, so it contains at least one element
let default = attr.next().unwrap();
match default {
TokenTree::Ident(default) => {
if default.to_string() != "default" {
panic!("only default parameter allowed right now");
}
let eq = attr.next();
if eq.is_none() {
panic!("default must be followed by '='");
}
let eq = eq.unwrap();
if eq.to_string() != "=" {
panic!("default must be followed by '='");
}
let mut defaults = vec![];
loop {
let default = match attr.next() {
None => break,
Some(default) => default,
};
match default {
TokenTree::Ident(default) => {
defaults.push(default);
}
default =>
panic!("default must be followed by '=' \
and at least one variant separated by '|'
'{}' is not valid identifier of an variant", default),
}
match attr.next() {
None => break,
Some(separator) => {
if separator.to_string() != "|" {
panic!("default must be followed by '=' \
and at least one variant separated by '|'
'{}' is not a valid separator", separator);
}
}
}
}
if let Some(not_a_separator) = attr.next() {
panic!("default must be followed by '=' \
and at least one variant separated by '|'
'{}' is not a valid separator", not_a_separator);
}
if defaults.is_empty() {
panic!("default must be followed by '=' \
and at least one variant separated by '|'");
}
defaults
},
_ => {
panic!("only default parameter allowed right now");
}
}
}
/// Try to evaluate the expression given.
fn fold_expr(expr: &syn::Expr) -> Option<u128> {
use syn::Expr;
match expr {
Expr::Lit(ref expr_lit) => {
match expr_lit.lit {
syn::Lit::Int(ref lit_int) => lit_int.base10_parse().ok(),
_ => None,
}
},
Expr::Binary(ref expr_binary) => {
let l = fold_expr(&expr_binary.left)?;
let r = fold_expr(&expr_binary.right)?;
match &expr_binary.op {
syn::BinOp::Shl(_) => {
u32::try_from(r).ok()
.and_then(|r| l.checked_shl(r))
}
_ => None,
}
}
_ => None,
}
}
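// For example, a sketch of what folds and what does not:
// `4`      -> Some(4)
// `1 << 3` -> Some(8)  (the only binary operator handled is `<<`)
// `A | B`  -> None     (left to the deferred compile-time assertion)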
fn collect_flags<'a>(variants: impl Iterator<Item=&'a syn::Variant>)
-> Result<Vec<Flag>, syn::Error>
{
variants
.map(|variant| {
// MSRV: Would this be cleaner with `matches!`?
match variant.fields {
syn::Fields::Unit => (),
_ => return Err(syn::Error::new_spanned(&variant.fields,
"Bitflag variants cannot contain additional data")),
}
let value = if let Some(ref expr) = variant.discriminant {
if let Some(n) = fold_expr(&expr.1) {
FlagValue::Literal(n)
} else {
FlagValue::Deferred
}
} else {
FlagValue::Inferred
};
Ok(Flag {
name: variant.ident.clone(),
span: variant.span(),
value,
})
})
.collect()
}
/// Given a list of attributes, find the `repr`, if any, and return the integer
/// type specified.
fn extract_repr(attrs: &[syn::Attribute])
-> Result<Option<Ident>, syn::Error>
{
use syn::{Meta, NestedMeta};
attrs.iter()
.find_map(|attr| {
match attr.parse_meta() {
Err(why) => {
Some(Err(syn::Error::new_spanned(attr,
format!("Couldn't parse attribute: {}", why))))
}
Ok(Meta::List(ref meta)) if meta.path.is_ident("repr") => {
meta.nested.iter()
.find_map(|mi| match mi {
NestedMeta::Meta(Meta::Path(path)) => {
path.get_ident().cloned()
.map(Ok)
}
_ => None
})
}
Ok(_) => None
}
})
.transpose()
}
/// Check the repr and return the number of bits available
fn type_bits(ty: &Ident) -> Result<u8, syn::Error> {
// This would be so much easier if we could just match on an Ident...
if ty == "usize" {
Err(syn::Error::new_spanned(ty,
"#[repr(usize)] is not supported. Use u32 or u64 instead."))
}
else if ty == "i8" || ty == "i16" || ty == "i32"
|| ty == "i64" || ty == "i128" || ty == "isize" {
Err(syn::Error::new_spanned(ty,
"Signed types in a repr are not supported."))
}
else if ty == "u8" { Ok(8) }
else if ty == "u16" { Ok(16) }
else if ty == "u32" { Ok(32) }
else if ty == "u64" { Ok(64) }
else if ty == "u128" { Ok(128) }
else {
Err(syn::Error::new_spanned(ty,
"repr must be an integer type for #[bitflags]."))
}
}
/// Returns deferred checks
fn check_flag(
type_name: &Ident,
flag: &Flag,
) -> Result<Option<TokenStream>, syn::Error> {
use FlagValue::*;
match flag.value {
Literal(n) => {
if !n.is_power_of_two() {
Err(syn::Error::new(flag.span,
"Flags must have exactly one set bit"))
} else {
Ok(None)
}
}
Inferred => {
Err(syn::Error::new(flag.span,
"Please add an explicit discriminant"))
}
Deferred => {
let variant_name = &flag.name;
// MSRV: Use an unnamed constant (`const _: ...`).
let assertion_name = syn::Ident::new(
&format!("__enumflags_assertion_{}_{}",
type_name, flag.name),
Span::call_site()); // call_site because def_site is unstable
Ok(Some(quote_spanned!(flag.span =>
#[doc(hidden)]
const #assertion_name:
<<[(); (
(#type_name::#variant_name as u128).wrapping_sub(1) &
(#type_name::#variant_name as u128) == 0 &&
(#type_name::#variant_name as u128) != 0
) as usize] as enumflags2::_internal::AssertionHelper>
::Status as enumflags2::_internal::ExactlyOneBitSet>::X
= ();
)))
}
}
}
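// A worked instance of the exactly-one-bit-set test emitted above,
// `(n - 1) & n == 0 && n != 0`:
// n = 0b0100: (n - 1) & n = 0b0011 & 0b0100 = 0 and n != 0 -> accepted;
// n = 0b0110: (n - 1) & n = 0b0101 & 0b0110 = 0b0100 != 0  -> rejected.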
fn gen_enumflags(ast: &ItemEnum, defaults: Vec<proc_macro::Ident>)
-> Result<TokenStream, syn::Error>
{
let ident = &ast.ident;
let span = Span::call_site();
// for quote! interpolation
let variant_names =
ast.variants.iter()
.map(|v| &v.ident)
.collect::<Vec<_>>();
let repeated_name = vec![&ident; ast.variants.len()];
let variants = collect_flags(ast.variants.iter())?;
let deferred = variants.iter()
.flat_map(|variant| check_flag(ident, variant).transpose())
.collect::<Result<Vec<_>, _>>()?;
let ty = extract_repr(&ast.attrs)?
.ok_or_else(|| syn::Error::new_spanned(&ident,
"repr attribute missing. Add #[repr(u64)] or a similar attribute to specify the size of the bitfield."))?;
let bits = type_bits(&ty)?;
if (bits as usize) < variants.len() {
return Err(syn::Error::new_spanned(&ty,
format!("Not enough bits for {} flags", variants.len())));
}
let std_path = quote_spanned!(span => ::enumflags2::_internal::core);
let mut default = 0u128;
for d in defaults {
match ast.variants
.iter()
.find(|v| v.ident == d.to_string()) {
None => panic!("{:?} is not valid varian of {:?}", d, ast.ident),
Some(v) => {
if let Some(ref expr) = v.discriminant {
if let Some(n) = fold_expr(&expr.1) {
default |= n
} else {
unimplemented!("Deferred flag value not yet supported as default");
}
} else {
unimplemented!("Inferred flag value not yet supported as default");
}
}
}
}
Ok(quote_spanned! {
span =>
#ast
#(#deferred)*
impl #std_path::ops::Not for #ident {
type Output = ::enumflags2::BitFlags<#ident>;
#[inline(always)]
fn not(self) -> Self::Output {
use ::enumflags2::{BitFlags, _internal::RawBitFlags};
unsafe { BitFlags::from_bits_unchecked(self.bits()).not() }
}
}
impl #std_path::ops::BitOr for #ident {
type Output = ::enumflags2::BitFlags<#ident>;
#[inline(always)]
fn bitor(self, other: Self) -> Self::Output {
use ::enumflags2::{BitFlags, _internal::RawBitFlags};
unsafe { BitFlags::from_bits_unchecked(self.bits() | other.bits())}
}
}
impl #std_path::ops::BitAnd for #ident {
type Output = ::enumflags2::BitFlags<#ident>;
#[inline(always)]
fn bitand(self, other: Self) -> Self::Output {
use ::enumflags2::{BitFlags, _internal::RawBitFlags};
unsafe { BitFlags::from_bits_unchecked(self.bits() & other.bits())}
}
}
impl #std_path::ops::BitXor for #ident {
type Output = ::enumflags2::BitFlags<#ident>;
#[inline(always)]
fn bitxor(self, other: Self) -> Self::Output {
#std_path::convert::Into::<Self::Output>::into(self) ^ #std_path::convert::Into::<Self::Output>::into(other)
}
}
impl ::enumflags2::_internal::RawBitFlags for #ident {
type Numeric = #ty;
const EMPTY: Self::Numeric = 0;
const DEFAULT: Self::Numeric = #default as #ty;
const ALL_BITS: Self::Numeric =
0 #(| (#repeated_name::#variant_names as #ty))*;
const FLAG_LIST: &'static [Self] =
&[#(#repeated_name::#variant_names),*];
const BITFLAGS_TYPE_NAME : &'static str =
concat!("BitFlags<", stringify!(#ident), ">");
fn bits(self) -> Self::Numeric {
self as #ty
}
}
impl ::enumflags2::BitFlag for #ident {}
})
}
| 34.376022 | 130 | 0.471465 |
9c9b4ec36c40d4e394e8141efde02b8789a915b6 | 1,749 |
pub struct IconCameraIndoor {
props: crate::Props,
}
impl yew::Component for IconCameraIndoor {
type Properties = crate::Props;
type Message = ();
fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self
{
Self { props }
}
fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender
{
true
}
fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender
{
false
}
fn view(&self) -> yew::prelude::Html
{
yew::prelude::html! {
<svg
class=self.props.class.unwrap_or("")
width=self.props.size.unwrap_or(24).to_string()
height=self.props.size.unwrap_or(24).to_string()
viewBox="0 0 24 24"
fill=self.props.fill.unwrap_or("none")
stroke=self.props.color.unwrap_or("currentColor")
stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
>
<svg xmlns="http://www.w3.org/2000/svg" enable-background="new 0 0 24 24" height="24" viewBox="0 0 24 24" width="24"><g><path d="M0,0h24v24H0V0z" fill="none"/></g><g><g><path d="M6,10v9h12v-9l-6-4.5L6,10z M14,12v1l2-1.06v4.12L14,15v1c0,0.55-0.45,1-1,1H9c-0.55,0-1-0.45-1-1v-4 c0-0.55,0.45-1,1-1h4C13.55,11,14,11.45,14,12z" opacity=".3"/><path d="M8,12v4c0,0.55,0.45,1,1,1h4c0.55,0,1-0.45,1-1v-1l2,1.06v-4.12L14,13v-1c0-0.55-0.45-1-1-1H9C8.45,11,8,11.45,8,12z"/><path d="M12,3L4,9v12h16V9L12,3z M18,19H6v-9l6-4.5l6,4.5V19z"/></g></g></svg>
</svg>
}
}
}
| 38.021739 | 550 | 0.588908 |
56c57c63b740bb3c2ed4992128943628d34bdff9 | 4,856 | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
//
// @generated SignedSource<<78f951afa824b4eaadcb4f0bf04d2ef8>>
//
// To regenerate this file, run:
// hphp/hack/src/oxidized_regen.sh
use arena_trait::TrivialDrop;
use no_pos_hash::NoPosHash;
use ocamlrep_derive::FromOcamlRepIn;
use ocamlrep_derive::ToOcamlRep;
use serde::Serialize;
#[allow(unused_imports)]
use crate::*;
pub use typing_defs::*;
pub use typing_defs::ConstDecl;
#[derive(
Clone,
Debug,
Eq,
FromOcamlRepIn,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep
)]
pub struct ShallowClassConst<'a> {
pub abstract_: bool,
pub name: typing_defs::PosId<'a>,
/// This field is used for two different meanings in two different places...
/// enum class A:arraykey {int X="a";} -- here X.scc_type=\HH\MemberOf<A,int>
/// enum B:int as arraykey {X="a"; Y=1; Z=B::X;} -- here X.scc_type=string, Y.scc_type=int, Z.scc_type=TAny
    /// In the latter case, the scc_type is just a simple syntactic attempt to retrieve the type from the initializer.
pub type_: &'a Ty<'a>,
/// This is a list of all scope-resolution operators "A::B" that are mentioned in the const initializer,
/// for members of regular-enums and enum-class-enums to detect circularity of initializers.
/// We don't yet have a similar mechanism for top-level const initializers.
pub refs: &'a [typing_defs::ClassConstRef<'a>],
}
impl<'a> TrivialDrop for ShallowClassConst<'a> {}
#[derive(
Clone,
Debug,
Eq,
FromOcamlRepIn,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep
)]
pub struct ShallowTypeconst<'a> {
pub abstract_: TypeconstAbstractKind<'a>,
pub as_constraint: Option<&'a Ty<'a>>,
pub super_constraint: Option<&'a Ty<'a>>,
pub name: typing_defs::PosId<'a>,
pub type_: Option<&'a Ty<'a>>,
pub enforceable: (&'a pos_or_decl::PosOrDecl<'a>, bool),
pub reifiable: Option<&'a pos_or_decl::PosOrDecl<'a>>,
}
impl<'a> TrivialDrop for ShallowTypeconst<'a> {}
#[derive(
Clone,
Debug,
Eq,
FromOcamlRepIn,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep
)]
pub struct ShallowProp<'a> {
pub name: typing_defs::PosId<'a>,
pub xhp_attr: Option<XhpAttr>,
pub type_: Option<&'a Ty<'a>>,
pub visibility: oxidized::ast_defs::Visibility,
pub flags: prop_flags::PropFlags,
}
impl<'a> TrivialDrop for ShallowProp<'a> {}
#[derive(
Clone,
Debug,
Eq,
FromOcamlRepIn,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep
)]
pub struct ShallowMethod<'a> {
pub name: typing_defs::PosId<'a>,
pub type_: &'a Ty<'a>,
pub visibility: oxidized::ast_defs::Visibility,
pub deprecated: Option<&'a str>,
pub flags: method_flags::MethodFlags,
}
impl<'a> TrivialDrop for ShallowMethod<'a> {}
#[derive(
Clone,
Debug,
Eq,
FromOcamlRepIn,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep
)]
pub struct ShallowClass<'a> {
pub mode: oxidized::file_info::Mode,
pub final_: bool,
pub is_xhp: bool,
pub has_xhp_keyword: bool,
pub kind: oxidized::ast_defs::ClassKind,
pub name: typing_defs::PosId<'a>,
pub tparams: &'a [&'a Tparam<'a>],
pub where_constraints: &'a [&'a WhereConstraint<'a>],
pub extends: &'a [&'a Ty<'a>],
pub uses: &'a [&'a Ty<'a>],
pub xhp_attr_uses: &'a [&'a Ty<'a>],
pub req_extends: &'a [&'a Ty<'a>],
pub req_implements: &'a [&'a Ty<'a>],
pub implements: &'a [&'a Ty<'a>],
pub implements_dynamic: bool,
pub consts: &'a [&'a ShallowClassConst<'a>],
pub typeconsts: &'a [&'a ShallowTypeconst<'a>],
pub props: &'a [&'a ShallowProp<'a>],
pub sprops: &'a [&'a ShallowProp<'a>],
pub constructor: Option<&'a ShallowMethod<'a>>,
pub static_methods: &'a [&'a ShallowMethod<'a>],
pub methods: &'a [&'a ShallowMethod<'a>],
pub user_attributes: &'a [&'a UserAttribute<'a>],
pub enum_type: Option<&'a EnumType<'a>>,
}
impl<'a> TrivialDrop for ShallowClass<'a> {}
pub type FunDecl<'a> = FunElt<'a>;
pub type ClassDecl<'a> = ShallowClass<'a>;
pub type RecordDecl<'a> = RecordDefType<'a>;
pub type TypedefDecl<'a> = TypedefType<'a>;
#[derive(
Clone,
Copy,
Debug,
Eq,
FromOcamlRepIn,
Hash,
NoPosHash,
Ord,
PartialEq,
PartialOrd,
Serialize,
ToOcamlRep
)]
pub enum Decl<'a> {
Class(&'a ClassDecl<'a>),
Fun(&'a FunDecl<'a>),
Record(&'a RecordDecl<'a>),
Typedef(&'a TypedefDecl<'a>),
Const(&'a ConstDecl<'a>),
}
impl<'a> TrivialDrop for Decl<'a> {}
| 25.424084 | 117 | 0.636326 |
0e3da6ca3380b402b36942bd3378c94bccd2f36f | 1,288 | use piece::*;
use seqq::*;
mod seqq {
use seq::seq;
seq!(N in 2..65 {
#(
pub fn sum~N<T>(tuple: seq!(P in 0..N { (#(T,)*) })) -> T
where
T: std::ops::Add<Output = T>,
{
let mut sum = tuple.0;
seq!(Q in 1..N {
sum = sum + tuple.Q;
});
sum
}
)*
});
seq!(N in 0..8 {
pub enum Cpu {
#(
Variant~N,
)*
}
});
}
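// A sketch of what the `seq!` invocations above expand to: `sum2` through
// `sum64` each sum a tuple of the matching arity (e.g. `sum3((1, 2, 3)) == 6`),
// and `Cpu` gains the eight variants `Variant0` through `Variant7`.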
mod piece {
use bitfield::{bitfield, specifier};
#[bitfield(filler = 3)]
#[derive(Debug)]
pub struct Piece {
#[bits = 3]
pub ty: Type,
#[bits = 1]
pub color: Color,
pub moved: bool,
}
#[specifier(bits = 3)]
pub enum Type {
Pawn,
Knight,
Bishop,
Rook,
Queen,
King,
Empty,
}
#[specifier(bits = 1)]
pub enum Color {
White,
Black,
}
}
fn main() {
let sum = sum64(seq::seq!(N in 0..64 { (#(N,)*) }));
println!("{sum}");
let mut piece = Piece::new();
piece.set_ty(Type::Queen);
piece.set_color(Color::Black);
piece.set_moved(true);
println!("{piece:?}");
}
| 17.888889 | 69 | 0.387422 |
edb2212c5290afe386f6913f3d68df691e56d505 | 1,264 | //! Device electronic signature
//!
//! Ref: Section 1.5, GD32VF103 User Manual
//!
//! TODO: verify all functions in this module
// should this module be named `signature`? this name may be too long
const UNIQUE_ID: *const [u8; 96] = 0x1FFF_F7E8 as *const _;
const MEMORY_DENSITY: *const u16 = 0x1FFF_F7E0 as *const _;
/// Factory-programmed unique device id.
///
/// According to section 1.5.2 of the Manual, this value
/// can never be altered by user.
#[inline]
pub fn unique_id() -> &'static [u8; 96] {
// note(unsafe): static read-only value
unsafe { &*UNIQUE_ID }
}
/// Flash memory density in KBytes.
///
/// This value indicates the flash memory density of the device in KBytes.
/// For example, `0x0020` means 32 KBytes.
///
/// Ref: Section 1.5.1, the Manual
#[inline]
pub fn flash_density() -> u16 {
// note(unsafe): static read-only value
unsafe { *MEMORY_DENSITY } // read bits [15:0]
}
/// On-chip SRAM density in KBytes.
///
/// This value indicates the on-chip SRAM density of the device in KBytes.
/// For example, `0x0008` means 8 KBytes.
///
/// Ref: Section 1.5.1, the Manual
#[inline]
pub fn sram_density() -> u16 {
// note(unsafe): static read-only value
unsafe { *(MEMORY_DENSITY.add(1)) } // read bits [31:16]
}
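// A minimal usage sketch (the example values are hypothetical): convert the
// raw densities into byte counts.
//
// let flash_bytes = u32::from(flash_density()) * 1024; // e.g. 0x0020 -> 32_768
// let sram_bytes = u32::from(sram_density()) * 1024;   // e.g. 0x0008 -> 8_192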
| 28.727273 | 74 | 0.66693 |
9cdd8d6845e9ae9c2d47117d8105d1c048ce2b62 | 3,178 | use std::ptr::null_mut;
// A stack node; nodes are managed as a linked list <1>
#[repr(C)]
struct Node<T> {
next: *mut Node<T>,
data: T,
}
// Head of the stack <2>
#[repr(C)]
pub struct StackHead<T> {
head: *mut Node<T>,
}
impl<T> StackHead<T> {
fn new() -> Self {
StackHead { head: null_mut() }
}
pub fn push(&mut self, v: T) { // <3>
// 追加するノードを作成
let node = Box::new(Node {
next: null_mut(),
data: v,
});
        // Extract a raw pointer from the Box
let ptr = Box::into_raw(node) as *mut u8 as usize;
        // Get a pointer to the head pointer;
        // the memory holding head is the target of the LL/SC
let head = &mut self.head as *mut *mut Node<T> as *mut u8 as usize;
        // push implemented with LL/SC <4>
unsafe {
asm!("1:
ldxr {next}, [{head}] // next = *head
str {next}, [{ptr}] // *ptr = next
stlxr w10, {ptr}, [{head}] // *head = ptr
// if tmp != 0 then goto 1
cbnz w10, 1b",
next = out(reg) _,
ptr = in(reg) ptr,
head = in(reg) head,
out("w10") _)
};
}
pub fn pop(&mut self) -> Option<T> { // <5>
unsafe {
            // Get a pointer to the head pointer;
            // the memory holding head is the target of the LL/SC
let head = &mut self.head as *mut *mut Node<T> as *mut u8 as usize;
            // Holds the address of the popped node
let mut result: usize;
            // pop implemented with LL/SC <6>
asm!("1:
ldaxr {result}, [{head}] // result = *head
// if result != NULL then goto 2
cbnz {result}, 2f
// if NULL
clrex // clear exclusive
b 3f // goto 3
// if not NULL
2:
ldr {next}, [{result}] // next = *result
stxr w10, {next}, [{head}] // *head = next
// if tmp != 0 then goto 1
cbnz w10, 1b
3:",
next = out(reg) _,
result = out(reg) result,
head = in(reg) head,
out("w10") _);
if result == 0 {
None
} else {
                // Convert the pointer back into a Box and return the inner value
let ptr = result as *mut u8 as *mut Node<T>;
let head = Box::from_raw(ptr);
Some((*head).data)
}
}
}
}
impl<T> Drop for StackHead<T> {
fn drop(&mut self) {
        // Free all remaining nodes
let mut node = self.head;
while node != null_mut() {
// ポインタをBoxに戻す操作を繰り返す
let n = unsafe { Box::from_raw(node) };
node = n.next;
}
}
}
use std::cell::UnsafeCell;
// Simply wraps a StackHead in an UnsafeCell
pub struct Stack<T> {
data: UnsafeCell<StackHead<T>>,
}
impl<T> Stack<T> {
pub fn new() -> Self {
Stack {
data: UnsafeCell::new(StackHead::new()),
}
}
pub fn get_mut(&self) -> &mut StackHead<T> {
unsafe { &mut *self.data.get() }
}
}
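// A minimal usage sketch (single-threaded for brevity; the LL/SC loops in
// `StackHead` are what make concurrent push/pop safe):
//
// let stack = Stack::<u64>::new();
// stack.get_mut().push(1);
// stack.get_mut().push(2);
// assert_eq!(stack.get_mut().pop(), Some(2)); // LIFO order
// assert_eq!(stack.get_mut().pop(), Some(1));
// assert_eq!(stack.get_mut().pop(), None);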
// Declare that the stack can be shared between threads and sent through channels
unsafe impl<T> Sync for Stack<T> {}
unsafe impl<T> Send for Stack<T> {} | 25.023622 | 79 | 0.430459 |
61bf3ec63350f346a1a22cd595624595a070b4e4 | 4,904 | use futures::channel::mpsc::Sender;
use futures::channel::oneshot;
use futures::SinkExt;
use std::cmp::{Eq, PartialEq};
use std::fmt::Debug;
use std::hash::Hash;
use crate::anyhow::Result;
#[cfg(not(target_arch = "wasm32"))]
use crate::runtime::buffer::circular::Circular;
#[cfg(target_arch = "wasm32")]
use crate::runtime::buffer::slab::Slab;
use crate::runtime::buffer::BufferBuilder;
use crate::runtime::buffer::BufferWriter;
use crate::runtime::AsyncKernel;
use crate::runtime::AsyncMessage;
use crate::runtime::Block;
use crate::runtime::Pmt;
use crate::runtime::SyncKernel;
use crate::runtime::Topology;
/// The main component of any FutureSDR program.
///
/// A [Flowgraph] is what drives the entire program. It is composed of a set of blocks and connections between them.
/// There is at least one source and one sink in every Flowgraph.
pub struct Flowgraph {
pub(crate) topology: Option<Topology>,
}
impl Flowgraph {
/// Creates a new [Flowgraph] with an empty [Topology]
pub fn new() -> Flowgraph {
Flowgraph {
topology: Some(Topology::new()),
}
}
pub fn add_block(&mut self, block: Block) -> usize {
self.topology.as_mut().unwrap().add_block(block)
}
pub fn connect_stream(
&mut self,
src_block: usize,
src_port: &str,
dst_block: usize,
dst_port: &str,
) -> Result<()> {
self.topology.as_mut().unwrap().connect_stream(
src_block,
src_port,
dst_block,
dst_port,
DefaultBuffer::new(),
)
}
pub fn connect_stream_with_type<B: BufferBuilder + Debug + Eq + Hash>(
&mut self,
src_block: usize,
src_port: &str,
dst_block: usize,
dst_port: &str,
buffer: B,
) -> Result<()> {
self.topology
.as_mut()
.unwrap()
.connect_stream(src_block, src_port, dst_block, dst_port, buffer)
}
pub fn connect_message(
&mut self,
src_block: usize,
src_port: &str,
dst_block: usize,
dst_port: &str,
) -> Result<()> {
self.topology
.as_mut()
.unwrap()
.connect_message(src_block, src_port, dst_block, dst_port)
}
pub fn block_async<T: AsyncKernel + 'static>(&self, id: usize) -> Option<&T> {
self.topology
.as_ref()
.and_then(|t| t.block_ref(id))
.and_then(|b| b.as_async())
}
pub fn block_async_mut<T: AsyncKernel + 'static>(&mut self, id: usize) -> Option<&T> {
self.topology
.as_mut()
.and_then(|t| t.block_mut(id))
.and_then(|b| b.as_async_mut())
}
pub fn block_sync<T: SyncKernel + 'static>(&self, id: usize) -> Option<&T> {
self.topology
.as_ref()
.and_then(|t| t.block_ref(id))
.and_then(|b| b.as_sync())
}
pub fn block_sync_mut<T: SyncKernel + 'static>(&mut self, id: usize) -> Option<&T> {
self.topology
.as_mut()
.and_then(|t| t.block_mut(id))
.and_then(|b| b.as_sync_mut())
}
}
impl Default for Flowgraph {
fn default() -> Self {
Self::new()
}
}
pub struct FlowgraphHandle {
inbox: Sender<AsyncMessage>,
}
impl FlowgraphHandle {
pub(crate) fn new(inbox: Sender<AsyncMessage>) -> FlowgraphHandle {
FlowgraphHandle { inbox }
}
pub async fn call(&mut self, block_id: usize, port_id: usize, data: Pmt) -> Result<()> {
self.inbox
.send(AsyncMessage::BlockCall {
block_id,
port_id,
data,
})
.await?;
Ok(())
}
pub async fn callback(&mut self, block_id: usize, port_id: usize, data: Pmt) -> Result<Pmt> {
let (tx, rx) = oneshot::channel::<Pmt>();
self.inbox
.send(AsyncMessage::BlockCallback {
block_id,
port_id,
data,
tx,
})
.await?;
let p = rx.await?;
Ok(p)
}
}
#[derive(Debug, PartialEq, Hash)]
pub struct DefaultBuffer;
impl Eq for DefaultBuffer {}
impl DefaultBuffer {
fn new() -> DefaultBuffer {
DefaultBuffer
}
}
impl BufferBuilder for DefaultBuffer {
#[cfg(not(target_arch = "wasm32"))]
fn build(
&self,
item_size: usize,
writer_inbox: Sender<AsyncMessage>,
writer_output_id: usize,
) -> BufferWriter {
Circular::new().build(item_size, writer_inbox, writer_output_id)
}
#[cfg(target_arch = "wasm32")]
fn build(
&self,
item_size: usize,
writer_inbox: Sender<AsyncMessage>,
writer_output_id: usize,
) -> BufferWriter {
Slab::new().build(item_size, writer_inbox, writer_output_id)
}
}
| 26.365591 | 116 | 0.564029 |
d77d686eb0fd7b57292c9421d35d97158c514b08 | 1,985 | use postgres::{Connection, GenericConnection, TlsMode};
use postgres::params::IntoConnectParams;
use chrono::prelude::{DateTime, Utc};
use super::migration;
use super::result::Result;
pub struct Migration {
db: Connection,
}
impl Migration {
pub fn new<T>(params: T, tls: TlsMode) -> Result<Self>
where
T: IntoConnectParams,
{
let db = try!(Connection::connect(params, tls));
        Ok(Migration { db })
}
fn check<T: GenericConnection>(&self, t: &T) -> Result<()> {
try!(t.execute(
"CREATE TABLE IF NOT EXISTS schema_migrations(version VARCHAR(255) PRIMARY KEY, created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW())",
&[]
));
return Ok(());
}
}
impl migration::Migration for Migration {
fn name(&self) -> &'static str {
"postgresql"
}
fn up(&self, name: &String, script: &String) -> Result<()> {
let t = try!(self.db.transaction());
try!(self.check(&t));
try!(t.batch_execute(script));
try!(t.execute("INSERT INTO schema_migrations(version) VALUES($1)", &[name]));
try!(t.commit());
Ok(())
}
fn down(&self, name: &String, script: &String) -> Result<()> {
let t = try!(self.db.transaction());
try!(self.check(&t));
try!(t.batch_execute(script));
try!(t.execute("DELETE FROM schema_migrations WHERE version = $1", &[name]));
try!(t.commit());
Ok(())
}
fn versions(&self) -> Result<Vec<(String, DateTime<Utc>)>> {
try!(self.check(&self.db));
let mut items = Vec::new();
for row in &try!(self.db.query(
"SELECT version, created_at FROM schema_migrations ORDER BY version ASC",
&[],
)) {
let version: String = row.get("version");
let created_at: DateTime<Utc> = row.get("created_at");
items.push((version, created_at))
}
Ok(items)
}
}
| 30.075758 | 153 | 0.562217 |
f7920a5903294a3e9cc731d19bae5d9e8c5eba63 | 480 | use std::{io, mem};
use super::cvt;
use nix::libc::{c_int, c_ushort, ioctl, TIOCGWINSZ};
#[repr(C)]
struct TermSize {
row: c_ushort,
col: c_ushort,
_x: c_ushort,
_y: c_ushort,
}
/// Get the size of the terminal.
pub fn terminal_size(fd: c_int) -> io::Result<(usize, usize)> {
unsafe {
let mut size: TermSize = mem::zeroed();
cvt(ioctl(fd, TIOCGWINSZ.into(), &mut size as *mut _))?;
Ok((size.col as usize, size.row as usize))
}
}
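// A minimal usage sketch (assumes a Unix tty on file descriptor 0):
//
// let (cols, rows) = terminal_size(0)?;
// println!("terminal is {}x{}", cols, rows);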
| 21.818182 | 64 | 0.597917 |
fc51b52f725620e640b22a260907e6b44f4bdf04 | 6,787 | use super::{Context, Module, RootModuleConfig, Shell};
use crate::configs::shell::ShellConfig;
use crate::formatter::StringFormatter;
pub fn module<'a>(context: &'a Context) -> Option<Module<'a>> {
let mut module = context.new_module("shell");
let config: ShellConfig = ShellConfig::try_load(module.config);
if config.disabled {
return None;
}
let shell = context.shell;
let parsed = StringFormatter::new(config.format).and_then(|formatter| {
formatter
.map_meta(|variable, _| match variable {
"indicator" => match shell {
Shell::Bash => Some(config.bash_indicator),
Shell::Fish => Some(config.fish_indicator),
Shell::Zsh => Some(config.zsh_indicator),
Shell::PowerShell => Some(config.powershell_indicator),
Shell::Ion => Some(config.ion_indicator),
Shell::Elvish => Some(config.elvish_indicator),
Shell::Tcsh => Some(config.tcsh_indicator),
Shell::Unknown => None,
},
_ => None,
})
.parse(None)
});
module.set_segments(match parsed {
Ok(segments) => segments,
Err(error) => {
log::warn!("Error in module `shell`: \n{}", error);
return None;
}
});
Some(module)
}
#[cfg(test)]
mod tests {
use crate::context::Shell;
use crate::test::ModuleRenderer;
use ansi_term::Color;
#[test]
fn test_none_if_disabled() {
let expected = None;
let actual = ModuleRenderer::new("shell").shell(Shell::Bash).collect();
assert_eq!(expected, actual);
}
#[test]
fn test_none_if_unknown_shell() {
let expected = None;
let actual = ModuleRenderer::new("shell").shell(Shell::Unknown).collect();
assert_eq!(expected, actual);
}
#[test]
fn test_bash_default_format() {
let expected = Some(format!("{} ", "bsh"));
let actual = ModuleRenderer::new("shell")
.shell(Shell::Bash)
.config(toml::toml! {
[shell]
disabled = false
})
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_bash_custom_format() {
let expected = Some(format!("{} ", Color::Cyan.bold().paint("bash")));
let actual = ModuleRenderer::new("shell")
.shell(Shell::Bash)
.config(toml::toml! {
[shell]
bash_indicator = "[bash](bold cyan)"
disabled = false
})
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_fish_default_format() {
let expected = Some(format!("{} ", "fsh"));
let actual = ModuleRenderer::new("shell")
.shell(Shell::Fish)
.config(toml::toml! {
[shell]
disabled = false
})
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_fish_custom_format() {
let expected = Some(format!("{} ", Color::Cyan.bold().paint("fish")));
let actual = ModuleRenderer::new("shell")
.shell(Shell::Fish)
.config(toml::toml! {
[shell]
fish_indicator = "[fish](cyan bold)"
disabled = false
})
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_zsh_default_format() {
let expected = Some(format!("{} ", "zsh"));
let actual = ModuleRenderer::new("shell")
.shell(Shell::Zsh)
.config(toml::toml! {
[shell]
disabled = false
})
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_zsh_custom_format() {
let expected = Some(format!("{} ", Color::Cyan.bold().paint("zsh")));
let actual = ModuleRenderer::new("shell")
      .shell(Shell::Zsh)
.config(toml::toml! {
[shell]
bash_indicator = "[zsh](bold cyan)"
disabled = false
})
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_powershell_default_format() {
let expected = Some(format!("{} ", "psh"));
let actual = ModuleRenderer::new("shell")
.shell(Shell::PowerShell)
.config(toml::toml! {
[shell]
disabled = false
})
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_powershell_custom_format() {
let expected = Some(format!("{} ", Color::Cyan.bold().paint("powershell")));
let actual = ModuleRenderer::new("shell")
.shell(Shell::PowerShell)
.config(toml::toml! {
[shell]
powershell_indicator = "[powershell](bold cyan)"
disabled = false
})
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_ion_default_format() {
let expected = Some(format!("{} ", "ion"));
let actual = ModuleRenderer::new("shell")
.shell(Shell::Ion)
.config(toml::toml! {
[shell]
disabled = false
})
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_ion_custom_format() {
let expected = Some(format!("{} ", Color::Cyan.bold().paint("ion")));
let actual = ModuleRenderer::new("shell")
.shell(Shell::Ion)
.config(toml::toml! {
[shell]
ion_indicator = "[ion](bold cyan)"
disabled = false
})
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_elvish_default_format() {
let expected = Some(format!("{} ", "esh"));
let actual = ModuleRenderer::new("shell")
.shell(Shell::Elvish)
.config(toml::toml! {
[shell]
disabled = false
})
.collect();
assert_eq!(expected, actual);
}
#[test]
fn test_elvish_custom_format() {
let expected = Some(format!("{} ", Color::Cyan.bold().paint("elvish")));
let actual = ModuleRenderer::new("shell")
.shell(Shell::Elvish)
.config(toml::toml! {
[shell]
elvish_indicator = "[elvish](bold cyan)"
disabled = false
})
.collect();
assert_eq!(expected, actual);
}
}
| 28.161826 | 86 | 0.490497 |
d69319302912d57ae3de8ee50f341ccd3505c6a6 | 4,612 | mod gizmo;
mod tools;
use asset::{PropID, TextureID};
use cgmath::vec3;
use winit::event::VirtualKeyCode;
use crate::{
button,
data::PropInfoContainer,
graphics::{
structures::GroundVertex, Canvas, Graphics, GroundMesh, GroundMeshDescriptor, Share,
},
Host, ToHost,
};
use self::tools::{CameraTool, Tool};
use super::{
camera::Camera,
elements::ElementKind,
input::Input,
scene::{self, Action, Scene},
};
pub struct Editor {
tool: Box<dyn Tool>,
old_tool: Option<Box<dyn Tool>>,
ground: Ground,
mode: ElementKind,
grid: i32,
texture: TextureID,
prop: PropID,
}
impl Editor {
pub fn init(ctx: Context) -> Self {
Self {
tool: Box::new(CameraTool::new(ctx.graphics)),
old_tool: None,
ground: Ground::new(ctx.graphics),
mode: ElementKind::Solid,
grid: 128,
texture: TextureID(2),
prop: PropID(0),
}
}
pub fn process(&mut self, ctx: Context) {
if ctx.mario {
return;
}
self.old_tool = None;
let new = self.tool.process(tools::Context {
input: ctx.input,
graphics: ctx.graphics,
prop_infos: ctx.prop_infos,
camera: ctx.camera,
scene: ctx.scene,
delta: ctx.delta,
mode: self.mode,
grid: &mut self.grid,
texture: self.texture,
prop: self.prop,
});
if let Some(new) = new {
if self.tool.keep_old() {
self.old_tool = Some(std::mem::replace(&mut self.tool, new));
} else {
self.tool = new;
}
}
if self.tool.can_switch() {
for (key, mode, button) in [
(VirtualKeyCode::Key1, ElementKind::Solid, button::SOLID),
(VirtualKeyCode::Key2, ElementKind::Face, button::FACE),
(VirtualKeyCode::Key3, ElementKind::Point, button::POINT),
(VirtualKeyCode::Key4, ElementKind::Prop, button::PROP),
] {
if ctx.input.is_key_down_once(key) {
if self.mode != mode {
ctx.scene.act(
scene::Context {
graphics: ctx.graphics,
},
Action::DeselectAll(self.mode),
);
self.mode = mode;
ctx.host.callback(ToHost::Button(button));
println!("[wasm] button {}", button);
}
break;
}
}
}
}
pub fn set_texture(&mut self, texture: TextureID) {
self.texture = texture;
}
pub fn set_prop(&mut self, prop: PropID) {
self.prop = prop;
}
pub fn set_mode(&mut self, ctx: Context, mode: ElementKind) {
if self.mode != mode {
ctx.scene.act(
scene::Context {
graphics: ctx.graphics,
},
Action::DeselectAll(self.mode),
);
self.mode = mode;
}
}
pub fn mode(&self) -> ElementKind {
self.mode
}
pub fn render(&self, canvas: &mut Canvas) {
canvas.set_grid_len(self.grid);
let tool = if let Some(old) = &self.old_tool {
old
} else {
&self.tool
};
tool.render(canvas);
self.ground.render(canvas);
}
}
pub struct Context<'a> {
pub host: &'a dyn Host,
pub input: &'a Input,
pub graphics: &'a Graphics,
pub prop_infos: &'a PropInfoContainer,
pub camera: &'a mut Camera,
pub scene: &'a mut Scene,
pub delta: f32,
pub mario: bool,
}
struct Ground {
mesh: GroundMesh,
}
impl Ground {
fn new(graphics: &Graphics) -> Self {
let mesh = {
const POSITIONS: [[f32; 2]; 4] = [[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]];
let vertices = POSITIONS.map(|pos| GroundVertex {
position: vec3(pos[0] - 0.5, 0.0, pos[1] - 0.5) * 500.0,
texcoord: pos.into(),
});
graphics.create_ground_mesh(GroundMeshDescriptor {
texture: TextureID(1),
vertices: &vertices,
triangles: &[[0, 1, 2], [0, 2, 3]],
})
};
Self { mesh }
}
fn render(&self, canvas: &mut Canvas) {
canvas.draw_ground(self.mesh.share());
}
}
| 25.765363 | 94 | 0.481353 |
de55e7b0a473afd0067fd09772993f2b21244f09 | 1,022 | /*
* Metal API
*
* This is the API for Equinix Metal Product. Interact with your devices, user account, and projects.
*
* The version of the OpenAPI document: 1.0.0
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServerUnavailableInfo {
#[serde(rename = "facility", skip_serializing_if = "Option::is_none")]
pub facility: Option<String>,
#[serde(rename = "plan", skip_serializing_if = "Option::is_none")]
pub plan: Option<String>,
#[serde(rename = "quantity", skip_serializing_if = "Option::is_none")]
pub quantity: Option<String>,
#[serde(rename = "available", skip_serializing_if = "Option::is_none")]
pub available: Option<bool>,
}
impl ServerUnavailableInfo {
pub fn new() -> ServerUnavailableInfo {
ServerUnavailableInfo {
facility: None,
plan: None,
quantity: None,
available: None,
}
}
}
| 26.894737 | 101 | 0.65362 |
4b3c64a7192de4b3be06c72826291aa2e06c6735 | 413 | use num_prime::nt_funcs::{is_prime, primes};
/// Find all Mersenne primes 2^p - 1 with p < 128, returning the list of exponents p
fn list_mersenne() -> Vec<u64> {
primes(128)
.into_iter()
.filter(|p| is_prime(&(2u128.pow(*p as u32) - 1), None).probably())
.collect()
}
fn main() {
println!("Mersenne primes under 2^128:");
for p in list_mersenne() {
println!("2^{} - 1", p);
}
}
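// Expected output: the Mersenne prime exponents below 128, i.e.
// 2, 3, 5, 7, 13, 17, 19, 31, 61, 89, 107 and 127.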
| 24.294118 | 75 | 0.566586 |
082a5db97435190557c0d289f1083c6f42482f5d | 6,982 | // Copyright 2020 the Deno authors. All rights reserved. MIT license.
use super::Context;
use super::LintRule;
use crate::swc_util::extract_regex;
use swc_common::Span;
use swc_ecmascript::ast::{CallExpr, Expr, ExprOrSuper, NewExpr, Regex};
use swc_ecmascript::visit::noop_visit_type;
use swc_ecmascript::visit::Node;
use swc_ecmascript::visit::Visit;
use std::sync::Arc;
pub struct NoRegexSpaces;
impl LintRule for NoRegexSpaces {
fn new() -> Box<Self> {
Box::new(NoRegexSpaces)
}
fn code(&self) -> &'static str {
"no-regex-spaces"
}
fn lint_module(
&self,
context: Arc<Context>,
module: &swc_ecmascript::ast::Module,
) {
let mut visitor = NoRegexSpacesVisitor::new(context);
visitor.visit_module(module, module);
}
}
struct NoRegexSpacesVisitor {
context: Arc<Context>,
}
impl NoRegexSpacesVisitor {
fn new(context: Arc<Context>) -> Self {
Self { context }
}
fn check_regex(&self, regex: &str, span: Span) {
lazy_static! {
static ref DOUBLE_SPACE: regex::Regex =
regex::Regex::new(r"(?u) {2}").unwrap();
static ref BRACKETS: regex::Regex =
regex::Regex::new(r"\[.*?[^\\]\]").unwrap();
static ref SPACES: regex::Regex =
regex::Regex::new(r#"(?u)( {2,})(?: [+*{?]|[^+*{?]|$)"#).unwrap();
}
if !DOUBLE_SPACE.is_match(regex) {
return;
}
let mut character_classes = vec![];
for mtch in BRACKETS.find_iter(regex) {
character_classes.push((mtch.start(), mtch.end()));
}
for mtch in SPACES.find_iter(regex) {
let not_in_classes = &character_classes
.iter()
.all(|ref v| mtch.start() < v.0 || v.1 <= mtch.start());
if *not_in_classes {
self.context.add_diagnostic(
span,
"no-regex-spaces",
"more than one consecutive spaces in RegExp is not allowed",
);
return;
}
}
}
}
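// For example: `/bar  baz/` is reported (two consecutive spaces outside any
// character class), while `/[  ]/` is not (the run sits inside `[...]`),
// matching the valid/invalid cases exercised in the tests below.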
impl Visit for NoRegexSpacesVisitor {
noop_visit_type!();
fn visit_regex(&mut self, regex: &Regex, parent: &dyn Node) {
self.check_regex(regex.exp.to_string().as_str(), regex.span);
swc_ecmascript::visit::visit_regex(self, regex, parent);
}
fn visit_new_expr(&mut self, new_expr: &NewExpr, parent: &dyn Node) {
if let Expr::Ident(ident) = &*new_expr.callee {
if let Some(args) = &new_expr.args {
if let Some(regex) = extract_regex(&self.context.scope, ident, args) {
self.check_regex(regex.as_str(), new_expr.span);
}
}
}
swc_ecmascript::visit::visit_new_expr(self, new_expr, parent);
}
fn visit_call_expr(&mut self, call_expr: &CallExpr, parent: &dyn Node) {
if let ExprOrSuper::Expr(expr) = &call_expr.callee {
if let Expr::Ident(ident) = expr.as_ref() {
if let Some(regex) =
extract_regex(&self.context.scope, ident, &call_expr.args)
{
self.check_regex(regex.as_str(), call_expr.span);
}
}
}
swc_ecmascript::visit::visit_call_expr(self, call_expr, parent);
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_util::*;
#[test]
fn no_regex_spaces_valid() {
assert_lint_ok_n::<NoRegexSpaces>(vec![
"var foo = /foo/;",
"var foo = RegExp('foo')",
"var foo = / /;",
"var foo = RegExp(' ')",
"var foo = / a b c d /;",
"var foo = /bar {3}baz/g;",
"var foo = RegExp('bar {3}baz', 'g')",
"var foo = new RegExp('bar {3}baz')",
"var foo = /bar\t\t\tbaz/;",
"var foo = RegExp('bar\t\t\tbaz');",
"var foo = new RegExp('bar\t\t\tbaz');",
"var foo = / +/;",
"var foo = / ?/;",
"var foo = / */;",
"var foo = / {2}/;",
// don't report if RegExp shadowed
"var RegExp = function() {}; var foo = new RegExp('bar baz');",
"var RegExp = function() {}; var foo = RegExp('bar baz');",
// don't report if there are no consecutive spaces in the source code
r"var foo = /bar \\ baz/;",
r"var foo = /bar\\ \\ baz/;",
r"var foo = /bar \\u0020 baz/;",
r"var foo = /bar\\u0020\\u0020baz/;",
r"var foo = new RegExp('bar \\ baz')",
r"var foo = new RegExp('bar\\ \\ baz')",
r"var foo = new RegExp('bar \\\\ baz')",
r"var foo = new RegExp('bar \\u0020 baz')",
r"var foo = new RegExp('bar\\u0020\\u0020baz')",
r"var foo = new RegExp('bar \\\\u0020 baz')",
// don't report spaces in character classes
"var foo = /[ ]/;",
"var foo = /[ ]/;",
"var foo = / [ ] /;",
"var foo = / [ ] [ ] /;",
"var foo = new RegExp('[ ]');",
"var foo = new RegExp('[ ]');",
"var foo = new RegExp(' [ ] ');",
"var foo = RegExp(' [ ] [ ] ');",
"var foo = new RegExp(' \\[ \\] ');",
// TODO(@disizali) invalid regexes must handled on separated rule called `no-invalid-regexp`.
// "var foo = new RegExp('[ ');",
// "var foo = new RegExp('{ ', 'u');",
// "var foo = new RegExp(' \\[ ');",
]);
}
#[test]
fn no_regex_spaces_invalid() {
assert_lint_err::<NoRegexSpaces>("let foo = /bar baz/;", 10);
assert_lint_err::<NoRegexSpaces>("let foo = /bar baz/;", 10);
assert_lint_err::<NoRegexSpaces>("let foo = / a b c d /;", 10);
assert_lint_err::<NoRegexSpaces>("let foo = RegExp(' a b c d ');", 10);
assert_lint_err::<NoRegexSpaces>("let foo = RegExp('bar baz');", 10);
assert_lint_err::<NoRegexSpaces>("let foo = new RegExp('bar baz');", 10);
assert_lint_err::<NoRegexSpaces>(
"{ let RegExp = function() {}; } var foo = RegExp('bar baz');",
42,
);
assert_lint_err::<NoRegexSpaces>("let foo = /bar {3}baz/;", 10);
assert_lint_err::<NoRegexSpaces>("let foo = /bar ?baz/;", 10);
assert_lint_err::<NoRegexSpaces>("let foo = RegExp('bar +baz')", 10);
assert_lint_err::<NoRegexSpaces>("let foo = new RegExp('bar ');", 10);
assert_lint_err::<NoRegexSpaces>("let foo = /bar\\ baz/;", 10);
assert_lint_err::<NoRegexSpaces>("let foo = /[ ] /;", 10);
assert_lint_err::<NoRegexSpaces>("let foo = / [ ] /;", 10);
assert_lint_err::<NoRegexSpaces>("let foo = new RegExp('[ ] ');", 10);
assert_lint_err::<NoRegexSpaces>("let foo = RegExp(' [ ]');", 10);
assert_lint_err::<NoRegexSpaces>("let foo = /\\[ /;", 10);
assert_lint_err::<NoRegexSpaces>("let foo = /\\[ \\]/;", 10);
assert_lint_err::<NoRegexSpaces>("let foo = /(?: )/;", 10);
assert_lint_err::<NoRegexSpaces>("let foo = RegExp('^foo(?= )');", 10);
assert_lint_err::<NoRegexSpaces>("let foo = /\\ /", 10);
assert_lint_err::<NoRegexSpaces>("let foo = / \\ /", 10);
assert_lint_err::<NoRegexSpaces>("let foo = / foo /;", 10);
assert_lint_err::<NoRegexSpaces>("let foo = new RegExp('\\\\d ')", 10);
assert_lint_err::<NoRegexSpaces>("let foo = RegExp('\\u0041 ')", 10);
assert_lint_err::<NoRegexSpaces>(
"let foo = new RegExp('\\\\[ \\\\]');",
10,
);
}
}
| 34.91 | 99 | 0.563735 |
76f8df2c9223a3ac4549f11a8461b43d9add00dc | 7,170 | use crate::{FixedOutput, FixedOutputReset, Update};
use crypto_common::{InvalidLength, Key, KeyInit, Output, OutputSizeUser, Reset};
#[cfg(feature = "rand_core")]
use crate::rand_core::{CryptoRng, RngCore};
use core::fmt;
use crypto_common::typenum::Unsigned;
use subtle::{Choice, ConstantTimeEq};
/// Marker trait for Message Authentication algorithms.
#[cfg_attr(docsrs, doc(cfg(feature = "mac")))]
pub trait MacMarker {}
/// Convenience wrapper trait covering functionality of Message Authentication algorithms.
///
/// This trait wraps [`KeyInit`], [`Update`], [`FixedOutput`], and [`MacMarker`]
/// traits and provides additional convenience methods.
#[cfg_attr(docsrs, doc(cfg(feature = "mac")))]
pub trait Mac: OutputSizeUser + Sized {
/// Create new value from fixed size key.
fn new(key: &Key<Self>) -> Self
where
Self: KeyInit;
/// Generate random key using the provided [`CryptoRng`].
#[cfg(feature = "rand_core")]
#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))]
fn generate_key(rng: impl CryptoRng + RngCore) -> Key<Self>
where
Self: KeyInit;
/// Create new value from variable size key.
fn new_from_slice(key: &[u8]) -> Result<Self, InvalidLength>
where
Self: KeyInit;
/// Update state using the provided data.
fn update(&mut self, data: &[u8]);
/// Process input data in a chained manner.
#[must_use]
fn chain_update(self, data: impl AsRef<[u8]>) -> Self;
/// Obtain the result of a [`Mac`] computation as a [`CtOutput`] and consume
/// [`Mac`] instance.
fn finalize(self) -> CtOutput<Self>;
/// Obtain the result of a [`Mac`] computation as a [`CtOutput`] and reset
/// [`Mac`] instance.
fn finalize_reset(&mut self) -> CtOutput<Self>
where
Self: FixedOutputReset;
/// Reset MAC instance to its initial state.
fn reset(&mut self)
where
Self: Reset;
/// Check if tag/code value is correct for the processed input.
fn verify(self, tag: &Output<Self>) -> Result<(), MacError>;
/// Check truncated tag correctness using all bytes
/// of calculated tag.
///
/// Returns `Error` if `tag` is not valid or not equal in length
/// to MAC's output.
fn verify_slice(self, tag: &[u8]) -> Result<(), MacError>;
/// Check truncated tag correctness using left side bytes
/// (i.e. `tag[..n]`) of calculated tag.
///
/// Returns `Error` if `tag` is not valid or empty.
fn verify_truncated_left(self, tag: &[u8]) -> Result<(), MacError>;
/// Check truncated tag correctness using right side bytes
/// (i.e. `tag[n..]`) of calculated tag.
///
/// Returns `Error` if `tag` is not valid or empty.
fn verify_truncated_right(self, tag: &[u8]) -> Result<(), MacError>;
}
impl<T: Update + FixedOutput + MacMarker> Mac for T {
#[inline(always)]
fn new(key: &Key<Self>) -> Self
where
Self: KeyInit,
{
KeyInit::new(key)
}
#[inline(always)]
fn new_from_slice(key: &[u8]) -> Result<Self, InvalidLength>
where
Self: KeyInit,
{
KeyInit::new_from_slice(key)
}
#[inline]
fn update(&mut self, data: &[u8]) {
Update::update(self, data);
}
#[inline]
fn chain_update(mut self, data: impl AsRef<[u8]>) -> Self {
Update::update(&mut self, data.as_ref());
self
}
#[inline]
fn finalize(self) -> CtOutput<Self> {
CtOutput::new(self.finalize_fixed())
}
#[inline(always)]
fn finalize_reset(&mut self) -> CtOutput<Self>
where
Self: FixedOutputReset,
{
CtOutput::new(self.finalize_fixed_reset())
}
#[inline]
fn reset(&mut self)
where
Self: Reset,
{
Reset::reset(self)
}
#[inline]
fn verify(self, tag: &Output<Self>) -> Result<(), MacError> {
if self.finalize() == tag.into() {
Ok(())
} else {
Err(MacError)
}
}
#[inline]
fn verify_slice(self, tag: &[u8]) -> Result<(), MacError> {
let n = tag.len();
if n != Self::OutputSize::USIZE {
return Err(MacError);
}
let choice = self.finalize_fixed().ct_eq(tag);
if choice.unwrap_u8() == 1 {
Ok(())
} else {
Err(MacError)
}
}
fn verify_truncated_left(self, tag: &[u8]) -> Result<(), MacError> {
let n = tag.len();
if n == 0 || n > Self::OutputSize::USIZE {
return Err(MacError);
}
let choice = self.finalize_fixed()[..n].ct_eq(tag);
if choice.unwrap_u8() == 1 {
Ok(())
} else {
Err(MacError)
}
}
fn verify_truncated_right(self, tag: &[u8]) -> Result<(), MacError> {
let n = tag.len();
if n == 0 || n > Self::OutputSize::USIZE {
return Err(MacError);
}
let m = Self::OutputSize::USIZE - n;
let choice = self.finalize_fixed()[m..].ct_eq(tag);
if choice.unwrap_u8() == 1 {
Ok(())
} else {
Err(MacError)
}
}
#[cfg(feature = "rand_core")]
#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))]
#[inline]
fn generate_key(rng: impl CryptoRng + RngCore) -> Key<Self>
where
Self: KeyInit,
{
<T as KeyInit>::generate_key(rng)
}
}
/// Fixed size output value which provides a safe [`Eq`] implementation that
/// runs in constant time.
///
/// It is useful for implementing Message Authentication Codes (MACs).
#[derive(Clone)]
#[cfg_attr(docsrs, doc(cfg(feature = "mac")))]
pub struct CtOutput<T: OutputSizeUser> {
bytes: Output<T>,
}
impl<T: OutputSizeUser> CtOutput<T> {
/// Create a new [`CtOutput`] value.
#[inline(always)]
pub fn new(bytes: Output<T>) -> Self {
Self { bytes }
}
/// Get the inner [`Output`] array this type wraps.
#[inline(always)]
pub fn into_bytes(self) -> Output<T> {
self.bytes
}
}
impl<T: OutputSizeUser> From<Output<T>> for CtOutput<T> {
#[inline(always)]
fn from(bytes: Output<T>) -> Self {
Self { bytes }
}
}
impl<'a, T: OutputSizeUser> From<&'a Output<T>> for CtOutput<T> {
#[inline(always)]
fn from(bytes: &'a Output<T>) -> Self {
bytes.clone().into()
}
}
impl<T: OutputSizeUser> ConstantTimeEq for CtOutput<T> {
#[inline(always)]
fn ct_eq(&self, other: &Self) -> Choice {
self.bytes.ct_eq(&other.bytes)
}
}
impl<T: OutputSizeUser> PartialEq for CtOutput<T> {
#[inline(always)]
fn eq(&self, x: &CtOutput<T>) -> bool {
self.ct_eq(x).unwrap_u8() == 1
}
}
impl<T: OutputSizeUser> Eq for CtOutput<T> {}
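// Sketch (added for exposition): comparing two raw outputs through
// `CtOutput` so that `==` resolves to the constant-time `ct_eq` above
// rather than an early-exit byte comparison.
#[allow(dead_code)]
fn ct_compare_sketch<T: OutputSizeUser>(a: Output<T>, b: Output<T>) -> bool {
    CtOutput::<T>::new(a) == CtOutput::<T>::new(b)
}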
/// Error type for when the [`Output`] of a [`Mac`]
/// is not equal to the expected value.
#[derive(Default, Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(docsrs, doc(cfg(feature = "mac")))]
pub struct MacError;
impl fmt::Display for MacError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("MAC tag mismatch")
}
}
#[cfg(feature = "std")]
impl std::error::Error for MacError {}
| 27.366412 | 90 | 0.580753 |
f89e4a787ec24cbca5fa27ba5314ee5ce40068db | 2,611 | /*!
One-line description.
More detailed description, with
# Example
*/
use crate::forms::library::LibraryName;
use crate::scheme::ID_LIB_SCHEME;
use schemer_lang::error::Error;
use schemer_lang::eval::environment::Exports;
use schemer_lang::eval::{Environment, Expression, Procedure};
use schemer_lang::types::{Identifier, MutableRef};
// ------------------------------------------------------------------------------------------------
// Public Types
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Private Types
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Public Functions
// ------------------------------------------------------------------------------------------------
library_name!(
ID_LIB_SCHEME_WRITE,
"write",
ID_LIB_SCHEME,
scheme_write_name
);
pub fn scheme_write_exports() -> Exports {
let mut exports = Exports::default();
export_builtin!(exports, "display" => display "obj" ; "output-port");
export_builtin!(exports, "write" => write "obj" ; "output-port");
export_builtin!(exports, "write-simple" => write_simple "obj" ; "output-port");
export_builtin!(exports, "write-shared" => write_shared "obj" ; "output-port");
exports
}
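// For orientation (hedged, not from the original source): once the stubs
// below are implemented, these exports correspond to the R7RS
// `(scheme write)` procedures, used from Scheme roughly as:
//
//   (import (scheme write))
//   (display "hello") ; human-readable output: hello
//   (write "hello")   ; machine-readable datum: "hello"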
// ------------------------------------------------------------------------------------------------
// Implementations
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Private Functions
// ------------------------------------------------------------------------------------------------
fn display(_: Vec<Expression>, _: &mut MutableRef<Environment>) -> Result<Expression, Error> {
    // `(display obj [output-port])`: human-readable output; not yet implemented.
    todo!()
}
fn write(_: Vec<Expression>, _: &mut MutableRef<Environment>) -> Result<Expression, Error> {
    // `(write obj [output-port])`: machine-readable datum output; not yet implemented.
    todo!()
}
fn write_simple(_: Vec<Expression>, _: &mut MutableRef<Environment>) -> Result<Expression, Error> {
    // Like `write`, but never emits datum labels for shared structure; not yet implemented.
    todo!()
}
fn write_shared(_: Vec<Expression>, _: &mut MutableRef<Environment>) -> Result<Expression, Error> {
    // Like `write`, but emits datum labels for all shared structure; not yet implemented.
    todo!()
}
// ------------------------------------------------------------------------------------------------
// Modules
// ------------------------------------------------------------------------------------------------
| 35.283784 | 99 | 0.37342 |
efacc4b4baebb5a22c402002182a4525bebd62c5 | 3,674 | //! # Functions
//!
//! Functions on expressions that might be useful.
//!
use crate::prelude::*;
use polars_core::prelude::*;
/// Compute the covariance between two columns.
pub fn cov(a: Expr, b: Expr) -> Expr {
let name = "cov";
let function = move |a: Series, b: Series| {
let s = match a.dtype() {
DataType::Float32 => {
let ca_a = a.f32().unwrap();
let ca_b = b.f32().unwrap();
Series::new(name, &[polars_core::functions::cov(ca_a, ca_b)])
}
DataType::Float64 => {
let ca_a = a.f64().unwrap();
let ca_b = b.f64().unwrap();
Series::new(name, &[polars_core::functions::cov(ca_a, ca_b)])
}
_ => {
let a = a.cast::<Float64Type>()?;
let b = b.cast::<Float64Type>()?;
let ca_a = a.f64().unwrap();
let ca_b = b.f64().unwrap();
Series::new(name, &[polars_core::functions::cov(ca_a, ca_b)])
}
};
Ok(s)
};
    // NOTE: the declared output dtype (Float32) only matches the f32 branch;
    // the other branches above produce a Float64 series.
    map_binary(a, b, function, Some(Field::new(name, DataType::Float32))).alias(name)
}
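// Hedged usage sketch (not in the original source): building a covariance
// expression over two columns; the names "a" and "b" are placeholders and
// `col` comes from this crate's prelude.
#[allow(dead_code)]
fn cov_expr_sketch() -> Expr {
    cov(col("a"), col("b")).alias("cov_ab")
}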
/// Compute the pearson correlation between two columns.
pub fn pearson_corr(a: Expr, b: Expr) -> Expr {
let name = "pearson_corr";
let function = move |a: Series, b: Series| {
let s = match a.dtype() {
DataType::Float32 => {
let ca_a = a.f32().unwrap();
let ca_b = b.f32().unwrap();
Series::new(name, &[polars_core::functions::pearson_corr(ca_a, ca_b)])
}
DataType::Float64 => {
let ca_a = a.f64().unwrap();
let ca_b = b.f64().unwrap();
Series::new(name, &[polars_core::functions::pearson_corr(ca_a, ca_b)])
}
_ => {
let a = a.cast::<Float64Type>()?;
let b = b.cast::<Float64Type>()?;
let ca_a = a.f64().unwrap();
let ca_b = b.f64().unwrap();
Series::new(name, &[polars_core::functions::pearson_corr(ca_a, ca_b)])
}
};
Ok(s)
};
    // NOTE: same caveat as `cov` — the declared Float32 dtype only matches
    // the f32 branch.
    map_binary(a, b, function, Some(Field::new(name, DataType::Float32))).alias(name)
}
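// Analogous sketch for the correlation expression above; same placeholder
// column names.
#[allow(dead_code)]
fn pearson_corr_expr_sketch() -> Expr {
    pearson_corr(col("a"), col("b")).alias("corr_ab")
}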
/// Find the indexes that would sort these series in order of appearance.
/// That means that the first `Series` will be used to determine the ordering
/// until duplicates are found. Once duplicates are found, the next `Series` will
/// be used and so on.
pub fn argsort_by(by: Vec<Expr>, reverse: &[bool]) -> Expr {
let reverse = reverse.to_vec();
let function = NoEq::new(Arc::new(move |by: &mut [Series]| {
polars_core::functions::argsort_by(by, &reverse).map(|ca| ca.into_series())
}) as Arc<dyn SeriesUdf>);
Expr::Function {
input: by,
function,
output_type: Some(DataType::UInt32),
options: FunctionOptions {
collect_groups: ApplyOptions::ApplyFlat,
input_wildcard_expansion: false,
},
}
}
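// Sketch (illustrative): row indices that sort by "a" ascending, with ties
// broken by "b" descending — one `reverse` flag per key column.
#[allow(dead_code)]
fn argsort_by_sketch() -> Expr {
    argsort_by(vec![col("a"), col("b")], &[false, true])
}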
#[cfg(feature = "concat_str")]
/// Concat string columns in linear time
pub fn concat_str(s: Vec<Expr>, delimiter: &str) -> Expr {
let delimiter = delimiter.to_string();
let function = NoEq::new(Arc::new(move |s: &mut [Series]| {
polars_core::functions::concat_str(s, &delimiter).map(|ca| ca.into_series())
}) as Arc<dyn SeriesUdf>);
Expr::Function {
input: s,
function,
output_type: Some(DataType::Utf8),
options: FunctionOptions {
collect_groups: ApplyOptions::ApplyFlat,
input_wildcard_expansion: true,
},
}
}
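// Sketch (illustrative): join two string columns with a "-" delimiter;
// gated on the same feature flag as `concat_str` itself.
#[cfg(feature = "concat_str")]
#[allow(dead_code)]
fn concat_str_sketch() -> Expr {
    concat_str(vec![col("first"), col("last")], "-")
}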
| 36.019608 | 86 | 0.538378 |
9cb61c24922eed25c1404f9274b77aab1bf98734 | 1,030 | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that free regions ordering only goes one way. That is,
// we have `&'a Node<'b, T>`, which implies that `'a <= 'b`,
// but not `'b <= 'a`. Hence returning `&self.val` (which has lifetime
// `'a`) where `'b` is expected yields an error.
//
// This test began its life as a test for issue #4325.
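// Concretely: `&'a self` only tells the compiler `'a <= 'b` (a borrow
// cannot outlive the node it points into), while returning `&self.val`
// as `&'b T` would additionally require `'b <= 'a`, which nothing here
// establishes.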
struct Node<'b, T:'b> {
val: T,
next: Option<&'b Node<'b, T>>
}
impl<'b, T> Node<'b, T> {
fn get<'a>(&'a self) -> &'b T {
match self.next {
Some(ref next) => next.get(),
None => &self.val //~ ERROR cannot infer
}
}
}
fn main() {}
| 31.212121 | 70 | 0.643689 |