Dataset schema (one record per source file):

    column              type      observed range
    hexsha              string    length 40
    size                int64     4 to 1.05M
    content             string    length 4 to 1.05M
    avg_line_length     float64   1.33 to 100
    max_line_length     int64     1 to 1k
    alphanum_fraction   float64   0.25 to 1
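The six columns above are the whole per-record schema. As a minimal sketch of how one record could be modeled in Rust (assuming the serde and serde_json crates for deserialization, with an invented one-record JSON payload purely for illustration; none of this is specified by the dump itself):

use serde::Deserialize;

/// One row of the dataset, mirroring the column schema listed above.
/// Field names match the column names; types follow the declared
/// int64/float64/string kinds.
#[derive(Debug, Deserialize)]
struct Record {
    /// 40-character hex SHA of the source file.
    hexsha: String,
    /// File size in bytes (4 to ~1.05M in this dump).
    size: u64,
    /// Full file contents.
    content: String,
    /// Average line length of the file.
    avg_line_length: f64,
    /// Longest line in the file.
    max_line_length: u64,
    /// Fraction of alphanumeric characters (0.25 to 1).
    alphanum_fraction: f64,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical single-record JSON, only to show the shape of the schema.
    let json = r#"{
        "hexsha": "aba7320ba4293597dab0b0a8258bf38a8145e8e9",
        "size": 193,
        "content": "fn main() {}",
        "avg_line_length": 24.125,
        "max_line_length": 50,
        "alphanum_fraction": 0.694301
    }"#;
    let record: Record = serde_json::from_str(json)?;
    println!("{:?}", record);
    Ok(())
}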
hexsha: aba7320ba4293597dab0b0a8258bf38a8145e8e9 | size: 193
content:
use iapyx::cli::args::stats::IapyxStatsCommand;
use structopt::StructOpt;

pub fn main() {
    std::env::set_var("RUST_BACKTRACE", "full");
    IapyxStatsCommand::from_args().exec().unwrap()
}
avg_line_length: 24.125 | max_line_length: 50 | alphanum_fraction: 0.694301
hexsha: 290c60177cede70b169e401be65bc193038ef7a1 | size: 3,406
content:
#[doc = "Register `DMA_RawIntErrorStatus` reader"] pub struct R(crate::R<DMA_RAWINTERRORSTATUS_SPEC>); impl core::ops::Deref for R { type Target = crate::R<DMA_RAWINTERRORSTATUS_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<DMA_RAWINTERRORSTATUS_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<DMA_RAWINTERRORSTATUS_SPEC>) -> Self { R(reader) } } #[doc = "Register `DMA_RawIntErrorStatus` writer"] pub struct W(crate::W<DMA_RAWINTERRORSTATUS_SPEC>); impl core::ops::Deref for W { type Target = crate::W<DMA_RAWINTERRORSTATUS_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<DMA_RAWINTERRORSTATUS_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<DMA_RAWINTERRORSTATUS_SPEC>) -> Self { W(writer) } } #[doc = "Field `RawIntErrorStatus` reader - "] pub struct RAWINTERRORSTATUS_R(crate::FieldReader<u8, u8>); impl RAWINTERRORSTATUS_R { pub(crate) fn new(bits: u8) -> Self { RAWINTERRORSTATUS_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for RAWINTERRORSTATUS_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `RawIntErrorStatus` writer - "] pub struct RAWINTERRORSTATUS_W<'a> { w: &'a mut W, } impl<'a> RAWINTERRORSTATUS_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0xff) | (value as u32 & 0xff); self.w } } impl R { #[doc = "Bits 0:7"] #[inline(always)] pub fn raw_int_error_status(&self) -> RAWINTERRORSTATUS_R { RAWINTERRORSTATUS_R::new((self.bits & 0xff) as u8) } } impl W { #[doc = "Bits 0:7"] #[inline(always)] pub fn raw_int_error_status(&mut self) -> RAWINTERRORSTATUS_W { RAWINTERRORSTATUS_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "DMA_RawIntErrorStatus.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [dma_raw_int_error_status](index.html) module"] pub struct DMA_RAWINTERRORSTATUS_SPEC; impl crate::RegisterSpec for DMA_RAWINTERRORSTATUS_SPEC { type Ux = u32; } #[doc = "`read()` method returns [dma_raw_int_error_status::R](R) reader structure"] impl crate::Readable for DMA_RAWINTERRORSTATUS_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [dma_raw_int_error_status::W](W) writer structure"] impl crate::Writable for DMA_RAWINTERRORSTATUS_SPEC { type Writer = W; } #[doc = "`reset()` method sets DMA_RawIntErrorStatus to value 0"] impl crate::Resettable for DMA_RAWINTERRORSTATUS_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
avg_line_length: 33.067961 | max_line_length: 427 | alphanum_fraction: 0.647387
hexsha: 75ccb3710bc91aa5d89e4d7eb809347574540416 | size: 17,594
content:
//! Chapter13 - Intro to Automatic Differentiation - Let's Build A Deep Learning Framework //! //! https://github.com/iamtrask/Grokking-Deep-Learning/blob/master/Chapter13%20-%20Intro%20to%20Automatic%20Differentiation%20-%20Let's%20Build%20A%20Deep%20Learning%20Framework.ipynb use std::collections::{BTreeMap, BTreeSet}; use std::error::Error; use std::iter::FromIterator; use std::ops::Add; use datasets::text::babi_en_single_supporting_fact_task; use datasets::Dataset; use rand::distributions::Uniform; use rulinalg::matrix::{BaseMatrix, Matrix}; use grokking_deep_learning_rs::activations::{Sigmoid, Tanh}; use grokking_deep_learning_rs::layers::{Embedding, Layer, Linear, RNNCell, Sequential}; use grokking_deep_learning_rs::losses::{CrossEntropyLoss, Loss, MSELoss}; use grokking_deep_learning_rs::optimizers::{Optimizer, SGDOptimizer}; use grokking_deep_learning_rs::tensor::{Dot, Sum, Tensor}; use grokking_deep_learning_rs::{argmax, generate_random_vector}; fn main() { println!("\nIntroduction to Tensors\n"); introduction_to_tensors(); println!("\nIntroduction to Autograd\n"); introduction_to_autograd(); introduction_to_autograd_2(); println!("\nAutograd with multiple tensors\n"); autograd_with_multiple_tensors(); autograd_neg(); println!("\nUsing Autograd ot train a Neural Network\n"); training_using_autograd(); println!("\nAdding Automatic Optimization\n"); training_with_automatic_optimization(); println!("\nLayers Which Contain Layers\n"); layers_which_contain_layers(); println!("\nLoss Function Layers\n"); loss_function_layers(); println!("\nNonLinearity Layers\n"); nonlinearity_layers(); println!("\nEmbedding Layers\n"); embedding_layer(); println!("\nThe Embedding Layer\n"); cross_entropy_loss(); println!("\nRecurrent Neural Network\n"); recurrent_neural_network().unwrap(); } fn introduction_to_tensors() { let t1 = BasicTensor1 { data: vec![0.0] }; let t2 = BasicTensor1 { data: vec![1.0] }; println!("{:?}", t1 + t2); } #[derive(Debug)] struct BasicTensor1 { data: Vec<f64>, } impl Add for BasicTensor1 { type Output = BasicTensor1; fn add(self, other: BasicTensor1) -> Self::Output { BasicTensor1 { data: self .data .into_iter() .zip(other.data.into_iter()) .map(|(a, b)| a + b) .collect(), } } } fn introduction_to_autograd() { let x = BasicTensor2::new(vec![1.0, 2.0, 3.0, 4.0, 5.0]); let y = BasicTensor2::new(vec![2.0; 5]); let mut z = x + y; println!("{:?}", z); z.backward(BasicTensor2::new(vec![1.0, 1.0, 1.0, 1.0, 1.0])); let xy = z.creators.unwrap(); println!("{:?}", xy[0].grad); println!("{:?}", xy[1].grad); } #[derive(Debug, Clone)] enum BasicOperation { Add, Const, } #[derive(Debug, Clone)] struct BasicTensor2 { data: Vec<f64>, grad: Option<Box<BasicTensor2>>, creation_op: BasicOperation, creators: Option<Vec<BasicTensor2>>, } impl BasicTensor2 { fn new(data: Vec<f64>) -> Self { BasicTensor2 { data, grad: None, creation_op: BasicOperation::Const, creators: None, } } fn backward(&mut self, grad: BasicTensor2) { match self.creation_op { BasicOperation::Add => { for c in self.creators.as_mut().unwrap().iter_mut() { c.backward(grad.clone()); } } _ => { self.grad = Some(Box::new(grad)); } }; } } impl Add for BasicTensor2 { type Output = BasicTensor2; fn add(self, other: Self) -> BasicTensor2 { BasicTensor2 { data: self .data .iter() .zip(other.data.iter()) .map(|(a, b)| a + b) .collect(), grad: None, creation_op: BasicOperation::Add, creators: Some(vec![self, other]), } } } #[allow(clippy::many_single_char_names)] fn introduction_to_autograd_2() { let a = BasicTensor2::new(vec![1.0, 2.0, 
3.0, 4.0, 5.0]); let b = BasicTensor2::new(vec![2.0; 5]); let c = BasicTensor2::new(vec![5.0, 4.0, 3.0, 2.0, 1.0]); let d = BasicTensor2::new(vec![-1.0, -2.0, -3.0, -4.0, -5.0]); let e = a + b; let f = c + d; let mut g = e + f; g.backward(BasicTensor2::new(vec![1.0, 1.0, 1.0, 1.0, 1.0])); println!("{:?}", g); let ef = g.creators.as_ref().unwrap(); let ab = ef[0].creators.as_ref().unwrap(); let a = &ab[0]; println!("{:?}", a.grad); } #[allow(clippy::many_single_char_names)] fn autograd_with_multiple_tensors() { let a = Tensor::new_const(Matrix::new(1, 5, vec![1.0, 2.0, 3.0, 4.0, 5.0])); let b = Tensor::new_const(Matrix::new(1, 5, vec![2.0, 2.0, 2.0, 2.0, 2.0])); let c = Tensor::new_const(Matrix::new(1, 5, vec![5.0, 4.0, 3.0, 2.0, 1.0])); let d = &a + &b; let e = &b + &c; let f = &d + &e; // println!("{:#?}", f); f.backward(Tensor::grad(Matrix::new( 1, 5, vec![1.0, 1.0, 1.0, 1.0, 1.0], ))); println!("{:?}", b.0.borrow().grad); } #[allow(clippy::many_single_char_names)] fn autograd_neg() { let a = Tensor::new_const(Matrix::new(1, 5, vec![1.0, 2.0, 3.0, 4.0, 5.0])); let b = Tensor::new_const(Matrix::new(1, 5, vec![2.0, 2.0, 2.0, 2.0, 2.0])); let c = Tensor::new_const(Matrix::new(1, 5, vec![5.0, 4.0, 3.0, 2.0, 1.0])); let d = &a + &(-&b); let e = &(-&b) + &c; let f = &d + &e; f.backward(Tensor::grad(Matrix::new( 1, 5, vec![1.0, 1.0, 1.0, 1.0, 1.0], ))); println!("{:?}", b.0.borrow().grad); } /// Using Autograd to train a Neural Network fn training_using_autograd() { let data = Tensor::new_const(Matrix::new( 4, 2, vec![0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0], )); let target = Tensor::new_const(Matrix::new(4, 1, vec![0.0, 1.0, 0.0, 1.0])); let distribution = Uniform::new(0.0, 1.0); let w1 = Tensor::new_const(Matrix::new( 2, 3, generate_random_vector(2 * 3, 1.0, 0.0, &distribution), )); let w2 = Tensor::new_const(Matrix::new( 3, 1, generate_random_vector(3, 1.0, 0.0, &distribution), )); let alpha = 0.1; for _ in 0..10 { let pred = data.dot(&w1).dot(&w2); let loss = (&(&pred - &target) * &(&pred - &target)).sum(0); let (loss_rows, loss_cols) = (1, 1); println!("Loss: {:?}", loss.0.borrow().data); loss.backward(Tensor::grad(Matrix::ones(loss_rows, loss_cols))); { let mut w1 = w1.0.borrow_mut(); let grad = w1.grad.take(); w1.grad = None; let grad = grad.unwrap(); let grad = &grad.borrow().data; for i in 0..w1.data.rows() { for j in 0..w1.data.cols() { w1.data[[i, j]] -= alpha * grad[[i, j]]; } } } { let mut w2 = w2.0.borrow_mut(); let grad = w2.grad.take(); w2.grad = None; let grad = grad.unwrap(); let grad = &grad.borrow().data; for i in 0..w2.data.rows() { for j in 0..w2.data.cols() { w2.data[[i, j]] -= alpha * grad[[i, j]]; } } } } } /// Adding Automatic Optimization fn training_with_automatic_optimization() { let data = Tensor::new_const(Matrix::new( 4, 2, vec![0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0], )); let target = Tensor::new_const(Matrix::new(4, 1, vec![0.0, 1.0, 0.0, 1.0])); let distribution = Uniform::new(0.0, 1.0); let w1 = Tensor::new_const(Matrix::new( 2, 3, generate_random_vector(2 * 3, 1.0, 0.0, &distribution), )); let w2 = Tensor::new_const(Matrix::new( 3, 1, generate_random_vector(3, 1.0, 0.0, &distribution), )); let alpha = 0.1; let optimizer = SGDOptimizer::new(vec![&w1, &w2], alpha); for _ in 0..10 { // predict let pred = data.dot(&w1).dot(&w2); // compare let loss = (&(&pred - &target) * &(&pred - &target)).sum(0); let (loss_rows, loss_cols) = (1, 1); println!("Loss: {:?}", loss.0.borrow().data.data()); // calculate difference loss.backward(Tensor::grad(Matrix::ones(loss_rows, 
loss_cols))); // learn optimizer.step(true); } } /// Layers Which Contain Layers fn layers_which_contain_layers() { let data = Tensor::new_const(Matrix::new( 4, 2, vec![0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0], )); let target = Tensor::new_const(Matrix::new(4, 1, vec![0.0, 1.0, 0.0, 1.0])); let model = Sequential::new(vec![ Box::new(Linear::new(2, 3, false)), Box::new(Linear::new(3, 1, false)), ]); let optim = SGDOptimizer::new(model.parameters(), 0.05); for _ in 0..10 { let pred = model.forward(&[&data]); // compare let loss = (&(&pred[0] - &target) * &(&pred[0] - &target)).sum(0); println!("Loss: {:?}", loss.0.borrow().data.data()); // calculate difference loss.backward(Tensor::grad(Matrix::ones(1, 1))); // learn optim.step(true); } } fn loss_function_layers() { let data = Tensor::new_const(Matrix::new( 4, 2, vec![0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0], )); let target = Tensor::new_const(Matrix::new(4, 1, vec![0.0, 1.0, 0.0, 1.0])); let model = Sequential::new(vec![ Box::new(Linear::new(2, 3, false)), Box::new(Linear::new(3, 1, false)), ]); let criterion = MSELoss; let optim = SGDOptimizer::new(model.parameters(), 0.05); for _ in 0..10 { let pred = model.forward(&[&data]); // compare let loss = criterion.forward(&pred[0], &target); println!("Loss: {:?}", loss.0.borrow().data.data()); // calculate difference loss.backward(Tensor::grad(Matrix::ones(1, 1))); // learn optim.step(true); } } /// NonLinearity Layers fn nonlinearity_layers() { let data = Tensor::new_const(Matrix::new( 4, 2, vec![0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0], )); let target = Tensor::new_const(Matrix::new(4, 1, vec![0.0, 1.0, 0.0, 1.0])); let model = Sequential::new(vec![ Box::new(Linear::new(2, 3, false)), Box::new(Tanh), Box::new(Linear::new(3, 1, false)), Box::new(Sigmoid), ]); let criterion = MSELoss; let optim = SGDOptimizer::new(model.parameters(), 0.5); for _ in 0..10 { let pred = model.forward(&[&data]); // compare let loss = criterion.forward(&pred[0], &target); println!("Loss: {:?}", loss.0.borrow().data.data()); // calculate difference loss.backward(Tensor::grad(Matrix::ones(1, 1))); // learn optim.step(true); } } /// The Embedding Layer fn embedding_layer() { let data = Tensor::new_const(Matrix::new(1, 4, vec![1.0, 2.0, 1.0, 2.0])); let target = Tensor::new_const(Matrix::new(4, 1, vec![0.0, 1.0, 0.0, 1.0])); let model = Sequential::new(vec![ Box::new(Embedding::new(5, 3)), Box::new(Tanh), Box::new(Linear::new(3, 1, true)), Box::new(Sigmoid), ]); let criterion = MSELoss; let optim = SGDOptimizer::new(model.parameters(), 0.07); for _ in 0..10 { let pred = model.forward(&[&data]); // compare let loss = criterion.forward(&pred[0], &target); println!("Loss: {:?}", loss.0.borrow().data.data()); // calculate difference loss.backward(Tensor::grad(Matrix::ones(1, 1))); // learn optim.step(true); } } /// The Cross Entropy Layer fn cross_entropy_loss() { let data = Tensor::new_const(Matrix::new(1, 4, vec![1.0, 2.0, 1.0, 2.0])); let target = Tensor::new_const(Matrix::new(4, 1, vec![0.0, 1.0, 0.0, 1.0])); let model = Sequential::new(vec![ Box::new(Embedding::new(3, 3)), Box::new(Tanh), Box::new(Linear::new(3, 4, true)), ]); let criterion = CrossEntropyLoss; let optim = SGDOptimizer::new(model.parameters(), 0.1); for _ in 0..10 { let pred = model.forward(&[&data]); // println!("pred {}", pred.0.borrow().data); // compare let loss = criterion.forward(&pred[0], &target); println!("Loss: {:?}", loss.0.borrow().data.data()); // calculate difference loss.backward(Tensor::grad(Matrix::ones(1, 1))); // learn optim.step(true); } 
} #[allow(clippy::needless_range_loop)] fn recurrent_neural_network() -> Result<(), Box<dyn Error>> { let (train_data, _) = babi_en_single_supporting_fact_task()?; let train_data: Vec<Vec<String>> = train_data .map(|v| vec![v.0, v.1 /*, (v.2).0*/]) .flat_map(|v| v.into_iter()) .map(|s| { s.split_whitespace() .map(|w| { w.chars() .filter(|c| (*c >= 'a' && *c <= 'z') || (*c >= 'A' && *c <= 'Z')) .collect() }) .collect() }) .collect(); let total_data_size = train_data.len(); let words = BTreeSet::from_iter(train_data.iter().flat_map(|v| v.iter())); let word_count = words.len(); let word_index = BTreeMap::from_iter(words.into_iter().zip(0..word_count)); let inverted_word_index = BTreeMap::from_iter(word_index.clone().into_iter().map(|(k, v)| (v, k))); let train_data: Vec<Vec<f64>> = train_data .iter() .map(|s| s.iter().map(|w| word_index[w] as f64).collect()) .collect(); let max_len = train_data.iter().map(|s| s.len()).max().unwrap(); let pad = word_index.len() + 1; let batch_size = 250; let train_data: Vec<_> = train_data .into_iter() .batch(batch_size, true) .map(|v: Vec<Vec<f64>>| { let mut ans = vec![vec![0.0; batch_size]; max_len]; for i in 0..batch_size { for j in 0..v[i].len() { ans[j][i] = v[i][j]; } for j in v[i].len()..max_len { ans[j][i] = pad as f64; } } ans }) .collect(); let embedding_size = 16; // net let embed = Embedding::new(word_index.len() + 2, embedding_size); let model = RNNCell::new(embedding_size, 16, word_index.len() + 2, Box::new(Sigmoid)); let criterion = CrossEntropyLoss; let mut parameters = embed.parameters(); parameters.append(&mut model.parameters()); let optim = SGDOptimizer::new(parameters, 0.01); for _ in 0..10 { let mut total_loss = 0.0; let mut total_accuracy = 0.0; for batch in train_data.iter() { let mut hidden = model.create_start_state(batch_size); let mut output = None; let len = batch.len(); for row in batch.iter().take(len - 1) { let input = Tensor::new_const(Matrix::new(1, batch_size, row.clone())); let rnn_input = embed.forward(&[&input]).remove(0); let mut outputs = model.forward(&[&rnn_input, &hidden]); output = Some(outputs.remove(0)); hidden = outputs.remove(0); } let output = output.unwrap(); let target = Tensor::new_const(Matrix::new(batch_size, 1, batch[len - 1].clone())); let loss = criterion.forward(&output, &target); loss.backward(Tensor::new_const(Matrix::ones(1, 1))); optim.step(true); let current_loss = loss.0.borrow().data.data()[0]; total_loss += current_loss; let current_accuracy: f64 = output .0 .borrow() .data .row_iter() .zip(batch[len - 1].iter()) .map(|(row, ix)| { if argmax(row.raw_slice()) == (*ix) as usize { 1.0 } else { 0.0 } }) .sum(); total_accuracy += current_accuracy; } println!( "Loss: {}, Accuracy: {}", total_loss, total_accuracy / (total_data_size as f64) ); } let batch = vec![ vec![word_index[&"Mary".to_owned()] as f64], vec![word_index[&"moved".to_owned()] as f64], vec![word_index[&"to".to_owned()] as f64], vec![word_index[&"the".to_owned()] as f64], ]; let mut hidden = model.create_start_state(1); let mut output = None; for row in batch.iter() { let input = Tensor::new_const(Matrix::new(1, 1, row.clone())); let rnn_input = embed.forward(&[&input]).remove(0); let mut outputs = model.forward(&[&rnn_input, &hidden]); output = Some(outputs.remove(0)); hidden = outputs.remove(0); } let output = argmax(output.unwrap().0.borrow().data.row(0).raw_slice()); println!("Prediction: {}", inverted_word_index[&output]); Ok(()) }
avg_line_length: 27.533646 | max_line_length: 183 | alphanum_fraction: 0.527396
hexsha: 6239e9a796c5939ebc66fc9b6cda34ee27a41e7f | size: 768
content:
//! Errors returned by this library

#[cfg(feature = "remote_list")]
use std::net::TcpStream;

error_chain! {
    foreign_links {
        Io(::std::io::Error);
        Url(::url::ParseError);
        Tls(::native_tls::Error) #[cfg(feature = "remote_list")];
        Handshake(::native_tls::HandshakeError<TcpStream>) #[cfg(feature = "remote_list")];
    }

    errors {
        UnsupportedScheme { }
        InvalidList { }
        NoHost { }
        InvalidHost { }
        InvalidEmail { }
        InvalidRule(t: String) {
            description("invalid rule")
            display("invalid rule: '{}'", t)
        }
        InvalidDomain(t: String) {
            description("invalid domain")
            display("invalid domain: '{}'", t)
        }
    }
}
avg_line_length: 21.333333 | max_line_length: 91 | alphanum_fraction: 0.529948
hexsha: 8a55bf55bebc7535a8fb3d64a0b3c18a3e04caba | size: 2,298
content:
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0

use crate::value::{Struct, Value};
use canonical_serialization::SimpleDeserializer;
use types::{
    account_address::AccountAddress, account_config::AccountResource, byte_array::ByteArray,
};

#[test]
fn account_type() {
    // mimic an Account
    let authentication_key = ByteArray::new(vec![5u8; 32]);
    let balance = 128u64;
    let received_events_count = 8u64;
    let sent_events_count = 16u64;
    let sequence_number = 32u64;
    let sent_events_key = ByteArray::new(AccountAddress::random().to_vec());
    let recv_events_key = ByteArray::new(AccountAddress::random().to_vec());

    let mut account_fields: Vec<Value> = Vec::new();
    account_fields.push(Value::byte_array(authentication_key.clone()));
    let mut coin_fields: Vec<Value> = Vec::new();
    coin_fields.push(Value::u64(balance));
    account_fields.push(Value::struct_(Struct::new(coin_fields)));
    account_fields.push(Value::bool(false));
    account_fields.push(Value::bool(false));
    account_fields.push(Value::struct_(Struct::new(vec![
        Value::u64(received_events_count),
        Value::byte_array(recv_events_key.clone()),
    ])));
    account_fields.push(Value::struct_(Struct::new(vec![
        Value::u64(sent_events_count),
        Value::byte_array(sent_events_key.clone()),
    ])));
    account_fields.push(Value::u64(sequence_number));

    let account = Value::struct_(Struct::new(account_fields));

    let blob = &account.simple_serialize().expect("blob must serialize");
    let account_resource: AccountResource =
        SimpleDeserializer::deserialize(blob).expect("must deserialize");

    assert_eq!(*account_resource.authentication_key(), authentication_key);
    assert_eq!(account_resource.balance(), balance);
    assert_eq!(
        account_resource.sent_events().key().as_bytes(),
        sent_events_key.as_bytes()
    );
    assert_eq!(
        account_resource.received_events().count(),
        received_events_count
    );
    assert_eq!(
        account_resource.received_events().key().as_bytes(),
        recv_events_key.as_bytes()
    );
    assert_eq!(account_resource.sent_events().count(), sent_events_count);
    assert_eq!(account_resource.sequence_number(), sequence_number);
}
avg_line_length: 38.3 | max_line_length: 92 | alphanum_fraction: 0.70322
hexsha: 6240964dd5463115993a1798272b099e4be7567a | size: 51,646
content:
//! Definitions found commonly among almost all Unix derivatives //! //! More functions and definitions can be found in the more specific modules //! according to the platform in question. pub type int8_t = i8; pub type int16_t = i16; pub type int32_t = i32; pub type int64_t = i64; pub type uint8_t = u8; pub type uint16_t = u16; pub type uint32_t = u32; pub type uint64_t = u64; pub type c_schar = i8; pub type c_uchar = u8; pub type c_short = i16; pub type c_ushort = u16; pub type c_int = i32; pub type c_uint = u32; pub type c_float = f32; pub type c_double = f64; pub type c_longlong = i64; pub type c_ulonglong = u64; pub type intmax_t = i64; pub type uintmax_t = u64; pub type size_t = usize; pub type ptrdiff_t = isize; pub type intptr_t = isize; pub type uintptr_t = usize; pub type ssize_t = isize; pub type pid_t = i32; pub type uid_t = u32; pub type gid_t = u32; pub type in_addr_t = u32; pub type in_port_t = u16; pub type sighandler_t = ::size_t; pub type cc_t = ::c_uchar; #[cfg_attr(feature = "extra_traits", derive(Debug))] pub enum DIR {} impl ::Copy for DIR {} impl ::Clone for DIR { fn clone(&self) -> DIR { *self } } #[cfg_attr(feature = "extra_traits", derive(Debug))] pub enum locale_t {} impl ::Copy for locale_t {} impl ::Clone for locale_t { fn clone(&self) -> locale_t { *self } } s! { pub struct group { pub gr_name: *mut ::c_char, pub gr_passwd: *mut ::c_char, pub gr_gid: ::gid_t, pub gr_mem: *mut *mut ::c_char, } pub struct utimbuf { pub actime: time_t, pub modtime: time_t, } pub struct timeval { pub tv_sec: time_t, pub tv_usec: suseconds_t, } // linux x32 compatibility // See https://sourceware.org/bugzilla/show_bug.cgi?id=16437 pub struct timespec { pub tv_sec: time_t, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] pub tv_nsec: i64, #[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))] pub tv_nsec: ::c_long, } pub struct rlimit { pub rlim_cur: rlim_t, pub rlim_max: rlim_t, } pub struct rusage { pub ru_utime: timeval, pub ru_stime: timeval, pub ru_maxrss: c_long, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] __pad1: u32, pub ru_ixrss: c_long, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] __pad2: u32, pub ru_idrss: c_long, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] __pad3: u32, pub ru_isrss: c_long, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] __pad4: u32, pub ru_minflt: c_long, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] __pad5: u32, pub ru_majflt: c_long, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] __pad6: u32, pub ru_nswap: c_long, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] __pad7: u32, pub ru_inblock: c_long, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] __pad8: u32, pub ru_oublock: c_long, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] __pad9: u32, pub ru_msgsnd: c_long, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] __pad10: u32, pub ru_msgrcv: c_long, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] __pad11: u32, pub ru_nsignals: c_long, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] __pad12: u32, pub ru_nvcsw: c_long, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] __pad13: u32, pub ru_nivcsw: c_long, #[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))] __pad14: u32, #[cfg(any(target_env = "musl", target_os = "emscripten"))] __reserved: [c_long; 16], } pub struct ipv6_mreq { pub 
ipv6mr_multiaddr: in6_addr, #[cfg(target_os = "android")] pub ipv6mr_interface: ::c_int, #[cfg(not(target_os = "android"))] pub ipv6mr_interface: ::c_uint, } pub struct hostent { pub h_name: *mut ::c_char, pub h_aliases: *mut *mut ::c_char, pub h_addrtype: ::c_int, pub h_length: ::c_int, pub h_addr_list: *mut *mut ::c_char, } pub struct iovec { pub iov_base: *mut ::c_void, pub iov_len: ::size_t, } pub struct pollfd { pub fd: ::c_int, pub events: ::c_short, pub revents: ::c_short, } pub struct winsize { pub ws_row: ::c_ushort, pub ws_col: ::c_ushort, pub ws_xpixel: ::c_ushort, pub ws_ypixel: ::c_ushort, } pub struct linger { pub l_onoff: ::c_int, pub l_linger: ::c_int, } pub struct sigval { // Actually a union of an int and a void* pub sival_ptr: *mut ::c_void } // <sys/time.h> pub struct itimerval { pub it_interval: ::timeval, pub it_value: ::timeval, } // <sys/times.h> pub struct tms { pub tms_utime: ::clock_t, pub tms_stime: ::clock_t, pub tms_cutime: ::clock_t, pub tms_cstime: ::clock_t, } pub struct servent { pub s_name: *mut ::c_char, pub s_aliases: *mut *mut ::c_char, pub s_port: ::c_int, pub s_proto: *mut ::c_char, } pub struct protoent { pub p_name: *mut ::c_char, pub p_aliases: *mut *mut ::c_char, pub p_proto: ::c_int, } } pub const INT_MIN: c_int = -2147483648; pub const INT_MAX: c_int = 2147483647; pub const SIG_DFL: sighandler_t = 0 as sighandler_t; pub const SIG_IGN: sighandler_t = 1 as sighandler_t; pub const SIG_ERR: sighandler_t = !0 as sighandler_t; pub const DT_UNKNOWN: u8 = 0; pub const DT_FIFO: u8 = 1; pub const DT_CHR: u8 = 2; pub const DT_DIR: u8 = 4; pub const DT_BLK: u8 = 6; pub const DT_REG: u8 = 8; pub const DT_LNK: u8 = 10; pub const DT_SOCK: u8 = 12; cfg_if! { if #[cfg(not(target_os = "redox"))] { pub const FD_CLOEXEC: ::c_int = 0x1; } } pub const USRQUOTA: ::c_int = 0; pub const GRPQUOTA: ::c_int = 1; pub const SIGIOT: ::c_int = 6; pub const S_ISUID: ::c_int = 0x800; pub const S_ISGID: ::c_int = 0x400; pub const S_ISVTX: ::c_int = 0x200; pub const IF_NAMESIZE: ::size_t = 16; pub const IFNAMSIZ: ::size_t = IF_NAMESIZE; pub const LOG_EMERG: ::c_int = 0; pub const LOG_ALERT: ::c_int = 1; pub const LOG_CRIT: ::c_int = 2; pub const LOG_ERR: ::c_int = 3; pub const LOG_WARNING: ::c_int = 4; pub const LOG_NOTICE: ::c_int = 5; pub const LOG_INFO: ::c_int = 6; pub const LOG_DEBUG: ::c_int = 7; pub const LOG_KERN: ::c_int = 0; pub const LOG_USER: ::c_int = 1 << 3; pub const LOG_MAIL: ::c_int = 2 << 3; pub const LOG_DAEMON: ::c_int = 3 << 3; pub const LOG_AUTH: ::c_int = 4 << 3; pub const LOG_SYSLOG: ::c_int = 5 << 3; pub const LOG_LPR: ::c_int = 6 << 3; pub const LOG_NEWS: ::c_int = 7 << 3; pub const LOG_UUCP: ::c_int = 8 << 3; pub const LOG_LOCAL0: ::c_int = 16 << 3; pub const LOG_LOCAL1: ::c_int = 17 << 3; pub const LOG_LOCAL2: ::c_int = 18 << 3; pub const LOG_LOCAL3: ::c_int = 19 << 3; pub const LOG_LOCAL4: ::c_int = 20 << 3; pub const LOG_LOCAL5: ::c_int = 21 << 3; pub const LOG_LOCAL6: ::c_int = 22 << 3; pub const LOG_LOCAL7: ::c_int = 23 << 3; pub const LOG_PID: ::c_int = 0x01; pub const LOG_CONS: ::c_int = 0x02; pub const LOG_ODELAY: ::c_int = 0x04; pub const LOG_NDELAY: ::c_int = 0x08; pub const LOG_NOWAIT: ::c_int = 0x10; pub const LOG_PRIMASK: ::c_int = 7; pub const LOG_FACMASK: ::c_int = 0x3f8; pub const PRIO_PROCESS: ::c_int = 0; pub const PRIO_PGRP: ::c_int = 1; pub const PRIO_USER: ::c_int = 2; pub const PRIO_MIN: ::c_int = -20; pub const PRIO_MAX: ::c_int = 20; pub const IPPROTO_ICMP: ::c_int = 1; pub const IPPROTO_ICMPV6: ::c_int = 58; pub const 
IPPROTO_TCP: ::c_int = 6; pub const IPPROTO_UDP: ::c_int = 17; pub const IPPROTO_IP: ::c_int = 0; pub const IPPROTO_IPV6: ::c_int = 41; pub const INADDR_LOOPBACK: in_addr_t = 2130706433; pub const INADDR_ANY: in_addr_t = 0; pub const INADDR_BROADCAST: in_addr_t = 4294967295; pub const INADDR_NONE: in_addr_t = 4294967295; pub const ARPOP_REQUEST: u16 = 1; pub const ARPOP_REPLY: u16 = 2; pub const ATF_COM: ::c_int = 0x02; pub const ATF_PERM: ::c_int = 0x04; pub const ATF_PUBL: ::c_int = 0x08; pub const ATF_USETRAILERS: ::c_int = 0x10; cfg_if! { if #[cfg(target_os = "l4re")] { // required libraries for L4Re are linked externally, ATM } else if #[cfg(feature = "use_std")] { // cargo build, don't pull in anything extra as the libstd dep // already pulls in all libs. } else if #[cfg(target_env = "musl")] { #[cfg_attr(feature = "rustc-dep-of-std", link(name = "c", kind = "static", cfg(target_feature = "crt-static")))] #[cfg_attr(feature = "rustc-dep-of-std", link(name = "c", cfg(not(target_feature = "crt-static"))))] extern {} } else if #[cfg(target_os = "emscripten")] { #[link(name = "c")] extern {} } else if #[cfg(all(target_os = "netbsd", feature = "rustc-dep-of-std", target_vendor = "rumprun"))] { // Since we don't use -nodefaultlibs on Rumprun, libc is always pulled // in automatically by the linker. We avoid passing it explicitly, as it // causes some versions of binutils to crash with an assertion failure. #[link(name = "m")] extern {} } else if #[cfg(any(target_os = "macos", target_os = "ios", target_os = "android", target_os = "openbsd"))] { #[link(name = "c")] #[link(name = "m")] extern {} } else if #[cfg(target_os = "haiku")] { #[link(name = "root")] #[link(name = "network")] extern {} } else if #[cfg(target_env = "newlib")] { #[link(name = "c")] #[link(name = "m")] extern {} } else if #[cfg(target_os = "hermit")] { // no_default_libraries is set to false for HermitCore, so only a link // to "pthread" needs to be added. 
#[link(name = "pthread")] extern {} } else if #[cfg(target_env = "illumos")] { #[link(name = "c")] #[link(name = "m")] extern {} } else if #[cfg(target_os = "redox")] { #[cfg_attr(feature = "rustc-dep-of-std", link(name = "c", kind = "static-nobundle", cfg(target_feature = "crt-static")))] #[cfg_attr(feature = "rustc-dep-of-std", link(name = "c", cfg(not(target_feature = "crt-static"))))] extern {} } else { #[link(name = "c")] #[link(name = "m")] #[link(name = "rt")] #[link(name = "pthread")] extern {} } } #[cfg_attr(feature = "extra_traits", derive(Debug))] pub enum FILE {} impl ::Copy for FILE {} impl ::Clone for FILE { fn clone(&self) -> FILE { *self } } #[cfg_attr(feature = "extra_traits", derive(Debug))] pub enum fpos_t {} // TODO: fill this out with a struct impl ::Copy for fpos_t {} impl ::Clone for fpos_t { fn clone(&self) -> fpos_t { *self } } extern { pub fn isalnum(c: c_int) -> c_int; pub fn isalpha(c: c_int) -> c_int; pub fn iscntrl(c: c_int) -> c_int; pub fn isdigit(c: c_int) -> c_int; pub fn isgraph(c: c_int) -> c_int; pub fn islower(c: c_int) -> c_int; pub fn isprint(c: c_int) -> c_int; pub fn ispunct(c: c_int) -> c_int; pub fn isspace(c: c_int) -> c_int; pub fn isupper(c: c_int) -> c_int; pub fn isxdigit(c: c_int) -> c_int; pub fn tolower(c: c_int) -> c_int; pub fn toupper(c: c_int) -> c_int; #[cfg_attr( all(target_os = "macos", target_arch = "x86"), link_name = "fopen$UNIX2003" )] pub fn fopen(filename: *const c_char, mode: *const c_char) -> *mut FILE; #[cfg_attr( all(target_os = "macos", target_arch = "x86"), link_name = "freopen$UNIX2003" )] pub fn freopen(filename: *const c_char, mode: *const c_char, file: *mut FILE) -> *mut FILE; pub fn fflush(file: *mut FILE) -> c_int; pub fn fclose(file: *mut FILE) -> c_int; pub fn remove(filename: *const c_char) -> c_int; pub fn rename(oldname: *const c_char, newname: *const c_char) -> c_int; pub fn tmpfile() -> *mut FILE; pub fn setvbuf(stream: *mut FILE, buffer: *mut c_char, mode: c_int, size: size_t) -> c_int; pub fn setbuf(stream: *mut FILE, buf: *mut c_char); pub fn getchar() -> c_int; pub fn putchar(c: c_int) -> c_int; pub fn fgetc(stream: *mut FILE) -> c_int; pub fn fgets(buf: *mut c_char, n: c_int, stream: *mut FILE) -> *mut c_char; pub fn fputc(c: c_int, stream: *mut FILE) -> c_int; #[cfg_attr( all(target_os = "macos", target_arch = "x86"), link_name = "fputs$UNIX2003" )] pub fn fputs(s: *const c_char, stream: *mut FILE) -> c_int; pub fn puts(s: *const c_char) -> c_int; pub fn ungetc(c: c_int, stream: *mut FILE) -> c_int; pub fn fread(ptr: *mut c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; #[cfg_attr( all(target_os = "macos", target_arch = "x86"), link_name = "fwrite$UNIX2003" )] pub fn fwrite(ptr: *const c_void, size: size_t, nobj: size_t, stream: *mut FILE) -> size_t; pub fn fseek(stream: *mut FILE, offset: c_long, whence: c_int) -> c_int; pub fn ftell(stream: *mut FILE) -> c_long; pub fn rewind(stream: *mut FILE); #[cfg_attr(target_os = "netbsd", link_name = "__fgetpos50")] pub fn fgetpos(stream: *mut FILE, ptr: *mut fpos_t) -> c_int; #[cfg_attr(target_os = "netbsd", link_name = "__fsetpos50")] pub fn fsetpos(stream: *mut FILE, ptr: *const fpos_t) -> c_int; pub fn feof(stream: *mut FILE) -> c_int; pub fn ferror(stream: *mut FILE) -> c_int; pub fn perror(s: *const c_char); pub fn atoi(s: *const c_char) -> c_int; #[cfg_attr( all(target_os = "macos", target_arch = "x86"), link_name = "strtod$UNIX2003" )] pub fn strtod(s: *const c_char, endp: *mut *mut c_char) -> c_double; pub fn strtol(s: *const c_char, 
endp: *mut *mut c_char, base: c_int) -> c_long; pub fn strtoul(s: *const c_char, endp: *mut *mut c_char, base: c_int) -> c_ulong; pub fn calloc(nobj: size_t, size: size_t) -> *mut c_void; pub fn malloc(size: size_t) -> *mut c_void; pub fn realloc(p: *mut c_void, size: size_t) -> *mut c_void; pub fn free(p: *mut c_void); pub fn abort() -> !; pub fn exit(status: c_int) -> !; pub fn _exit(status: c_int) -> !; pub fn atexit(cb: extern fn()) -> c_int; #[cfg_attr( all(target_os = "macos", target_arch = "x86"), link_name = "system$UNIX2003" )] pub fn system(s: *const c_char) -> c_int; pub fn getenv(s: *const c_char) -> *mut c_char; pub fn strcpy(dst: *mut c_char, src: *const c_char) -> *mut c_char; pub fn strncpy(dst: *mut c_char, src: *const c_char, n: size_t) -> *mut c_char; pub fn strcat(s: *mut c_char, ct: *const c_char) -> *mut c_char; pub fn strncat(s: *mut c_char, ct: *const c_char, n: size_t) -> *mut c_char; pub fn strcmp(cs: *const c_char, ct: *const c_char) -> c_int; pub fn strncmp(cs: *const c_char, ct: *const c_char, n: size_t) -> c_int; pub fn strcoll(cs: *const c_char, ct: *const c_char) -> c_int; pub fn strchr(cs: *const c_char, c: c_int) -> *mut c_char; pub fn strrchr(cs: *const c_char, c: c_int) -> *mut c_char; pub fn strspn(cs: *const c_char, ct: *const c_char) -> size_t; pub fn strcspn(cs: *const c_char, ct: *const c_char) -> size_t; pub fn strdup(cs: *const c_char) -> *mut c_char; pub fn strpbrk(cs: *const c_char, ct: *const c_char) -> *mut c_char; pub fn strstr(cs: *const c_char, ct: *const c_char) -> *mut c_char; pub fn strcasecmp(s1: *const c_char, s2: *const c_char) -> c_int; pub fn strncasecmp(s1: *const c_char, s2: *const c_char, n: size_t) -> c_int; pub fn strlen(cs: *const c_char) -> size_t; pub fn strnlen(cs: *const c_char, maxlen: size_t) -> size_t; #[cfg_attr( all(target_os = "macos", target_arch = "x86"), link_name = "strerror$UNIX2003" )] pub fn strerror(n: c_int) -> *mut c_char; pub fn strtok(s: *mut c_char, t: *const c_char) -> *mut c_char; pub fn strxfrm(s: *mut c_char, ct: *const c_char, n: size_t) -> size_t; pub fn wcslen(buf: *const wchar_t) -> size_t; pub fn wcstombs(dest: *mut c_char, src: *const wchar_t, n: size_t) -> ::size_t; pub fn memchr(cx: *const c_void, c: c_int, n: size_t) -> *mut c_void; pub fn memcmp(cx: *const c_void, ct: *const c_void, n: size_t) -> c_int; pub fn memcpy(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; pub fn memmove(dest: *mut c_void, src: *const c_void, n: size_t) -> *mut c_void; pub fn memset(dest: *mut c_void, c: c_int, n: size_t) -> *mut c_void; } extern { #[cfg_attr(target_os = "netbsd", link_name = "__getpwnam50")] pub fn getpwnam(name: *const ::c_char) -> *mut passwd; #[cfg_attr(target_os = "netbsd", link_name = "__getpwuid50")] pub fn getpwuid(uid: ::uid_t) -> *mut passwd; pub fn fprintf(stream: *mut ::FILE, format: *const ::c_char, ...) -> ::c_int; pub fn printf(format: *const ::c_char, ...) -> ::c_int; pub fn snprintf(s: *mut ::c_char, n: ::size_t, format: *const ::c_char, ...) -> ::c_int; pub fn sprintf(s: *mut ::c_char, format: *const ::c_char, ...) -> ::c_int; #[cfg_attr(target_os = "linux", link_name = "__isoc99_fscanf")] pub fn fscanf(stream: *mut ::FILE, format: *const ::c_char, ...) -> ::c_int; #[cfg_attr(target_os = "linux", link_name = "__isoc99_scanf")] pub fn scanf(format: *const ::c_char, ...) -> ::c_int; #[cfg_attr(target_os = "linux", link_name = "__isoc99_sscanf")] pub fn sscanf(s: *const ::c_char, format: *const ::c_char, ...) 
-> ::c_int; pub fn getchar_unlocked() -> ::c_int; pub fn putchar_unlocked(c: ::c_int) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__socket30")] #[cfg_attr(target_os = "illumos", link_name = "__xnet_socket")] pub fn socket(domain: ::c_int, ty: ::c_int, protocol: ::c_int) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "connect$UNIX2003")] #[cfg_attr(target_os = "illumos", link_name = "__xnet_connect")] pub fn connect(socket: ::c_int, address: *const sockaddr, len: socklen_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "listen$UNIX2003")] #[cfg_attr(target_os = "illumos", link_name = "__xnet_listen")] pub fn listen(socket: ::c_int, backlog: ::c_int) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "accept$UNIX2003")] pub fn accept(socket: ::c_int, address: *mut sockaddr, address_len: *mut socklen_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "getpeername$UNIX2003")] pub fn getpeername(socket: ::c_int, address: *mut sockaddr, address_len: *mut socklen_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "getsockname$UNIX2003")] pub fn getsockname(socket: ::c_int, address: *mut sockaddr, address_len: *mut socklen_t) -> ::c_int; pub fn setsockopt(socket: ::c_int, level: ::c_int, name: ::c_int, value: *const ::c_void, option_len: socklen_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "socketpair$UNIX2003")] #[cfg_attr(target_os = "illumos", link_name = "__xnet_socketpair")] pub fn socketpair(domain: ::c_int, type_: ::c_int, protocol: ::c_int, socket_vector: *mut ::c_int) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "sendto$UNIX2003")] #[cfg_attr(target_os = "illumos", link_name = "__xnet_sendto")] pub fn sendto(socket: ::c_int, buf: *const ::c_void, len: ::size_t, flags: ::c_int, addr: *const sockaddr, addrlen: socklen_t) -> ::ssize_t; pub fn shutdown(socket: ::c_int, how: ::c_int) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "chmod$UNIX2003")] pub fn chmod(path: *const c_char, mode: mode_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "fchmod$UNIX2003")] pub fn fchmod(fd: ::c_int, mode: mode_t) -> ::c_int; #[cfg_attr(target_os = "macos", link_name = "fstat$INODE64")] #[cfg_attr(target_os = "netbsd", link_name = "__fstat50")] #[cfg_attr(target_os = "freebsd", link_name = "fstat@FBSD_1.0")] pub fn fstat(fildes: ::c_int, buf: *mut stat) -> ::c_int; pub fn mkdir(path: *const c_char, mode: mode_t) -> ::c_int; #[cfg_attr(target_os = "macos", link_name = "stat$INODE64")] #[cfg_attr(target_os = "netbsd", link_name = "__stat50")] #[cfg_attr(target_os = "freebsd", link_name = "stat@FBSD_1.0")] pub fn stat(path: *const c_char, buf: *mut stat) -> ::c_int; pub fn pclose(stream: *mut ::FILE) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "fdopen$UNIX2003")] pub fn fdopen(fd: ::c_int, mode: *const c_char) -> *mut ::FILE; pub fn fileno(stream: *mut ::FILE) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "open$UNIX2003")] pub fn open(path: *const c_char, oflag: ::c_int, ...) 
-> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "creat$UNIX2003")] pub fn creat(path: *const c_char, mode: mode_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "fcntl$UNIX2003")] pub fn fcntl(fd: ::c_int, cmd: ::c_int, ...) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"), link_name = "opendir$INODE64")] #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "opendir$INODE64$UNIX2003")] #[cfg_attr(target_os = "netbsd", link_name = "__opendir30")] pub fn opendir(dirname: *const c_char) -> *mut ::DIR; #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"), link_name = "fdopendir$INODE64")] #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "fdopendir$INODE64$UNIX2003")] pub fn fdopendir(fd: ::c_int) -> *mut ::DIR; #[cfg_attr(target_os = "macos", link_name = "readdir$INODE64")] #[cfg_attr(target_os = "netbsd", link_name = "__readdir30")] #[cfg_attr(target_os = "freebsd", link_name = "readdir@FBSD_1.0")] pub fn readdir(dirp: *mut ::DIR) -> *mut ::dirent; #[cfg_attr(target_os = "macos", link_name = "readdir_r$INODE64")] #[cfg_attr(target_os = "netbsd", link_name = "__readdir_r30")] #[cfg_attr(target_os = "freebsd", link_name = "readdir_r@FBSD_1.0")] /// The 64-bit libc on Solaris and illumos only has readdir_r. If a /// 32-bit Solaris or illumos target is ever created, it should use /// __posix_readdir_r. See libc(3LIB) on Solaris or illumos: /// https://illumos.org/man/3lib/libc /// https://docs.oracle.com/cd/E36784_01/html/E36873/libc-3lib.html /// https://www.unix.com/man-page/opensolaris/3LIB/libc/ pub fn readdir_r(dirp: *mut ::DIR, entry: *mut ::dirent, result: *mut *mut ::dirent) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "closedir$UNIX2003")] pub fn closedir(dirp: *mut ::DIR) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"), link_name = "rewinddir$INODE64")] #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "rewinddir$INODE64$UNIX2003")] pub fn rewinddir(dirp: *mut ::DIR); pub fn openat(dirfd: ::c_int, pathname: *const ::c_char, flags: ::c_int, ...) 
-> ::c_int; pub fn fchmodat(dirfd: ::c_int, pathname: *const ::c_char, mode: ::mode_t, flags: ::c_int) -> ::c_int; pub fn fchown(fd: ::c_int, owner: ::uid_t, group: ::gid_t) -> ::c_int; pub fn fchownat(dirfd: ::c_int, pathname: *const ::c_char, owner: ::uid_t, group: ::gid_t, flags: ::c_int) -> ::c_int; #[cfg_attr(target_os = "macos", link_name = "fstatat$INODE64")] #[cfg_attr(target_os = "freebsd", link_name = "fstatat@FBSD_1.1")] pub fn fstatat(dirfd: ::c_int, pathname: *const ::c_char, buf: *mut stat, flags: ::c_int) -> ::c_int; pub fn linkat(olddirfd: ::c_int, oldpath: *const ::c_char, newdirfd: ::c_int, newpath: *const ::c_char, flags: ::c_int) -> ::c_int; pub fn mkdirat(dirfd: ::c_int, pathname: *const ::c_char, mode: ::mode_t) -> ::c_int; pub fn readlinkat(dirfd: ::c_int, pathname: *const ::c_char, buf: *mut ::c_char, bufsiz: ::size_t) -> ::ssize_t; pub fn renameat(olddirfd: ::c_int, oldpath: *const ::c_char, newdirfd: ::c_int, newpath: *const ::c_char) -> ::c_int; pub fn symlinkat(target: *const ::c_char, newdirfd: ::c_int, linkpath: *const ::c_char) -> ::c_int; pub fn unlinkat(dirfd: ::c_int, pathname: *const ::c_char, flags: ::c_int) -> ::c_int; pub fn access(path: *const c_char, amode: ::c_int) -> ::c_int; pub fn alarm(seconds: ::c_uint) -> ::c_uint; pub fn chdir(dir: *const c_char) -> ::c_int; pub fn fchdir(dirfd: ::c_int) -> ::c_int; pub fn chown(path: *const c_char, uid: uid_t, gid: gid_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "lchown$UNIX2003")] pub fn lchown(path: *const c_char, uid: uid_t, gid: gid_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "close$NOCANCEL$UNIX2003")] #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"), link_name = "close$NOCANCEL")] pub fn close(fd: ::c_int) -> ::c_int; pub fn dup(fd: ::c_int) -> ::c_int; pub fn dup2(src: ::c_int, dst: ::c_int) -> ::c_int; pub fn execl(path: *const c_char, arg0: *const c_char, ...) -> ::c_int; pub fn execle(path: *const ::c_char, arg0: *const ::c_char, ...) -> ::c_int; pub fn execlp(file: *const ::c_char, arg0: *const ::c_char, ...) 
-> ::c_int; pub fn execv(prog: *const c_char, argv: *const *const c_char) -> ::c_int; pub fn execve(prog: *const c_char, argv: *const *const c_char, envp: *const *const c_char) -> ::c_int; pub fn execvp(c: *const c_char, argv: *const *const c_char) -> ::c_int; pub fn fork() -> pid_t; pub fn fpathconf(filedes: ::c_int, name: ::c_int) -> c_long; pub fn getcwd(buf: *mut c_char, size: ::size_t) -> *mut c_char; pub fn getegid() -> gid_t; pub fn geteuid() -> uid_t; pub fn getgid() -> gid_t; pub fn getgroups(ngroups_max: ::c_int, groups: *mut gid_t) -> ::c_int; pub fn getlogin() -> *mut c_char; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "getopt$UNIX2003")] pub fn getopt(argc: ::c_int, argv: *const *mut c_char, optstr: *const c_char) -> ::c_int; pub fn getpgid(pid: pid_t) -> pid_t; pub fn getpgrp() -> pid_t; pub fn getpid() -> pid_t; pub fn getppid() -> pid_t; pub fn getuid() -> uid_t; pub fn isatty(fd: ::c_int) -> ::c_int; pub fn link(src: *const c_char, dst: *const c_char) -> ::c_int; pub fn lseek(fd: ::c_int, offset: off_t, whence: ::c_int) -> off_t; pub fn pathconf(path: *const c_char, name: ::c_int) -> c_long; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pause$UNIX2003")] pub fn pause() -> ::c_int; pub fn pipe(fds: *mut ::c_int) -> ::c_int; pub fn posix_memalign(memptr: *mut *mut ::c_void, align: ::size_t, size: ::size_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "read$UNIX2003")] pub fn read(fd: ::c_int, buf: *mut ::c_void, count: ::size_t) -> ::ssize_t; pub fn rmdir(path: *const c_char) -> ::c_int; pub fn seteuid(uid: uid_t) -> ::c_int; pub fn setegid(gid: gid_t) -> ::c_int; pub fn setgid(gid: gid_t) -> ::c_int; pub fn setpgid(pid: pid_t, pgid: pid_t) -> ::c_int; pub fn setsid() -> pid_t; pub fn setuid(uid: uid_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "sleep$UNIX2003")] pub fn sleep(secs: ::c_uint) -> ::c_uint; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "nanosleep$UNIX2003")] #[cfg_attr(target_os = "netbsd", link_name = "__nanosleep50")] pub fn nanosleep(rqtp: *const timespec, rmtp: *mut timespec) -> ::c_int; pub fn tcgetpgrp(fd: ::c_int) -> pid_t; pub fn tcsetpgrp(fd: ::c_int, pgrp: ::pid_t) -> ::c_int; pub fn ttyname(fd: ::c_int) -> *mut c_char; pub fn unlink(c: *const c_char) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "wait$UNIX2003")] pub fn wait(status: *mut ::c_int) -> pid_t; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "waitpid$UNIX2003")] pub fn waitpid(pid: pid_t, status: *mut ::c_int, options: ::c_int) -> pid_t; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "write$UNIX2003")] pub fn write(fd: ::c_int, buf: *const ::c_void, count: ::size_t) -> ::ssize_t; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pread$UNIX2003")] pub fn pread(fd: ::c_int, buf: *mut ::c_void, count: ::size_t, offset: off_t) -> ::ssize_t; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pwrite$UNIX2003")] pub fn pwrite(fd: ::c_int, buf: *const ::c_void, count: ::size_t, offset: off_t) -> ::ssize_t; pub fn umask(mask: mode_t) -> mode_t; #[cfg_attr(target_os = "netbsd", link_name = "__utime50")] pub fn utime(file: *const c_char, buf: *const utimbuf) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "kill$UNIX2003")] pub fn kill(pid: pid_t, sig: ::c_int) -> ::c_int; #[cfg_attr(all(target_os 
= "macos", target_arch = "x86"), link_name = "killpg$UNIX2003")] pub fn killpg(pgrp: pid_t, sig: ::c_int) -> ::c_int; pub fn mlock(addr: *const ::c_void, len: ::size_t) -> ::c_int; pub fn munlock(addr: *const ::c_void, len: ::size_t) -> ::c_int; pub fn mlockall(flags: ::c_int) -> ::c_int; pub fn munlockall() -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "mmap$UNIX2003")] pub fn mmap(addr: *mut ::c_void, len: ::size_t, prot: ::c_int, flags: ::c_int, fd: ::c_int, offset: off_t) -> *mut ::c_void; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "munmap$UNIX2003")] pub fn munmap(addr: *mut ::c_void, len: ::size_t) -> ::c_int; pub fn if_nametoindex(ifname: *const c_char) -> ::c_uint; pub fn if_indextoname(ifindex: ::c_uint, ifname: *mut ::c_char) -> *mut ::c_char; #[cfg_attr(target_os = "macos", link_name = "lstat$INODE64")] #[cfg_attr(target_os = "netbsd", link_name = "__lstat50")] #[cfg_attr(target_os = "freebsd", link_name = "lstat@FBSD_1.0")] pub fn lstat(path: *const c_char, buf: *mut stat) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "fsync$UNIX2003")] pub fn fsync(fd: ::c_int) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "setenv$UNIX2003")] pub fn setenv(name: *const c_char, val: *const c_char, overwrite: ::c_int) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "unsetenv$UNIX2003")] #[cfg_attr(target_os = "netbsd", link_name = "__unsetenv13")] pub fn unsetenv(name: *const c_char) -> ::c_int; pub fn symlink(path1: *const c_char, path2: *const c_char) -> ::c_int; pub fn truncate(path: *const c_char, length: off_t) -> ::c_int; pub fn ftruncate(fd: ::c_int, length: off_t) -> ::c_int; pub fn signal(signum: ::c_int, handler: sighandler_t) -> sighandler_t; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "getrlimit$UNIX2003")] pub fn getrlimit(resource: ::c_int, rlim: *mut rlimit) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "setrlimit$UNIX2003")] pub fn setrlimit(resource: ::c_int, rlim: *const rlimit) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__getrusage50")] pub fn getrusage(resource: ::c_int, usage: *mut rusage) -> ::c_int; #[cfg_attr(any(target_os = "macos", target_os = "ios"), link_name = "realpath$DARWIN_EXTSN")] pub fn realpath(pathname: *const ::c_char, resolved: *mut ::c_char) -> *mut ::c_char; pub fn flock(fd: ::c_int, operation: ::c_int) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__times13")] pub fn times(buf: *mut ::tms) -> ::clock_t; pub fn pthread_self() -> ::pthread_t; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pthread_join$UNIX2003")] pub fn pthread_join(native: ::pthread_t, value: *mut *mut ::c_void) -> ::c_int; pub fn pthread_exit(value: *mut ::c_void); pub fn pthread_attr_init(attr: *mut ::pthread_attr_t) -> ::c_int; pub fn pthread_attr_destroy(attr: *mut ::pthread_attr_t) -> ::c_int; pub fn pthread_attr_setstacksize(attr: *mut ::pthread_attr_t, stack_size: ::size_t) -> ::c_int; pub fn pthread_attr_setdetachstate(attr: *mut ::pthread_attr_t, state: ::c_int) -> ::c_int; pub fn pthread_detach(thread: ::pthread_t) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__libc_thr_yield")] pub fn sched_yield() -> ::c_int; pub fn pthread_key_create(key: *mut pthread_key_t, dtor: ::Option<unsafe extern fn(*mut ::c_void)>) -> ::c_int; pub fn pthread_key_delete(key: pthread_key_t) -> ::c_int; pub fn 
pthread_getspecific(key: pthread_key_t) -> *mut ::c_void; pub fn pthread_setspecific(key: pthread_key_t, value: *const ::c_void) -> ::c_int; pub fn pthread_mutex_init(lock: *mut pthread_mutex_t, attr: *const pthread_mutexattr_t) -> ::c_int; pub fn pthread_mutex_destroy(lock: *mut pthread_mutex_t) -> ::c_int; pub fn pthread_mutex_lock(lock: *mut pthread_mutex_t) -> ::c_int; pub fn pthread_mutex_trylock(lock: *mut pthread_mutex_t) -> ::c_int; pub fn pthread_mutex_unlock(lock: *mut pthread_mutex_t) -> ::c_int; pub fn pthread_mutexattr_init(attr: *mut pthread_mutexattr_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pthread_mutexattr_destroy$UNIX2003")] pub fn pthread_mutexattr_destroy(attr: *mut pthread_mutexattr_t) -> ::c_int; pub fn pthread_mutexattr_settype(attr: *mut pthread_mutexattr_t, _type: ::c_int) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pthread_cond_init$UNIX2003")] pub fn pthread_cond_init(cond: *mut pthread_cond_t, attr: *const pthread_condattr_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pthread_cond_wait$UNIX2003")] pub fn pthread_cond_wait(cond: *mut pthread_cond_t, lock: *mut pthread_mutex_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pthread_cond_timedwait$UNIX2003")] pub fn pthread_cond_timedwait(cond: *mut pthread_cond_t, lock: *mut pthread_mutex_t, abstime: *const ::timespec) -> ::c_int; pub fn pthread_cond_signal(cond: *mut pthread_cond_t) -> ::c_int; pub fn pthread_cond_broadcast(cond: *mut pthread_cond_t) -> ::c_int; pub fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> ::c_int; pub fn pthread_condattr_init(attr: *mut pthread_condattr_t) -> ::c_int; pub fn pthread_condattr_destroy(attr: *mut pthread_condattr_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pthread_rwlock_init$UNIX2003")] pub fn pthread_rwlock_init(lock: *mut pthread_rwlock_t, attr: *const pthread_rwlockattr_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pthread_rwlock_destroy$UNIX2003")] pub fn pthread_rwlock_destroy(lock: *mut pthread_rwlock_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pthread_rwlock_rdlock$UNIX2003")] pub fn pthread_rwlock_rdlock(lock: *mut pthread_rwlock_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pthread_rwlock_tryrdlock$UNIX2003")] pub fn pthread_rwlock_tryrdlock(lock: *mut pthread_rwlock_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pthread_rwlock_wrlock$UNIX2003")] pub fn pthread_rwlock_wrlock(lock: *mut pthread_rwlock_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pthread_rwlock_trywrlock$UNIX2003")] pub fn pthread_rwlock_trywrlock(lock: *mut pthread_rwlock_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pthread_rwlock_unlock$UNIX2003")] pub fn pthread_rwlock_unlock(lock: *mut pthread_rwlock_t) -> ::c_int; pub fn pthread_rwlockattr_init(attr: *mut pthread_rwlockattr_t) -> ::c_int; pub fn pthread_rwlockattr_destroy(attr: *mut pthread_rwlockattr_t) -> ::c_int; #[cfg_attr(target_os = "illumos", link_name = "__xnet_getsockopt")] pub fn getsockopt(sockfd: ::c_int, level: ::c_int, optname: ::c_int, optval: *mut ::c_void, optlen: *mut ::socklen_t) -> ::c_int; pub fn raise(signum: ::c_int) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = 
"__sigaction14")] pub fn sigaction(signum: ::c_int, act: *const sigaction, oldact: *mut sigaction) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__utimes50")] pub fn utimes(filename: *const ::c_char, times: *const ::timeval) -> ::c_int; pub fn dlopen(filename: *const ::c_char, flag: ::c_int) -> *mut ::c_void; pub fn dlerror() -> *mut ::c_char; pub fn dlsym(handle: *mut ::c_void, symbol: *const ::c_char) -> *mut ::c_void; pub fn dlclose(handle: *mut ::c_void) -> ::c_int; pub fn dladdr(addr: *const ::c_void, info: *mut Dl_info) -> ::c_int; pub fn getaddrinfo(node: *const c_char, service: *const c_char, hints: *const addrinfo, res: *mut *mut addrinfo) -> ::c_int; pub fn freeaddrinfo(res: *mut addrinfo); pub fn gai_strerror(errcode: ::c_int) -> *const ::c_char; #[cfg_attr(any( all(target_os = "linux", not(target_env = "musl")), target_os = "freebsd", target_os = "dragonfly", target_os = "haiku"), link_name = "__res_init")] #[cfg_attr(any(target_os = "macos", target_os = "ios"), link_name = "res_9_init")] pub fn res_init() -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__gmtime_r50")] pub fn gmtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; #[cfg_attr(target_os = "netbsd", link_name = "__localtime_r50")] pub fn localtime_r(time_p: *const time_t, result: *mut tm) -> *mut tm; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "mktime$UNIX2003")] #[cfg_attr(target_os = "netbsd", link_name = "__mktime50")] pub fn mktime(tm: *mut tm) -> time_t; #[cfg_attr(target_os = "netbsd", link_name = "__time50")] pub fn time(time: *mut time_t) -> time_t; #[cfg_attr(target_os = "netbsd", link_name = "__gmtime50")] pub fn gmtime(time_p: *const time_t) -> *mut tm; #[cfg_attr(target_os = "netbsd", link_name = "__locatime50")] pub fn localtime(time_p: *const time_t) -> *mut tm; #[cfg_attr(target_os = "netbsd", link_name = "__difftime50")] pub fn difftime(time1: time_t, time0: time_t) -> ::c_double; #[cfg_attr(target_os = "netbsd", link_name = "__mknod50")] #[cfg_attr(target_os = "freebsd", link_name = "mknod@FBSD_1.0")] pub fn mknod(pathname: *const ::c_char, mode: ::mode_t, dev: ::dev_t) -> ::c_int; pub fn gethostname(name: *mut ::c_char, len: ::size_t) -> ::c_int; pub fn getservbyname(name: *const ::c_char, proto: *const ::c_char) -> *mut servent; pub fn getprotobyname(name: *const ::c_char) -> *mut protoent; pub fn getprotobynumber(proto: ::c_int) -> *mut protoent; pub fn chroot(name: *const ::c_char) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "usleep$UNIX2003")] pub fn usleep(secs: ::c_uint) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "send$UNIX2003")] pub fn send(socket: ::c_int, buf: *const ::c_void, len: ::size_t, flags: ::c_int) -> ::ssize_t; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "recv$UNIX2003")] pub fn recv(socket: ::c_int, buf: *mut ::c_void, len: ::size_t, flags: ::c_int) -> ::ssize_t; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "putenv$UNIX2003")] #[cfg_attr(target_os = "netbsd", link_name = "__putenv50")] pub fn putenv(string: *mut c_char) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "poll$UNIX2003")] pub fn poll(fds: *mut pollfd, nfds: nfds_t, timeout: ::c_int) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"), link_name = "select$1050")] #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "select$UNIX2003")] #[cfg_attr(target_os = "netbsd", 
link_name = "__select50")] pub fn select(nfds: ::c_int, readfs: *mut fd_set, writefds: *mut fd_set, errorfds: *mut fd_set, timeout: *mut timeval) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__setlocale50")] pub fn setlocale(category: ::c_int, locale: *const ::c_char) -> *mut ::c_char; pub fn localeconv() -> *mut lconv; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "sem_wait$UNIX2003")] pub fn sem_wait(sem: *mut sem_t) -> ::c_int; pub fn sem_trywait(sem: *mut sem_t) -> ::c_int; pub fn sem_post(sem: *mut sem_t) -> ::c_int; pub fn statvfs(path: *const c_char, buf: *mut statvfs) -> ::c_int; pub fn fstatvfs(fd: ::c_int, buf: *mut statvfs) -> ::c_int; pub fn readlink(path: *const c_char, buf: *mut c_char, bufsz: ::size_t) -> ::ssize_t; #[cfg_attr(target_os = "netbsd", link_name = "__sigemptyset14")] pub fn sigemptyset(set: *mut sigset_t) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__sigaddset14")] pub fn sigaddset(set: *mut sigset_t, signum: ::c_int) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__sigfillset14")] pub fn sigfillset(set: *mut sigset_t) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__sigdelset14")] pub fn sigdelset(set: *mut sigset_t, signum: ::c_int) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__sigismember14")] pub fn sigismember(set: *const sigset_t, signum: ::c_int) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__sigprocmask14")] pub fn sigprocmask(how: ::c_int, set: *const sigset_t, oldset: *mut sigset_t) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__sigpending14")] pub fn sigpending(set: *mut sigset_t) -> ::c_int; #[cfg_attr(target_os = "netbsd", link_name = "__timegm50")] pub fn timegm(tm: *mut ::tm) -> time_t; pub fn getsid(pid: pid_t) -> pid_t; pub fn sysconf(name: ::c_int) -> ::c_long; pub fn mkfifo(path: *const c_char, mode: mode_t) -> ::c_int; #[cfg_attr(all(target_os = "macos", target_arch = "x86_64"), link_name = "pselect$1050")] #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "pselect$UNIX2003")] #[cfg_attr(target_os = "netbsd", link_name = "__pselect50")] pub fn pselect(nfds: ::c_int, readfs: *mut fd_set, writefds: *mut fd_set, errorfds: *mut fd_set, timeout: *const timespec, sigmask: *const sigset_t) -> ::c_int; pub fn fseeko(stream: *mut ::FILE, offset: ::off_t, whence: ::c_int) -> ::c_int; pub fn ftello(stream: *mut ::FILE) -> ::off_t; #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "tcdrain$UNIX2003")] pub fn tcdrain(fd: ::c_int) -> ::c_int; pub fn cfgetispeed(termios: *const ::termios) -> ::speed_t; pub fn cfgetospeed(termios: *const ::termios) -> ::speed_t; pub fn cfsetispeed(termios: *mut ::termios, speed: ::speed_t) -> ::c_int; pub fn cfsetospeed(termios: *mut ::termios, speed: ::speed_t) -> ::c_int; pub fn tcgetattr(fd: ::c_int, termios: *mut ::termios) -> ::c_int; pub fn tcsetattr(fd: ::c_int, optional_actions: ::c_int, termios: *const ::termios) -> ::c_int; pub fn tcflow(fd: ::c_int, action: ::c_int) -> ::c_int; pub fn tcflush(fd: ::c_int, action: ::c_int) -> ::c_int; pub fn tcgetsid(fd: ::c_int) -> ::pid_t; pub fn tcsendbreak(fd: ::c_int, duration: ::c_int) -> ::c_int; pub fn mkstemp(template: *mut ::c_char) -> ::c_int; pub fn mkdtemp(template: *mut ::c_char) -> *mut ::c_char; pub fn tmpnam(ptr: *mut ::c_char) -> *mut ::c_char; pub fn openlog(ident: *const ::c_char, logopt: ::c_int, facility: ::c_int); pub fn closelog(); pub fn setlogmask(maskpri: ::c_int) -> ::c_int; #[cfg_attr(target_os = 
"macos", link_name = "syslog$DARWIN_EXTSN")] pub fn syslog(priority: ::c_int, message: *const ::c_char, ...); #[cfg_attr(all(target_os = "macos", target_arch = "x86"), link_name = "nice$UNIX2003")] pub fn nice(incr: ::c_int) -> ::c_int; pub fn grantpt(fd: ::c_int) -> ::c_int; pub fn posix_openpt(flags: ::c_int) -> ::c_int; pub fn ptsname(fd: ::c_int) -> *mut ::c_char; pub fn unlockpt(fd: ::c_int) -> ::c_int; pub fn strcasestr(cs: *const c_char, ct: *const c_char) -> *mut c_char; pub fn getline (lineptr: *mut *mut c_char, n: *mut size_t, stream: *mut FILE) -> ssize_t; } cfg_if! { if #[cfg(not(any(target_os = "solaris", target_os = "illumos")))] { extern { pub fn cfmakeraw(termios: *mut ::termios); pub fn cfsetspeed(termios: *mut ::termios, speed: ::speed_t) -> ::c_int; } } } cfg_if! { if #[cfg(target_env = "uclibc")] { mod uclibc; pub use self::uclibc::*; } else if #[cfg(target_env = "newlib")] { mod newlib; pub use self::newlib::*; } else if #[cfg(any(target_os = "linux", target_os = "android", target_os = "emscripten"))] { mod notbsd; pub use self::notbsd::*; } else if #[cfg(any(target_os = "macos", target_os = "ios", target_os = "freebsd", target_os = "dragonfly", target_os = "openbsd", target_os = "netbsd"))] { mod bsd; pub use self::bsd::*; } else if #[cfg(any(target_os = "solaris", target_os = "illumos"))] { mod solarish; pub use self::solarish::*; } else if #[cfg(target_os = "haiku")] { mod haiku; pub use self::haiku::*; } else if #[cfg(target_os = "hermit")] { mod hermit; pub use self::hermit::*; } else if #[cfg(target_os = "redox")] { mod redox; pub use self::redox::*; } else { // Unknown target_os } } cfg_if! { if #[cfg(libc_core_cvoid)] { pub use ::ffi::c_void; } else { // Use repr(u8) as LLVM expects `void*` to be the same as `i8*` to help // enable more optimization opportunities around it recognizing things // like malloc/free. #[repr(u8)] #[allow(missing_copy_implementations)] #[allow(missing_debug_implementations)] pub enum c_void { // Two dummy variants so the #[repr] attribute can be used. #[doc(hidden)] __variant1, #[doc(hidden)] __variant2, } } } cfg_if! { if #[cfg(libc_align)] { mod align; pub use self::align::*; } else { mod no_align; pub use self::no_align::*; } }
42.824212
80
0.577644
21da7e052773e09dd54ba0ffce4db04d8ddf8c46
2,034
use amethyst_core::math::Isometry3; use crate::objects::*; /// Trait that defines the *Joint* server capabilities. #[allow(clippy::trivially_copy_pass_by_ref)] // TODO remove when all joints get implemented pub trait JointPhysicsServerTrait<N: crate::PtReal> { /// Creates a new joint. /// /// The parameter `initial_position` is used to calculate the body offset to the joint. /// /// The joint created by this function is not yet active; indeed, you have to assign the /// returned `PhysicsHandle<PhysicsJointTag>` to the two `Entities` that you want to constrain. /// /// To remove this joint, it is necessary to drop all its handles. fn create( &self, desc: &JointDesc, initial_position: JointPosition<N>, ) -> PhysicsHandle<PhysicsJointTag>; /// Inserts the rigid body into the joint and, if needed, creates the actual joint. /// It doesn't accept more than two handles at a time. /// /// This function is called automatically when a `PhysicsHandle<PhysicsJointTag>` is assigned to /// an `Entity` that has a `PhysicsHandle<PhysicsRigidBodyTag>`. /// /// So, you just have to create the joint using the `create` function. fn insert_rigid_body(&self, joint_tag: PhysicsJointTag, body_tag: PhysicsRigidBodyTag); /// Removes the rigid body from the joint. /// /// This function is called automatically when a `PhysicsHandle<PhysicsJointTag>` is removed from /// an `Entity`. /// /// To drop a joint, you simply need to drop the handle. fn remove_rigid_body(&self, joint_tag: PhysicsJointTag, body_tag: PhysicsRigidBodyTag); } /// Joint description, used during the joint creation. #[derive(Copy, Clone, Debug)] pub enum JointDesc { /// Fixed joint Fixed, } /// Used to position the joint. #[derive(Copy, Clone, Debug)] pub enum JointPosition<N: crate::PtReal> { /// Set the joint in the exact world position. Exact(Isometry3<N>), /// Put the joint between the two bodies. Middle, }
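// --- Hedged usage sketch (not part of the module above) ---
// Illustrates how game code might drive `JointPhysicsServerTrait`. Only
// `create`, `JointDesc::Fixed` and `JointPosition::Middle` come from this
// module; the free function and its trait-object parameter are illustrative
// glue and assume the sketch lives inside the same crate (it uses `crate::PtReal`).
fn create_fixed_joint<N: crate::PtReal>(
    server: &dyn JointPhysicsServerTrait<N>,
) -> PhysicsHandle<PhysicsJointTag> {
    // The joint comes back inactive, positioned between the two bodies; it
    // becomes active once this handle is assigned to both entities that
    // should be constrained, which in turn calls `insert_rigid_body`.
    server.create(&JointDesc::Fixed, JointPosition::Middle)
}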
36.981818
101
0.685841
9cf01cdc050fc70c2e0d7568e719c057e9c87a78
6,177
use crate::error::Error; use crate::http_client::{get_slack_url, ResponseMetadata, SlackWebAPIClient}; use crate::usergroups::usergroup::Usergroup; use serde::{Deserialize, Serialize}; use serde_with::skip_serializing_none; #[skip_serializing_none] #[derive(Deserialize, Serialize, Debug, Default, PartialEq)] pub struct UpdateRequest { pub usergroup: String, pub users: String, pub include_count: Option<bool>, pub team_id: Option<String>, } #[skip_serializing_none] #[derive(Deserialize, Serialize, Debug, Default, PartialEq)] pub struct UpdateResponse { pub ok: bool, pub error: Option<String>, pub response_metadata: Option<ResponseMetadata>, pub usergroup: Option<Usergroup>, } pub async fn update<T>( client: &T, param: &UpdateRequest, bot_token: &str, ) -> Result<UpdateResponse, Error> where T: SlackWebAPIClient, { let url = get_slack_url("usergroups.users.update"); let json = serde_json::to_string(&param)?; client .post_json(&url, &json, bot_token) .await .and_then(|result| { serde_json::from_str::<UpdateResponse>(&result).map_err(Error::SerdeJsonError) }) } #[cfg(test)] mod test { use super::*; use crate::http_client::MockSlackWebAPIClient; use crate::usergroups::usergroup::Pref; #[test] fn convert_request() { let request = UpdateRequest { usergroup: "S0604QSJC".to_string(), users: "U060R4BJ4,U060RNRCZ".to_string(), team_id: Some("T1234567890".to_string()), include_count: Some(true), }; let json = r##"{ "usergroup": "S0604QSJC", "users": "U060R4BJ4,U060RNRCZ", "include_count": true, "team_id": "T1234567890" }"##; let j = serde_json::to_string_pretty(&request).unwrap(); assert_eq!(json, j); let s = serde_json::from_str::<UpdateRequest>(json).unwrap(); assert_eq!(request, s); } #[test] fn convert_response() { let response = UpdateResponse { ok: true, usergroup: Some(Usergroup { id: Some("S0615G0KT".to_string()), team_id: Some("T060RNRCH".to_string()), is_usergroup: Some(true), name: Some("Marketing Team".to_string()), description: Some("Marketing gurus, PR experts and product advocates.".to_string()), handle: Some("marketing-team".to_string()), is_external: Some(false), date_create: Some(1446746793), date_update: Some(1446746793), date_delete: Some(0), auto_type: Some("".to_string()), created_by: Some("U060RNRCZ".to_string()), updated_by: Some("U060RNRCZ".to_string()), deleted_by: Some("".to_string()), prefs: Some(Pref { channels: Some(vec![]), groups: Some(vec![]), }), user_count: Some("0".to_string()), }), ..Default::default() }; let json = r##"{ "ok": true, "usergroup": { "id": "S0615G0KT", "team_id": "T060RNRCH", "is_usergroup": true, "name": "Marketing Team", "description": "Marketing gurus, PR experts and product advocates.", "handle": "marketing-team", "is_external": false, "date_create": 1446746793, "date_update": 1446746793, "date_delete": 0, "auto_type": "", "created_by": "U060RNRCZ", "updated_by": "U060RNRCZ", "deleted_by": "", "prefs": { "channels": [], "groups": [] }, "user_count": "0" } }"##; let j = serde_json::to_string_pretty(&response).unwrap(); assert_eq!(json, j); let s = serde_json::from_str::<UpdateResponse>(json).unwrap(); assert_eq!(response, s); } #[async_std::test] async fn test_update() { let param = UpdateRequest { usergroup: "S0604QSJC".to_string(), users: "U060R4BJ4,U060RNRCZ".to_string(), team_id: Some("T1234567890".to_string()), include_count: Some(true), }; let mut mock = MockSlackWebAPIClient::new(); mock.expect_post_json().returning(|_, _, _| { Ok(r##"{ "ok": true, "usergroup": { "id": "S0615G0KT", "team_id": "T1234567890", "is_usergroup": true, "name": "My 
Test Team", "description": "Marketing gurus, PR experts and product advocates.", "handle": "marketing-team", "is_external": false, "date_create": 1446746793, "date_update": 1446746793, "date_delete": 0, "auto_type": "", "created_by": "U060RNRCZ", "updated_by": "U060RNRCZ", "deleted_by": "", "prefs": { "channels": [], "groups": [] }, "user_count": "0" } }"## .to_string()) }); let response = update(&mock, &param, &"test_token".to_string()) .await .unwrap(); let expect = UpdateResponse { ok: true, usergroup: Some(Usergroup { id: Some("S0615G0KT".to_string()), team_id: Some("T1234567890".to_string()), is_usergroup: Some(true), name: Some("My Test Team".to_string()), description: Some("Marketing gurus, PR experts and product advocates.".to_string()), handle: Some("marketing-team".to_string()), is_external: Some(false), date_create: Some(1446746793), date_update: Some(1446746793), date_delete: Some(0), auto_type: Some("".to_string()), created_by: Some("U060RNRCZ".to_string()), updated_by: Some("U060RNRCZ".to_string()), deleted_by: Some("".to_string()), prefs: Some(Pref { channels: Some(vec![]), groups: Some(vec![]), }), user_count: Some("0".to_string()), }), ..Default::default() }; assert_eq!(expect, response); } }
30.885
100
0.552048
6184690b9c4e9813e6bd434cc4ef5f859ce489dc
209
use super::program::Class; use super::foundation::Parsable; use super::token::Token; use anyhow::*; pub fn parse(tokens: &[Token]) -> Result<Box<Class>> { let (c, _) = Class::parse(tokens)?; Ok(c) }
19
54
0.631579
56b981dedd9d8347a2adb379a8b15518147f7845
2,806
// Copyright 2015 The coio Developers. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. extern crate clap; #[macro_use] extern crate log; extern crate env_logger; extern crate mio; extern crate coio; use clap::{Arg, App}; use coio::Scheduler; use coio::net::tcp::TcpListener; fn main() { env_logger::init().unwrap(); let matches = App::new("tcp-echo") .version(env!("CARGO_PKG_VERSION")) .author("Y. T. Chung <[email protected]>") .arg(Arg::with_name("BIND") .short("b") .long("bind") .takes_value(true) .required(true) .help("Listening on this address")) .arg(Arg::with_name("THREADS") .short("t") .long("threads") .takes_value(true) .help("Number of threads")) .get_matches(); let bind_addr = matches.value_of("BIND").unwrap().to_owned(); Scheduler::new() .with_workers(matches.value_of("THREADS").unwrap_or("1").parse().unwrap()) .run(move || { let server = TcpListener::bind(&bind_addr[..]).unwrap(); info!("Listening on {:?}", server.local_addr().unwrap()); for stream in server.incoming() { use std::io::{Read, Write}; let (mut stream, addr) = stream.unwrap(); info!("Accept connection: {:?}", addr); Scheduler::spawn(move || { let mut buf = [0; 1024 * 16]; loop { debug!("Trying to Read..."); match stream.read(&mut buf) { Ok(0) => { debug!("EOF received, going to close"); break; } Ok(len) => { info!("Read {} bytes, echo back!", len); stream.write_all(&buf[0..len]).unwrap(); } Err(err) => { panic!("Error occurs: {:?}", err); } } } info!("{:?} closed", addr); }); } }) .unwrap(); }
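// --- Hedged companion sketch (not part of the example above) ---
// A plain std-only client one might use to exercise the echo server; the
// 127.0.0.1:3000 address is an assumption and should match whatever was
// passed to --bind.
use std::io::{Read, Write};
use std::net::TcpStream;

fn main() {
    let mut stream = TcpStream::connect("127.0.0.1:3000").unwrap();
    stream.write_all(b"hello coio").unwrap(); // send one message
    let mut buf = [0u8; 32];
    let n = stream.read(&mut buf).unwrap(); // read the echoed bytes back
    println!("echoed: {}", String::from_utf8_lossy(&buf[..n]));
}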
34.219512
82
0.417676
386ea68bc3bf14bfd687b4376699a5601509c542
512
pub fn option_zip<A, B>(a: Option<A>, b: Option<B>) -> Option<(A, B)> { a.and_then(|a| b.map(|b| (a, b))) } #[cfg(test)] mod tests { use super::option_zip; #[test] fn main() { assert_eq!(option_zip::<(), ()>(None, None), None); assert_eq!(option_zip::<u8, u8>(Some(255), None), None); assert_eq!(option_zip::<u8, u8>(None, Some(255)), None); assert_eq!( option_zip::<bool, u8>(Some(true), Some(255)), Some((true, 255)) ); } }
25.6
71
0.505859
5d456f8e358b64a3b41f8b161d3d2e619bdbb553
79,981
// DO NOT EDIT ! // This file was generated automatically from 'src/mako/api/lib.rs.mako' // DO NOT EDIT ! //! This documentation was generated from *App State* crate version *1.0.5+20170511*, where *20170511* is the exact revision of the *appstate:v1* schema built by the [mako](http://www.makotemplates.org/) code generator *v1.0.5*. //! //! Everything else about the *App State* *v1* API can be found at the //! [official documentation site](https://developers.google.com/games/services/web/api/states). //! The original source code is [on github](https://github.com/Byron/google-apis-rs/tree/master/gen/appstate1). //! # Features //! //! Handle the following *Resources* with ease from the central [hub](struct.AppState.html) ... //! //! * states //! * [*clear*](struct.StateClearCall.html), [*delete*](struct.StateDeleteCall.html), [*get*](struct.StateGetCall.html), [*list*](struct.StateListCall.html) and [*update*](struct.StateUpdateCall.html) //! //! //! //! //! Not what you are looking for ? Find all other Google APIs in their Rust [documentation index](http://byron.github.io/google-apis-rs). //! //! # Structure of this Library //! //! The API is structured into the following primary items: //! //! * **[Hub](struct.AppState.html)** //! * a central object to maintain state and allow accessing all *Activities* //! * creates [*Method Builders*](trait.MethodsBuilder.html) which in turn //! allow access to individual [*Call Builders*](trait.CallBuilder.html) //! * **[Resources](trait.Resource.html)** //! * primary types that you can apply *Activities* to //! * a collection of properties and *Parts* //! * **[Parts](trait.Part.html)** //! * a collection of properties //! * never directly used in *Activities* //! * **[Activities](trait.CallBuilder.html)** //! * operations to apply to *Resources* //! //! All *structures* are marked with applicable traits to further categorize them and ease browsing. //! //! Generally speaking, you can invoke *Activities* like this: //! //! ```Rust,ignore //! let r = hub.resource().activity(...).doit() //! ``` //! //! Or specifically ... //! //! ```ignore //! let r = hub.states().clear(...).doit() //! let r = hub.states().update(...).doit() //! ``` //! //! The `resource()` and `activity(...)` calls create [builders][builder-pattern]. The second one dealing with `Activities` //! supports various methods to configure the impending operation (not shown here). It is made such that all required arguments have to be //! specified right away (i.e. `(...)`), whereas all optional ones can be [build up][builder-pattern] as desired. //! The `doit()` method performs the actual communication with the server and returns the respective result. //! //! # Usage //! //! ## Setting up your Project //! //! To use this library, you would put the following lines into your `Cargo.toml` file: //! //! ```toml //! [dependencies] //! google-appstate1 = "*" //! ``` //! //! ## A complete example //! //! ```test_harness,no_run //! extern crate hyper; //! extern crate hyper_rustls; //! extern crate yup_oauth2 as oauth2; //! extern crate google_appstate1 as appstate1; //! use appstate1::UpdateRequest; //! use appstate1::{Result, Error}; //! # #[test] fn egal() { //! use std::default::Default; //! use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; //! use appstate1::AppState; //! //! // Get an ApplicationSecret instance by some means. It contains the `client_id` and //! // `client_secret`, among other things. //! 
let secret: ApplicationSecret = Default::default(); //! // Instantiate the authenticator. It will choose a suitable authentication flow for you, //! // unless you replace `None` with the desired Flow. //! // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about //! // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and //! // retrieve them from storage. //! let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, //! hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), //! <MemoryStorage as Default>::default(), None); //! let mut hub = AppState::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); //! // As the method needs a request, you would usually fill it with the desired information //! // into the respective structure. Some of the parts shown here might not be applicable ! //! // Values shown here are possibly random and not representative ! //! let mut req = UpdateRequest::default(); //! //! // You can configure optional parameters by calling the respective setters at will, and //! // execute the final call using `doit()`. //! // Values shown here are possibly random and not representative ! //! let result = hub.states().update(req, -65) //! .current_state_version("sed") //! .doit(); //! //! match result { //! Err(e) => match e { //! // The Error enum provides details about what exactly happened. //! // You can also just use its `Debug`, `Display` or `Error` traits //! Error::HttpError(_) //! |Error::MissingAPIKey //! |Error::MissingToken(_) //! |Error::Cancelled //! |Error::UploadSizeLimitExceeded(_, _) //! |Error::Failure(_) //! |Error::BadRequest(_) //! |Error::FieldClash(_) //! |Error::JsonDecodeError(_, _) => println!("{}", e), //! }, //! Ok(res) => println!("Success: {:?}", res), //! } //! # } //! ``` //! ## Handling Errors //! //! All errors produced by the system are provided either as [Result](enum.Result.html) enumeration as return value of //! the doit() methods, or handed as possibly intermediate results to either the //! [Hub Delegate](trait.Delegate.html), or the [Authenticator Delegate](https://docs.rs/yup-oauth2/*/yup_oauth2/trait.AuthenticatorDelegate.html). //! //! When delegates handle errors or intermediate values, they may have a chance to instruct the system to retry. This //! makes the system potentially resilient to all kinds of errors. //! //! ## Uploads and Downloads //! If a method supports downloads, the response body, which is part of the [Result](enum.Result.html), should be //! read by you to obtain the media. //! If such a method also supports a [Response Result](trait.ResponseResult.html), it will return that by default. //! You can see it as meta-data for the actual media. To trigger a media download, you will have to set up the builder by making //! this call: `.param("alt", "media")`. //! //! Methods supporting uploads can do so using up to 2 different protocols: //! *simple* and *resumable*. The distinctiveness of each is represented by customized //! `doit(...)` methods, which are then named `upload(...)` and `upload_resumable(...)` respectively. //! //! ## Customization and Callbacks //! //! You may alter the way an `doit()` method is called by providing a [delegate](trait.Delegate.html) to the //! [Method Builder](trait.CallBuilder.html) before making the final `doit()` call. //! 
Respective methods will be called to provide progress information, as well as determine whether the system should //! retry on failure. //! //! The [delegate trait](trait.Delegate.html) is default-implemented, allowing you to customize it with minimal effort. //! //! ## Optional Parts in Server-Requests //! //! All structures provided by this library are made to be [encodable](trait.RequestValue.html) and //! [decodable](trait.ResponseResult.html) via *json*. Optionals are used to indicate that partial requests and responses //! are valid. //! Most optionals are considered [Parts](trait.Part.html) which are identifiable by name, which will be sent to //! the server to indicate either the set parts of the request or the desired parts in the response. //! //! ## Builder Arguments //! //! Using [method builders](trait.CallBuilder.html), you are able to prepare an action call by repeatedly calling its methods. //! These will always take a single argument, for which the following statements are true. //! //! * [PODs][wiki-pod] are handed by copy //! * strings are passed as `&str` //! * [request values](trait.RequestValue.html) are moved //! //! Arguments will always be copied or cloned into the builder, to make them independent of their original lifetimes. //! //! [wiki-pod]: http://en.wikipedia.org/wiki/Plain_old_data_structure //! [builder-pattern]: http://en.wikipedia.org/wiki/Builder_pattern //! [google-go-api]: https://github.com/google/google-api-go-client //! //! // Unused attributes happen thanks to defined, but unused structures // We don't warn about this, as depending on the API, some data structures or facilities are never used. // Instead of pre-determining this, we just disable the lint. It's manually tuned to not have any // unused imports in fully featured APIs. Same with unused_mut ... . #![allow(unused_imports, unused_mut, dead_code)] // DO NOT EDIT ! // This file was generated automatically from 'src/mako/api/lib.rs.mako' // DO NOT EDIT ! #[macro_use] extern crate serde_derive; extern crate hyper; extern crate serde; extern crate serde_json; extern crate yup_oauth2 as oauth2; extern crate mime; extern crate url; mod cmn; use std::collections::HashMap; use std::cell::RefCell; use std::borrow::BorrowMut; use std::default::Default; use std::collections::BTreeMap; use serde_json as json; use std::io; use std::fs; use std::mem; use std::thread::sleep; use std::time::Duration; pub use cmn::{MultiPartReader, ToParts, MethodInfo, Result, Error, CallBuilder, Hub, ReadSeek, Part, ResponseResult, RequestValue, NestedType, Delegate, DefaultDelegate, MethodsBuilder, Resource, ErrorResponse, remove_json_null_values}; // ############## // UTILITIES ### // ############ /// Identifies an OAuth2 authorization scope. /// A scope is needed when requesting an /// [authorization token](https://developers.google.com/youtube/v3/guides/authentication).
#[derive(PartialEq, Eq, Hash)] pub enum Scope { /// View and manage your data for this application Full, } impl AsRef<str> for Scope { fn as_ref(&self) -> &str { match *self { Scope::Full => "https://www.googleapis.com/auth/appstate", } } } impl Default for Scope { fn default() -> Scope { Scope::Full } } // ######## // HUB ### // ###### /// Central instance to access all AppState related resource activities /// /// # Examples /// /// Instantiate a new hub /// /// ```test_harness,no_run /// extern crate hyper; /// extern crate hyper_rustls; /// extern crate yup_oauth2 as oauth2; /// extern crate google_appstate1 as appstate1; /// use appstate1::UpdateRequest; /// use appstate1::{Result, Error}; /// # #[test] fn egal() { /// use std::default::Default; /// use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// use appstate1::AppState; /// /// // Get an ApplicationSecret instance by some means. It contains the `client_id` and /// // `client_secret`, among other things. /// let secret: ApplicationSecret = Default::default(); /// // Instantiate the authenticator. It will choose a suitable authentication flow for you, /// // unless you replace `None` with the desired Flow. /// // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about /// // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and /// // retrieve them from storage. /// let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// <MemoryStorage as Default>::default(), None); /// let mut hub = AppState::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = UpdateRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.states().update(req, -85) /// .current_state_version("dolores") /// .doit(); /// /// match result { /// Err(e) => match e { /// // The Error enum provides details about what exactly happened. 
/// // You can also just use its `Debug`, `Display` or `Error` traits /// Error::HttpError(_) /// |Error::MissingAPIKey /// |Error::MissingToken(_) /// |Error::Cancelled /// |Error::UploadSizeLimitExceeded(_, _) /// |Error::Failure(_) /// |Error::BadRequest(_) /// |Error::FieldClash(_) /// |Error::JsonDecodeError(_, _) => println!("{}", e), /// }, /// Ok(res) => println!("Success: {:?}", res), /// } /// # } /// ``` pub struct AppState<C, A> { client: RefCell<C>, auth: RefCell<A>, _user_agent: String, _base_url: String, _root_url: String, } impl<'a, C, A> Hub for AppState<C, A> {} impl<'a, C, A> AppState<C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { pub fn new(client: C, authenticator: A) -> AppState<C, A> { AppState { client: RefCell::new(client), auth: RefCell::new(authenticator), _user_agent: "google-api-rust-client/1.0.5".to_string(), _base_url: "https://www.googleapis.com/appstate/v1/".to_string(), _root_url: "https://www.googleapis.com/".to_string(), } } pub fn states(&'a self) -> StateMethods<'a, C, A> { StateMethods { hub: &self } } /// Set the user-agent header field to use in all requests to the server. /// It defaults to `google-api-rust-client/1.0.5`. /// /// Returns the previously set user-agent. pub fn user_agent(&mut self, agent_name: String) -> String { mem::replace(&mut self._user_agent, agent_name) } /// Set the base url to use in all requests to the server. /// It defaults to `https://www.googleapis.com/appstate/v1/`. /// /// Returns the previously set base url. pub fn base_url(&mut self, new_base_url: String) -> String { mem::replace(&mut self._base_url, new_base_url) } /// Set the root url to use in all requests to the server. /// It defaults to `https://www.googleapis.com/`. /// /// Returns the previously set root url. pub fn root_url(&mut self, new_root_url: String) -> String { mem::replace(&mut self._root_url, new_root_url) } } // ############ // SCHEMAS ### // ########## /// This is a JSON template to convert a list-response for app state. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [list states](struct.StateListCall.html) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ListResponse { /// The app state data. pub items: Option<Vec<GetResponse>>, /// Uniquely identifies the type of this resource. Value is always the fixed string appstate#listResponse. pub kind: Option<String>, /// The maximum number of keys allowed for this user. #[serde(rename="maximumKeyCount")] pub maximum_key_count: Option<i32>, } impl ResponseResult for ListResponse {} /// This is a JSON template for an app state resource. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [get states](struct.StateGetCall.html) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct GetResponse { /// The requested data. pub data: Option<String>, /// Uniquely identifies the type of this resource. Value is always the fixed string appstate#getResponse. pub kind: Option<String>, /// The key for the data. #[serde(rename="stateKey")] pub state_key: Option<i32>, /// The current app state version. 
#[serde(rename="currentStateVersion")] pub current_state_version: Option<String>, } impl ResponseResult for GetResponse {} /// This is a JSON template for a requests which update app state /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [update states](struct.StateUpdateCall.html) (request) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct UpdateRequest { /// Uniquely identifies the type of this resource. Value is always the fixed string appstate#updateRequest. pub kind: Option<String>, /// The new app state data that your application is trying to update with. pub data: Option<String>, } impl RequestValue for UpdateRequest {} /// This is a JSON template for an app state write result. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [clear states](struct.StateClearCall.html) (response) /// * [update states](struct.StateUpdateCall.html) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct WriteResult { /// Uniquely identifies the type of this resource. Value is always the fixed string appstate#writeResult. pub kind: Option<String>, /// The written key. #[serde(rename="stateKey")] pub state_key: Option<i32>, /// The version of the data for this key on the server. #[serde(rename="currentStateVersion")] pub current_state_version: Option<String>, } impl ResponseResult for WriteResult {} // ################### // MethodBuilders ### // ################# /// A builder providing access to all methods supported on *state* resources. /// It is not used directly, but through the `AppState` hub. /// /// # Example /// /// Instantiate a resource builder /// /// ```test_harness,no_run /// extern crate hyper; /// extern crate hyper_rustls; /// extern crate yup_oauth2 as oauth2; /// extern crate google_appstate1 as appstate1; /// /// # #[test] fn egal() { /// use std::default::Default; /// use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// use appstate1::AppState; /// /// let secret: ApplicationSecret = Default::default(); /// let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// <MemoryStorage as Default>::default(), None); /// let mut hub = AppState::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // Usually you wouldn't bind this to a variable, but keep calling *CallBuilders* /// // like `clear(...)`, `delete(...)`, `get(...)`, `list(...)` and `update(...)` /// // to build up your call. /// let rb = hub.states(); /// # } /// ``` pub struct StateMethods<'a, C, A> where C: 'a, A: 'a { hub: &'a AppState<C, A>, } impl<'a, C, A> MethodsBuilder for StateMethods<'a, C, A> {} impl<'a, C, A> StateMethods<'a, C, A> { /// Create a builder to help you perform the following task: /// /// Deletes a key and the data associated with it. The key is removed and no longer counts against the key quota. 
Note that since this method is not safe in the face of concurrent modifications, it should only be used for development and testing purposes. Invoking this method in shipping code can result in data loss and data corruption. /// /// # Arguments /// /// * `stateKey` - The key for the data to be retrieved. pub fn delete(&self, state_key: i32) -> StateDeleteCall<'a, C, A> { StateDeleteCall { hub: self.hub, _state_key: state_key, _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Retrieves the data corresponding to the passed key. If the key does not exist on the server, an HTTP 404 will be returned. /// /// # Arguments /// /// * `stateKey` - The key for the data to be retrieved. pub fn get(&self, state_key: i32) -> StateGetCall<'a, C, A> { StateGetCall { hub: self.hub, _state_key: state_key, _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Clears (sets to empty) the data for the passed key if and only if the passed version matches the currently stored version. This method results in a conflict error on version mismatch. /// /// # Arguments /// /// * `stateKey` - The key for the data to be retrieved. pub fn clear(&self, state_key: i32) -> StateClearCall<'a, C, A> { StateClearCall { hub: self.hub, _state_key: state_key, _current_data_version: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Lists all the states keys, and optionally the state data. pub fn list(&self) -> StateListCall<'a, C, A> { StateListCall { hub: self.hub, _include_data: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Update the data associated with the input key if and only if the passed version matches the currently stored version. This method is safe in the face of concurrent writes. Maximum per-key size is 128KB. /// /// # Arguments /// /// * `request` - No description provided. /// * `stateKey` - The key for the data to be retrieved. pub fn update(&self, request: UpdateRequest, state_key: i32) -> StateUpdateCall<'a, C, A> { StateUpdateCall { hub: self.hub, _request: request, _state_key: state_key, _current_state_version: Default::default(), _delegate: Default::default(), _scopes: Default::default(), _additional_params: Default::default(), } } } // ################### // CallBuilders ### // ################# /// Deletes a key and the data associated with it. The key is removed and no longer counts against the key quota. Note that since this method is not safe in the face of concurrent modifications, it should only be used for development and testing purposes. Invoking this method in shipping code can result in data loss and data corruption. /// /// A builder for the *delete* method supported by a *state* resource. /// It is not used directly, but through a `StateMethods` instance. 
/// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_appstate1 as appstate1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use appstate1::AppState; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = AppState::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.states().delete(-63) /// .doit(); /// # } /// ``` pub struct StateDeleteCall<'a, C, A> where C: 'a, A: 'a { hub: &'a AppState<C, A>, _state_key: i32, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for StateDeleteCall<'a, C, A> {} impl<'a, C, A> StateDeleteCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<hyper::client::Response> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "appstate.states.delete", http_method: hyper::method::Method::Delete }); let mut params: Vec<(&str, String)> = Vec::with_capacity((2 + self._additional_params.len())); params.push(("stateKey", self._state_key.to_string())); for &field in ["stateKey"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } let mut url = self.hub._base_url.clone() + "states/{stateKey}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::Full.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{stateKey}", "stateKey")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["stateKey"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut 
*self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Delete, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = res; dlg.finished(true); return Ok(result_value) } } } } /// The key for the data to be retrieved. /// /// Sets the *state key* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn state_key(mut self, new_value: i32) -> StateDeleteCall<'a, C, A> { self._state_key = new_value; self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> StateDeleteCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided. /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for the response. pub fn param<T>(mut self, name: T, value: T) -> StateDeleteCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::Full`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. 
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> StateDeleteCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Retrieves the data corresponding to the passed key. If the key does not exist on the server, an HTTP 404 will be returned. /// /// A builder for the *get* method supported by a *state* resource. /// It is not used directly, but through a `StateMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_appstate1 as appstate1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use appstate1::AppState; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = AppState::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.states().get(-22) /// .doit(); /// # } /// ``` pub struct StateGetCall<'a, C, A> where C: 'a, A: 'a { hub: &'a AppState<C, A>, _state_key: i32, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for StateGetCall<'a, C, A> {} impl<'a, C, A> StateGetCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
pub fn doit(mut self) -> Result<(hyper::client::Response, GetResponse)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "appstate.states.get", http_method: hyper::method::Method::Get }); let mut params: Vec<(&str, String)> = Vec::with_capacity((3 + self._additional_params.len())); params.push(("stateKey", self._state_key.to_string())); for &field in ["alt", "stateKey"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "states/{stateKey}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::Full.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{stateKey}", "stateKey")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["stateKey"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// The key for the data to be retrieved. /// /// Sets the *state key* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. 
pub fn state_key(mut self, new_value: i32) -> StateGetCall<'a, C, A> { self._state_key = new_value; self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> StateGetCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided. /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for the response. pub fn param<T>(mut self, name: T, value: T) -> StateGetCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::Full`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> StateGetCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Clears (sets to empty) the data for the passed key if and only if the passed version matches the currently stored version. This method results in a conflict error on version mismatch. /// /// A builder for the *clear* method supported by a *state* resource. /// It is not used directly, but through a `StateMethods` instance. 
/// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_appstate1 as appstate1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use appstate1::AppState; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = AppState::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.states().clear(-8) /// .current_data_version("justo") /// .doit(); /// # } /// ``` pub struct StateClearCall<'a, C, A> where C: 'a, A: 'a { hub: &'a AppState<C, A>, _state_key: i32, _current_data_version: Option<String>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for StateClearCall<'a, C, A> {} impl<'a, C, A> StateClearCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. pub fn doit(mut self) -> Result<(hyper::client::Response, WriteResult)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "appstate.states.clear", http_method: hyper::method::Method::Post }); let mut params: Vec<(&str, String)> = Vec::with_capacity((4 + self._additional_params.len())); params.push(("stateKey", self._state_key.to_string())); if let Some(value) = self._current_data_version { params.push(("currentDataVersion", value.to_string())); } for &field in ["alt", "stateKey", "currentDataVersion"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "states/{stateKey}/clear"; if self._scopes.len() == 0 { self._scopes.insert(Scope::Full.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{stateKey}", "stateKey")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["stateKey"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => 
token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// The key for the data to be retrieved. /// /// Sets the *state key* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn state_key(mut self, new_value: i32) -> StateClearCall<'a, C, A> { self._state_key = new_value; self } /// The version of the data to be cleared. Version strings are returned by the server. /// /// Sets the *current data version* query property to the given value. pub fn current_data_version(mut self, new_value: &str) -> StateClearCall<'a, C, A> { self._current_data_version = Some(new_value.to_string()); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> StateClearCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided. /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. 
/// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for the response. pub fn param<T>(mut self, name: T, value: T) -> StateClearCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::Full`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> StateClearCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Lists all the states keys, and optionally the state data. /// /// A builder for the *list* method supported by a *state* resource. /// It is not used directly, but through a `StateMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_appstate1 as appstate1; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use appstate1::AppState; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = AppState::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.states().list() /// .include_data(true) /// .doit(); /// # } /// ``` pub struct StateListCall<'a, C, A> where C: 'a, A: 'a { hub: &'a AppState<C, A>, _include_data: Option<bool>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for StateListCall<'a, C, A> {} impl<'a, C, A> StateListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
pub fn doit(mut self) -> Result<(hyper::client::Response, ListResponse)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "appstate.states.list", http_method: hyper::method::Method::Get }); let mut params: Vec<(&str, String)> = Vec::with_capacity((3 + self._additional_params.len())); if let Some(value) = self._include_data { params.push(("includeData", value.to_string())); } for &field in ["alt", "includeData"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "states"; if self._scopes.len() == 0 { self._scopes.insert(Scope::Full.as_ref().to_string(), ()); } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// Whether to include the full data in addition to the version number /// /// Sets the *include data* query property to the given value. pub fn include_data(mut self, new_value: bool) -> StateListCall<'a, C, A> { self._include_data = Some(new_value); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> StateListCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. 
/// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided. /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for the response. pub fn param<T>(mut self, name: T, value: T) -> StateListCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::Full`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> StateListCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Update the data associated with the input key if and only if the passed version matches the currently stored version. This method is safe in the face of concurrent writes. Maximum per-key size is 128KB. /// /// A builder for the *update* method supported by a *state* resource. /// It is not used directly, but through a `StateMethods` instance. 
/// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_appstate1 as appstate1; /// use appstate1::UpdateRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage}; /// # use appstate1::AppState; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate, /// # hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), /// # <MemoryStorage as Default>::default(), None); /// # let mut hub = AppState::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = UpdateRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.states().update(req, -81) /// .current_state_version("labore") /// .doit(); /// # } /// ``` pub struct StateUpdateCall<'a, C, A> where C: 'a, A: 'a { hub: &'a AppState<C, A>, _request: UpdateRequest, _state_key: i32, _current_state_version: Option<String>, _delegate: Option<&'a mut Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C, A> CallBuilder for StateUpdateCall<'a, C, A> {} impl<'a, C, A> StateUpdateCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2::GetToken { /// Perform the operation you have build so far. 
pub fn doit(mut self) -> Result<(hyper::client::Response, WriteResult)> { use std::io::{Read, Seek}; use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location}; let mut dd = DefaultDelegate; let mut dlg: &mut Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(MethodInfo { id: "appstate.states.update", http_method: hyper::method::Method::Put }); let mut params: Vec<(&str, String)> = Vec::with_capacity((5 + self._additional_params.len())); params.push(("stateKey", self._state_key.to_string())); if let Some(value) = self._current_state_version { params.push(("currentStateVersion", value.to_string())); } for &field in ["alt", "stateKey", "currentStateVersion"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "states/{stateKey}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::Full.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{stateKey}", "stateKey")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["stateKey"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } if params.len() > 0 { url.push('?'); url.push_str(&url::form_urlencoded::serialize(params)); } let mut json_mime_type = mime::Mime(mime::TopLevel::Application, mime::SubLevel::Json, Default::default()); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.borrow_mut().token(self._scopes.keys()) { Ok(token) => token, Err(err) => { match dlg.token(&*err) { Some(token) => token, None => { dlg.finished(false); return Err(Error::MissingToken(err)) } } } }; let auth_header = Authorization(Bearer { token: token.access_token }); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); let mut req = client.borrow_mut().request(hyper::method::Method::Put, &url) .header(UserAgent(self.hub._user_agent.clone())) .header(auth_header.clone()) .header(ContentType(json_mime_type.clone())) .header(ContentLength(request_size as u64)) .body(&mut request_value_reader); dlg.pre_request(); req.send() }; match req_result { Err(err) => { if let oauth2::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(Error::HttpError(err)) } Ok(mut res) => { if !res.status.is_success() { let mut json_err = String::new(); res.read_to_string(&mut json_err).unwrap(); if let oauth2::Retry::After(d) = dlg.http_failure(&res, json::from_str(&json_err).ok(), json::from_str(&json_err).ok()) { sleep(d); continue; } dlg.finished(false); return match 
json::from_str::<ErrorResponse>(&json_err){ Err(_) => Err(Error::Failure(res)), Ok(serr) => Err(Error::BadRequest(serr)) } } let result_value = { let mut json_response = String::new(); res.read_to_string(&mut json_response).unwrap(); match json::from_str(&json_response) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&json_response, &err); return Err(Error::JsonDecodeError(json_response, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: UpdateRequest) -> StateUpdateCall<'a, C, A> { self._request = new_value; self } /// The key for the data to be retrieved. /// /// Sets the *state key* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn state_key(mut self, new_value: i32) -> StateUpdateCall<'a, C, A> { self._state_key = new_value; self } /// The version of the app state your application is attempting to update. If this does not match the current version, this method will return a conflict error. If there is no data stored on the server for this key, the update will succeed irrespective of the value of this parameter. /// /// Sets the *current state version* query property to the given value. pub fn current_state_version(mut self, new_value: &str) -> StateUpdateCall<'a, C, A> { self._current_state_version = Some(new_value.to_string()); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut Delegate) -> StateUpdateCall<'a, C, A> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known paramters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided. /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *userIp* (query-string) - IP address of the site where the request originates. Use this if you want to enforce per-user limits. /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *alt* (query-string) - Data format for the response. 
pub fn param<T>(mut self, name: T, value: T) -> StateUpdateCall<'a, C, A> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::Full`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> StateUpdateCall<'a, C, A> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } }
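// ---------------------------------------------------------------------------
// Editor's sketch (not part of the generated bindings): how the builders above
// combine for an optimistic-concurrency write. The key, payload and version are
// supplied by the caller; signing/auth setup is the same as in the doc examples.
#[allow(dead_code)]
fn save_state_sketch<C, A>(hub: &AppState<C, A>, key: i32, req: UpdateRequest, version: &str)
    -> Result<WriteResult>
    where C: BorrowMut<hyper::Client>, A: oauth2::GetToken {
    // On a version mismatch the server reports a conflict, which surfaces as
    // `Error::BadRequest`; a caller would typically re-read the state with
    // `hub.states().get(key).doit()` and retry with the fresh version string.
    hub.states()
       .update(req, key)
       .current_state_version(version)
       .doit()
       .map(|(_response, write_result)| write_result)
}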
43.515234
342
0.579988
505004a4a8bfab45f7bfc0c55c50c0df7b1a2c88
1,007
//! Support for the KDE Idle Protocol

use crate::seat::Seat;
use crate::wayland_sys::server::wl_display as wl_server_display;
use wlroots_sys::{
    wl_display, wlr_idle, wlr_idle_create, wlr_idle_notify_activity, wlr_idle_set_enabled
};

#[derive(Debug)]
pub struct Manager {
    manager: *mut wlr_idle
}

impl Manager {
    pub(crate) unsafe fn new(display: *mut wl_server_display) -> Option<Self> {
        let manager_raw = wlr_idle_create(display as *mut wl_display);
        if !manager_raw.is_null() {
            Some(Manager { manager: manager_raw })
        } else {
            None
        }
    }

    /// Restart the idle timers for the given seat.
    pub fn notify_activity(&mut self, seat: &Seat) {
        unsafe { wlr_idle_notify_activity(self.manager, seat.as_ptr()) }
    }

    /// Enable or disable idle timers for the given seat.
    /// (The underlying C API treats a null seat as "all seats", but this
    /// wrapper always passes a concrete seat.)
    pub fn set_enabled(&mut self, seat: &Seat, enabled: bool) {
        unsafe { wlr_idle_set_enabled(self.manager, seat.as_ptr(), enabled) }
    }
}
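// Editor's sketch (not part of the original module): the expected call pattern.
// A compositor would own one `Manager` per display and drive it from its input
// handlers; the surrounding event plumbing is assumed, not shown.
#[allow(dead_code)]
pub fn on_input_activity(idle: &mut Manager, seat: &Seat) {
    // Any keyboard/pointer/touch event counts as activity and restarts the timers.
    idle.notify_activity(seat);
}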
27.972222
89
0.660377
2f0a043f9a7de907158e28a1bb7a617659b94ef2
1,148
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use target::{Target, TargetOptions};
use super::apple_ios_base::{opts, Arch};

pub fn target() -> Target {
    Target {
        llvm_target: "arm64-apple-ios".to_string(),
        target_endian: "little".to_string(),
        target_pointer_width: "64".to_string(),
        data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".to_string(),
        arch: "aarch64".to_string(),
        target_os: "ios".to_string(),
        target_env: "".to_string(),
        target_vendor: "apple".to_string(),
        options: TargetOptions {
            features: "+neon,+fp-armv8,+cyclone".to_string(),
            eliminate_frame_pointer: false,
            .. opts(Arch::Arm64)
        },
    }
}
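// Editor's note (illustrative only, not in the original file): a basic sanity
// check over the spec above; field names follow the `Target` struct used
// elsewhere in this module tree.
#[cfg(test)]
mod tests {
    use super::target;

    #[test]
    fn arm64_ios_spec_is_consistent() {
        let t = target();
        assert_eq!(t.llvm_target, "arm64-apple-ios");
        assert_eq!(t.arch, "aarch64");
        assert_eq!(t.target_pointer_width, "64");
    }
}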
37.032258
69
0.651568
1d04e593cb6644b0bc2e8444999048ece0581fc9
676
extern crate arrayidx;
extern crate rearray;

use arrayidx::*;
use rearray::*;
use rearray::linalg::*;

#[test]
fn test_sgemv() {
    let a: Rearray<f32> = Rearray::zeros(IndexNd::from(vec![17, 8]));
    let x: Rearray<f32> = Rearray::zeros(IndexNd::from(vec![8]));
    let mut y: Rearray<f32> = Rearray::zeros(IndexNd::from(vec![17]));
    y.matrix_vector_mult(1.0, a.clone(), x.clone(), 0.0);
}

#[test]
fn test_sgemm() {
    let a: Rearray<f32> = Rearray::zeros(IndexNd::from(vec![17, 8]));
    let b: Rearray<f32> = Rearray::zeros(IndexNd::from(vec![8, 15]));
    let mut y: Rearray<f32> = Rearray::zeros(IndexNd::from(vec![17, 15]));
    y.matrix_mult(1.0, a.clone(), b.clone(), 0.0);
}
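// Editor's sketch: same API with a non-zero beta, assuming the usual BLAS-style
// convention y = alpha * a * b + beta * y for `matrix_mult`.
#[test]
fn test_sgemm_accumulate() {
    let a: Rearray<f32> = Rearray::zeros(IndexNd::from(vec![17, 8]));
    let b: Rearray<f32> = Rearray::zeros(IndexNd::from(vec![8, 15]));
    let mut y: Rearray<f32> = Rearray::zeros(IndexNd::from(vec![17, 15]));
    // Accumulate into the existing contents of `y` instead of overwriting them.
    y.matrix_mult(1.0, a.clone(), b.clone(), 1.0);
}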
29.391304
72
0.637574
1e7e6ec726f81a402f2f51c944e4f45bce949dfa
1,889
use cookie::Cookie;
use time::OffsetDateTime;

use crate::{Cookie as CrateCookie, CookieExpires as CrateCookieExpires};

impl<'a> From<&'a CrateCookie> for Cookie<'a> {
    fn from(cc: &'a CrateCookie) -> Self {
        let mut c = Self::new(&cc.name, &cc.value);
        c.set_domain(&cc.domain);
        match cc.expires {
            CrateCookieExpires::Session => {
                c.set_expires(None);
            }
            CrateCookieExpires::DateTime(dt) => c.set_expires(OffsetDateTime::from_unix_timestamp(
                dt.naive_utc().timestamp(),
            )),
        }
        c.set_http_only(cc.http_only);
        c.set_path(&cc.path);
        c.set_secure(cc.secure);
        c
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use chrono::{DateTime, NaiveDateTime, Utc};

    #[test]
    fn test_convert() -> Result<(), String> {
        let mut cc = CrateCookie {
            http_only: true,
            domain: ".example.com".to_owned(),
            include_subdomains: true,
            path: "/".to_owned(),
            secure: true,
            expires: CrateCookieExpires::Session,
            name: "foo".to_owned(),
            value: "bar".to_owned(),
        };
        let c = Cookie::from(&cc);
        assert_eq!(c.http_only(), Some(true));
        assert_eq!(c.domain(), Some(".example.com"));
        assert_eq!(c.path(), Some("/"));
        assert_eq!(c.secure(), Some(true));
        assert_eq!(c.expires(), None);
        assert_eq!(c.name(), "foo");
        assert_eq!(c.value(), "bar");

        cc.expires = CrateCookieExpires::DateTime(DateTime::<Utc>::from_utc(
            NaiveDateTime::from_timestamp(1640586740, 0),
            Utc,
        ));
        let c = Cookie::from(&cc);
        assert_eq!(
            c.expires(),
            Some(OffsetDateTime::from_unix_timestamp(1640586740))
        );

        Ok(())
    }
}
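// Editor's sketch (not part of the original module): converting a whole slice of
// stored cookies at once; `Cookie::from` is the `From` impl defined above.
#[allow(dead_code)]
fn convert_all(stored: &[CrateCookie]) -> Vec<Cookie<'_>> {
    stored.iter().map(Cookie::from).collect()
}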
27.779412
98
0.530439
d51f4a93ad018df8909596bac9a007adb4e6d5a5
3,750
//! Loads a Cargo project into a static instance of analysis, without support //! for incorporating changes. use std::{path::Path, sync::Arc}; use anyhow::Result; use crossbeam_channel::{unbounded, Receiver}; use ide::{AnalysisHost, Change}; use ide_db::base_db::CrateGraph; use project_model::{CargoConfig, ProcMacroClient, ProjectManifest, ProjectWorkspace}; use vfs::{loader::Handle, AbsPath, AbsPathBuf}; use crate::reload::{ProjectFolders, SourceRootConfig}; pub fn load_cargo( root: &Path, load_out_dirs_from_check: bool, with_proc_macro: bool, ) -> Result<(AnalysisHost, vfs::Vfs)> { let root = AbsPathBuf::assert(std::env::current_dir()?.join(root)); let root = ProjectManifest::discover_single(&root)?; let ws = ProjectWorkspace::load( root, &CargoConfig { load_out_dirs_from_check, ..Default::default() }, )?; let (sender, receiver) = unbounded(); let mut vfs = vfs::Vfs::default(); let mut loader = { let loader = vfs_notify::NotifyHandle::spawn(Box::new(move |msg| sender.send(msg).unwrap())); Box::new(loader) }; let proc_macro_client = if with_proc_macro { let path = std::env::current_exe()?; Some(ProcMacroClient::extern_process(path, &["proc-macro"]).unwrap()) } else { None }; let crate_graph = ws.to_crate_graph(None, proc_macro_client.as_ref(), &mut |path: &AbsPath| { let contents = loader.load_sync(path); let path = vfs::VfsPath::from(path.to_path_buf()); vfs.set_file_contents(path.clone(), contents); vfs.file_id(&path) }); let project_folders = ProjectFolders::new(&[ws]); loader.set_config(vfs::loader::Config { load: project_folders.load, watch: vec![] }); log::debug!("crate graph: {:?}", crate_graph); let host = load(crate_graph, project_folders.source_root_config, &mut vfs, &receiver); Ok((host, vfs)) } fn load( crate_graph: CrateGraph, source_root_config: SourceRootConfig, vfs: &mut vfs::Vfs, receiver: &Receiver<vfs::loader::Message>, ) -> AnalysisHost { let lru_cap = std::env::var("RA_LRU_CAP").ok().and_then(|it| it.parse::<usize>().ok()); let mut host = AnalysisHost::new(lru_cap); let mut analysis_change = Change::new(); // wait until Vfs has loaded all roots for task in receiver { match task { vfs::loader::Message::Progress { n_done, n_total } => { if n_done == n_total { break; } } vfs::loader::Message::Loaded { files } => { for (path, contents) in files { vfs.set_file_contents(path.into(), contents) } } } } let changes = vfs.take_changes(); for file in changes { if file.exists() { let contents = vfs.file_contents(file.file_id).to_vec(); if let Ok(text) = String::from_utf8(contents) { analysis_change.change_file(file.file_id, Some(Arc::new(text))) } } } let source_roots = source_root_config.partition(&vfs); analysis_change.set_roots(source_roots); analysis_change.set_crate_graph(crate_graph); host.apply_change(analysis_change); host } #[cfg(test)] mod tests { use super::*; use hir::Crate; #[test] fn test_loading_rust_analyzer() { let path = Path::new(env!("CARGO_MANIFEST_DIR")).parent().unwrap().parent().unwrap(); let (host, _vfs) = load_cargo(path, false, false).unwrap(); let n_crates = Crate::all(host.raw_database()).len(); // RA has quite a few crates, but the exact count doesn't matter assert!(n_crates > 20); } }
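// Editor's sketch: a typical call site for `load_cargo`, assuming the caller just
// wants an `AnalysisHost` for the project in the current directory and does not
// need proc-macro expansion or build-script outputs.
#[allow(dead_code)]
fn load_current_dir() -> Result<AnalysisHost> {
    let (host, _vfs) = load_cargo(
        Path::new("."),
        /* load_out_dirs_from_check */ false,
        /* with_proc_macro */ false,
    )?;
    Ok(host)
}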
32.894737
97
0.6168
e6d66c503f72d66d7b2ba9fafab56355773c6d2b
5,199
// Copyright (C) 2020 Quentin Kniep <[email protected]> // Distributed under terms of the MIT license. mod dealer; mod game; mod hand; mod player; mod rules; mod shoe; mod strategy; #[macro_use] extern crate prettytable; use indicatif::{ProgressBar, ProgressStyle}; use prettytable::Table; use game::Game; use player::Player; use rules::MINIMUM_BET; use strategy::*; const TESTS: usize = 100; const ROUNDS: usize = 10000; fn main() { let mut roi_no = 0.0; let mut roi_hilo = 0.0; let mut roi_ko = 0.0; let mut roi_uston = 0.0; let mut exp_earning_no = 0; let mut exp_earning_hilo = 0; let mut exp_earning_ko = 0; let mut exp_earning_uston = 0; let mut avg_final_bankroll_no = 0.0; let mut avg_final_bankroll_hilo = 0.0; let mut avg_final_bankroll_ko = 0.0; let mut avg_final_bankroll_uston = 0.0; let mut co_bankrupt_no = 0.0; let mut co_bankrupt_hilo = 0.0; let mut co_bankrupt_ko = 0.0; let mut co_bankrupt_uston = 0.0; println!( "Running {} tests with {} games each, a total of {} games...", TESTS, ROUNDS, TESTS * ROUNDS ); let pb = ProgressBar::new((TESTS * ROUNDS) as u64); pb.set_style(ProgressStyle::default_bar() .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} - {per_sec} ({eta})") .progress_chars("#>-")); for _ in 0..TESTS { pb.inc(ROUNDS as u64); let mut game = Game::new(); game.join(Player::new(&NO_COUNT)); game.join(Player::new(&HILO_COUNT)); game.join(Player::new(&KO_COUNT)); game.join(Player::new(&USTON_SS_COUNT)); let mut bankrupt_no = false; let mut bankrupt_hilo = false; let mut bankrupt_ko = false; let mut bankrupt_uston = false; for _ in 0..ROUNDS { let bank_before = game.bankrolls(); game.play_round(); let bet_sizes = game.bet_sizes(); let bank = game.bankrolls(); roi_no += (bank[0] - bank_before[0]) as f64 / bet_sizes[0] as f64; roi_hilo += (bank[1] - bank_before[1]) as f64 / bet_sizes[1] as f64; roi_ko += (bank[2] - bank_before[2]) as f64 / bet_sizes[2] as f64; roi_uston += (bank[3] - bank_before[3]) as f64 / bet_sizes[3] as f64; if bank[0] < 0 { bankrupt_no = true; } if bank[1] < 0 { bankrupt_hilo = true; } if bank[2] < 0 { bankrupt_ko = true; } if bank[3] < 0 { bankrupt_uston = true; } } let bank = game.bankrolls(); exp_earning_no += bank[0] - 10000; exp_earning_hilo += bank[1] - 10000; exp_earning_ko += bank[2] - 10000; exp_earning_uston += bank[3] - 10000; avg_final_bankroll_no += game.bankrolls()[0] as f64 / TESTS as f64; avg_final_bankroll_hilo += game.bankrolls()[1] as f64 / TESTS as f64; avg_final_bankroll_ko += game.bankrolls()[2] as f64 / TESTS as f64; avg_final_bankroll_uston += game.bankrolls()[3] as f64 / TESTS as f64; if bankrupt_no { co_bankrupt_no += 1.0 / TESTS as f64; } if bankrupt_hilo { co_bankrupt_hilo += 1.0 / TESTS as f64; } if bankrupt_ko { co_bankrupt_ko += 1.0 / TESTS as f64; } if bankrupt_uston { co_bankrupt_uston += 1.0 / TESTS as f64; } } pb.finish_with_message("Done!"); println!(""); let mut table = Table::new(); table.set_format(*prettytable::format::consts::FORMAT_NO_BORDER_LINE_SEPARATOR); table.set_titles(row!["Metric", "No Count", "HiLo Count", "KO Count", "Uston SS Count"]); table.add_row(row![ "Avg. 
final bankroll", format!("${:.2}", avg_final_bankroll_no), format!("${:.2}", avg_final_bankroll_hilo), format!("${:.2}", avg_final_bankroll_ko), format!("${:.2}", avg_final_bankroll_uston), ]); table.add_row(row![ "ROI", format!("{:.2}%", roi_no * 100.0 / (ROUNDS * TESTS) as f64), format!("{:.2}%", roi_hilo * 100.0 / (ROUNDS * TESTS) as f64), format!("{:.2}%", roi_ko * 100.0 / (ROUNDS * TESTS) as f64), format!("{:.2}%", roi_uston * 100.0 / (ROUNDS * TESTS) as f64), ]); table.add_row(row![ "Expected earnings", format!("{:.2}% min bet", exp_earning_no as f64 * 100.0 / (ROUNDS * TESTS * MINIMUM_BET) as f64), format!("{:.2}% min bet", exp_earning_hilo as f64 * 100.0 / (ROUNDS * TESTS * MINIMUM_BET) as f64), format!("{:.2}% min bet", exp_earning_ko as f64 * 100.0 / (ROUNDS * TESTS * MINIMUM_BET) as f64), format!("{:.2}% min bet", exp_earning_uston as f64 * 100.0 / (ROUNDS * TESTS * MINIMUM_BET) as f64), ]); table.add_row(row![ "Risk of ruin", format!("{:.2}%", co_bankrupt_no * 100.0), format!("{:.2}%", co_bankrupt_hilo * 100.0), format!("{:.2}%", co_bankrupt_ko * 100.0), format!("{:.2}%", co_bankrupt_uston * 100.0), ]); table.printstd(); }
32.49375
110
0.56357
1a3404addd0d2be820448f36c14b8e13cdbd7f12
4,487
use { lazy_regex::*, std::path::{Path, PathBuf}, }; // TODO virer et utiliser PathBuf directement ? #[derive(Debug)] pub struct DupFile { pub path: PathBuf, // pub staged_for_removal: bool, } /// the list of files having a hash /// TODO rename DupSet ? #[derive(Debug, Default)] pub struct DupSet { pub files: Vec<DupFile>, // identical files pub file_len: u64, } #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub struct DupFileRef { pub dup_set_idx: usize, pub dup_file_idx: usize, } impl DupFile { pub fn new(path: PathBuf) -> Self { Self { path, //staged_for_removal: false, } } } impl DupFileRef { pub fn path(self, dups: &[DupSet]) -> &Path { &dups[self.dup_set_idx].files[self.dup_file_idx].path } pub fn file_name(self, dups: &[DupSet]) -> String { self.path(dups) .file_name() .map_or_else(|| "".to_string(), |n| n.to_string_lossy().to_string()) } /// get the file name when the file has a name like "thing (3).jpg" /// or "thing (3rd copy).png" pub fn copy_name(self, dups: &[DupSet]) -> Option<&str> { copy_name(self.path(dups)) } /// tells whether the file has a name like "thing (3).jpg" /// or "thing (3rd copy).png" pub fn is_copy_named(self, dups: &[DupSet]) -> bool { self.copy_name(dups).is_some() } } /// get the name if this path is of a "copy" file, that is an usual name for a copy pub fn copy_name(path: &Path) -> Option<&str> { path.file_name() .and_then(std::ffi::OsStr::to_str) .filter(|n| { regex_is_match!( r#"(?x) .+ \(( \d+ | [^)]* copy )\) (\.\w+)? $ "#, n ) }) } #[test] fn test_is_copy_named() { use std::path::PathBuf; let copies = &[ "/some/path/to/bla (3).jpg", "bla (3455).jpg", "uuuuu (copy).rs", "/home/dys/Images/pink hexapodes (another copy).jpeg", "~/uuuuu (copy)", "uuuuu (3rd copy)", ]; for s in copies { assert!(copy_name(&PathBuf::from(s)).is_some()); } let not_copies = &[ "copy", "copy.txt", "bla.png", "/home/dys/not a copy", "(don't copy)", ]; for s in not_copies { assert!(copy_name(&PathBuf::from(s)).is_none()); } } #[cfg(test)] mod qc_tests { use std::str::FromStr; use super::*; use ::quickcheck::Arbitrary; quickcheck! { fn qc_is_copy_named_for_copy_paths(wrapper: WrapperForCopy) -> bool{ copy_name(&wrapper.path).is_some() } } quickcheck! { fn qc_is_not_copy_named_for_not_copy_paths(wrapper: WrapperForNotACopy) -> bool{ copy_name(&wrapper.path).is_none() } } #[derive(Clone, Debug)] struct WrapperForCopy { path: PathBuf, } #[derive(Clone, Debug)] struct WrapperForNotACopy { path: PathBuf, } impl Arbitrary for WrapperForCopy { fn arbitrary(g: &mut quickcheck::Gen) -> Self { let copies = &[ "/some/path/to/bla (3).jpg", "bla (3455).jpg", "uuuuu (copy).rs", "/home/dys/Images/pink hexapodes (another copy).jpeg", "~/uuuuu (copy)", "uuuuu (3rd copy)", ]; let path = generate_string(g, copies); Self { path } } } impl Arbitrary for WrapperForNotACopy { fn arbitrary(g: &mut quickcheck::Gen) -> Self { let not_copies = &[ "copy", "copy.txt", "bla.png", "/home/dys/not a copy", "(don't copy)", ]; let path = generate_string(g, not_copies); Self { path } } } fn generate_string(g: &mut quickcheck::Gen, candidates: &[&str]) -> PathBuf { // TODO - loop to ensure the generated pathbuf doesn't already contain inappropriate // text let candidate: &str = candidates[usize::arbitrary(g) % candidates.len()]; let s = PathBuf::arbitrary(g); let os_string = s.into_os_string().into_string().unwrap(); let os_string = format!("{}{}", os_string, candidate); PathBuf::from_str(&os_string).unwrap() } }
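// Editor's sketch: one way `copy_name` is meant to be used — flagging the
// "copy"-named members of a duplicate set as natural removal candidates.
// Purely illustrative; the real selection logic lives elsewhere in the crate.
#[allow(dead_code)]
fn copy_named_paths(set: &DupSet) -> Vec<&Path> {
    set.files
        .iter()
        .map(|f| f.path.as_path())
        .filter(|p| copy_name(p).is_some())
        .collect()
}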
26.087209
92
0.520169
fb40e814b0c064e6cadb0bb05145f86851a5c36d
1,441
use aoc::*; use md5::Digest; use smallvec::SmallVec; use std::iter::once; fn hash_generator(input: &[u8]) -> impl Iterator<Item = (usize, Digest)> { let input_len = input.len(); let mut n: usize = 1; let mut data = SmallVec::<[u8; 24]>::from_slice(input); data.push(b'1'); once((n, md5::compute(&data))).chain(std::iter::from_fn(move || { let mut carry = 1; for (pos, x) in data[input_len..].iter_mut().enumerate().rev() { if *x + carry <= b'9' { *x += carry; break; } else if pos == 0 { data[input_len..].fill(b'0'); data.push(b'0'); data[input_len] = b'1'; break; } else { *x = b'0'; carry = 1; } } n += 1; Some((n, md5::compute(&data))) })) } fn find_digest(input: &[u8], f: impl Fn(&Digest) -> bool) -> Result<usize> { hash_generator(input).find(|(_, digest)| f(digest)).map(|(n, _)| n).value() } fn main() -> Result<()> { let input = setup(file!())?; let input = String::from_utf8_lossy(&input); let input = input.trim().as_bytes(); let result1 = find_digest(input, |digest| digest[..2] == [0, 0] && digest[2] <= 0x0F)?; let result2 = find_digest(input, |digest| digest[..3] == [0, 0, 0])?; println!("{}", result1); println!("{}", result2); Ok(()) }
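// Editor's sketch: the classic example from the original puzzle statement
// (secret key "abcdef" -> 609043 for five leading hex zeros). Ignored by default
// because it has to grind through several hundred thousand MD5 hashes.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    #[ignore]
    fn five_leading_zeros_example() {
        let n = find_digest(b"abcdef", |digest| digest[..2] == [0, 0] && digest[2] <= 0x0F);
        assert_eq!(n.ok(), Some(609043));
    }
}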
27.188679
91
0.48508
f715603ba1975de3dd1ca4fb1c023f492e639ccd
2,206
use serde::{Deserialize, Serialize}; use crate::prototypes::{Prototype, Visitor}; use crate::types::*; // TODO: Import only specific types #[derive(Clone, Debug, Serialize, Deserialize)] pub struct Unit { /// attack_parameters :: AttackParameters attack_parameters: AttackParameters, /// distance_per_frame :: float distance_per_frame: f32, /// distraction_cooldown :: uint32 distraction_cooldown: u32, /// movement_speed :: float movement_speed: f32, /// pollution_to_join_attack :: float pollution_to_join_attack: f32, /// run_animation :: RotatedAnimation run_animation: RotatedAnimation, /// vision_distance :: double vision_distance: f64, /// affected_by_tiles :: bool (optional) affected_by_tiles: Option<bool>, /// ai_settings :: UnitAISettings (optional) ai_settings: Option<UnitAISettings>, /// alternative_attacking_frame_sequence :: table (optional) alternative_attacking_frame_sequence: Option<Vec<Todo>>, /// can_open_gates :: bool (optional) can_open_gates: Option<bool>, /// dying_sound :: Sound (optional) dying_sound: Option<Sound>, /// has_belt_immunity :: bool (optional) has_belt_immunity: Option<bool>, /// light :: LightDefinition (optional) light: Option<LightDefinition>, /// max_pursue_distance :: double (optional) max_pursue_distance: Option<f64>, /// min_pursue_time :: uint32 (optional) min_pursue_time: Option<u32>, /// move_while_shooting :: bool (optional) move_while_shooting: Option<bool>, /// radar_range :: uint32 (optional) radar_range: Option<u32>, /// render_layer :: RenderLayer (optional) render_layer: Option<RenderLayer>, /// rotation_speed :: float (optional) rotation_speed: Option<f32>, /// running_sound_animation_positions :: table (array) of float (optional) running_sound_animation_positions: Option<Vec<f32>>, /// spawning_time_modifier :: double (optional) spawning_time_modifier: Option<f64>, /// walking_sound :: Sound (optional) walking_sound: Option<Sound>, } impl Prototype for Unit { const TYPE: Option<&'static str> = Some("unit"); }
27.234568
78
0.690843
5befc313f52369396cfe815c1c7404073e35207c
17,717
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ move_types::{account_address::AccountAddress, language_storage::TypeTag}, types::{ account_config::{xdx_type_tag, xus_tag, XDX_NAME, XUS_NAME}, chain_id::ChainId, transaction::{authenticator::AuthenticationKey, RawTransaction, TransactionPayload}, }, }; use serde::{Deserialize, Serialize}; use std::fmt; pub use diem_transaction_builder::stdlib; use diem_types::transaction::Script; pub struct TransactionBuilder { sender: Option<AccountAddress>, sequence_number: Option<u64>, payload: TransactionPayload, max_gas_amount: u64, gas_unit_price: u64, gas_currency_code: String, expiration_timestamp_secs: u64, chain_id: ChainId, } impl TransactionBuilder { pub fn sender(mut self, sender: AccountAddress) -> Self { self.sender = Some(sender); self } pub fn sequence_number(mut self, sequence_number: u64) -> Self { self.sequence_number = Some(sequence_number); self } pub fn max_gas_amount(mut self, max_gas_amount: u64) -> Self { self.max_gas_amount = max_gas_amount; self } pub fn gas_unit_price(mut self, gas_unit_price: u64) -> Self { self.gas_unit_price = gas_unit_price; self } pub fn gas_currency_code<T: Into<String>>(mut self, gas_currency_code: T) -> Self { self.gas_currency_code = gas_currency_code.into(); self } pub fn chain_id(mut self, chain_id: ChainId) -> Self { self.chain_id = chain_id; self } pub fn expiration_timestamp_secs(mut self, expiration_timestamp_secs: u64) -> Self { self.expiration_timestamp_secs = expiration_timestamp_secs; self } pub fn build(self) -> RawTransaction { RawTransaction::new( self.sender.expect("sender must have been set"), self.sequence_number .expect("sequence number must have been set"), self.payload, self.max_gas_amount, self.gas_unit_price, self.gas_currency_code, self.expiration_timestamp_secs, self.chain_id, ) } } #[derive(Clone, Debug)] pub struct TransactionFactory { max_gas_amount: u64, gas_unit_price: u64, gas_currency: Currency, transaction_expiration_time: u64, chain_id: ChainId, diem_version: u64, } impl TransactionFactory { pub fn new(chain_id: ChainId) -> Self { Self { max_gas_amount: 1_000_000, gas_unit_price: 0, gas_currency: Currency::XUS, transaction_expiration_time: 100, chain_id, diem_version: 0, } } pub fn with_max_gas_amount(mut self, max_gas_amount: u64) -> Self { self.max_gas_amount = max_gas_amount; self } pub fn with_gas_unit_price(mut self, gas_unit_price: u64) -> Self { self.gas_unit_price = gas_unit_price; self } pub fn with_gas_currency(mut self, gas_currency: Currency) -> Self { self.gas_currency = gas_currency; self } pub fn with_transaction_expiration_time(mut self, transaction_expiration_time: u64) -> Self { self.transaction_expiration_time = transaction_expiration_time; self } pub fn with_chain_id(mut self, chain_id: ChainId) -> Self { self.chain_id = chain_id; self } pub fn with_diem_version(mut self, diem_version: u64) -> Self { self.diem_version = diem_version; self } pub fn payload(&self, payload: TransactionPayload) -> TransactionBuilder { self.transaction_builder(payload) } pub fn add_currency_to_account(&self, currency: Currency) -> TransactionBuilder { let currency = currency.type_tag(); if self.is_script_function_enabled() { self.payload(stdlib::encode_add_currency_to_account_script_function( currency, )) } else { self.script(stdlib::encode_add_currency_to_account_script(currency)) } } pub fn add_recovery_rotation_capability( &self, recovery_address: AccountAddress, ) -> TransactionBuilder { if self.is_script_function_enabled() 
{ self.payload( stdlib::encode_add_recovery_rotation_capability_script_function(recovery_address), ) } else { self.script(stdlib::encode_add_recovery_rotation_capability_script( recovery_address, )) } } pub fn add_validator_and_reconfigure( &self, sliding_nonce: u64, validator_name: Vec<u8>, validator_address: AccountAddress, ) -> TransactionBuilder { if self.is_script_function_enabled() { self.payload( stdlib::encode_add_validator_and_reconfigure_script_function( sliding_nonce, validator_name, validator_address, ), ) } else { self.script(stdlib::encode_add_validator_and_reconfigure_script( sliding_nonce, validator_name, validator_address, )) } } pub fn burn_txn_fees(&self, coin_type: Currency) -> TransactionBuilder { let coin_type = coin_type.type_tag(); if self.is_script_function_enabled() { self.payload(stdlib::encode_burn_txn_fees_script_function(coin_type)) } else { self.script(stdlib::encode_burn_txn_fees_script(coin_type)) } } pub fn burn_with_amount( &self, token: Currency, sliding_nonce: u64, preburn_address: AccountAddress, amount: u64, ) -> TransactionBuilder { self.payload(stdlib::encode_burn_with_amount_script_function( token.type_tag(), sliding_nonce, preburn_address, amount, )) } pub fn cancel_burn_with_amount( &self, token: Currency, preburn_address: AccountAddress, amount: u64, ) -> TransactionBuilder { self.payload(stdlib::encode_cancel_burn_with_amount_script_function( token.type_tag(), preburn_address, amount, )) } pub fn peer_to_peer( &self, currency: Currency, payee: AccountAddress, amount: u64, ) -> TransactionBuilder { self.peer_to_peer_with_metadata(currency, payee, amount, Vec::new(), Vec::new()) } pub fn peer_to_peer_with_metadata( &self, currency: Currency, payee: AccountAddress, amount: u64, metadata: Vec<u8>, metadata_signature: Vec<u8>, ) -> TransactionBuilder { if self.is_script_function_enabled() { self.payload(stdlib::encode_peer_to_peer_with_metadata_script_function( currency.type_tag(), payee, amount, metadata, metadata_signature, )) } else { self.script(stdlib::encode_peer_to_peer_with_metadata_script( currency.type_tag(), payee, amount, metadata, metadata_signature, )) } } pub fn create_child_vasp_account( &self, coin_type: Currency, child_auth_key: AuthenticationKey, add_all_currencies: bool, child_initial_balance: u64, ) -> TransactionBuilder { if self.is_script_function_enabled() { self.payload(stdlib::encode_create_child_vasp_account_script_function( coin_type.type_tag(), child_auth_key.derived_address(), child_auth_key.prefix().to_vec(), add_all_currencies, child_initial_balance, )) } else { self.script(stdlib::encode_create_child_vasp_account_script( coin_type.type_tag(), child_auth_key.derived_address(), child_auth_key.prefix().to_vec(), add_all_currencies, child_initial_balance, )) } } pub fn create_designated_dealer( &self, coin_type: Currency, sliding_nonce: u64, auth_key: AuthenticationKey, human_name: &str, add_all_currencies: bool, ) -> TransactionBuilder { if self.is_script_function_enabled() { self.payload(stdlib::encode_create_designated_dealer_script_function( coin_type.type_tag(), sliding_nonce, auth_key.derived_address(), auth_key.prefix().to_vec(), human_name.as_bytes().into(), add_all_currencies, )) } else { self.script(stdlib::encode_create_designated_dealer_script( coin_type.type_tag(), sliding_nonce, auth_key.derived_address(), auth_key.prefix().to_vec(), human_name.as_bytes().into(), add_all_currencies, )) } } pub fn create_parent_vasp_account( &self, coin_type: Currency, sliding_nonce: u64, parent_auth_key: AuthenticationKey, 
human_name: &str, add_all_currencies: bool, ) -> TransactionBuilder { if self.is_script_function_enabled() { self.payload(stdlib::encode_create_parent_vasp_account_script_function( coin_type.type_tag(), sliding_nonce, parent_auth_key.derived_address(), parent_auth_key.prefix().to_vec(), human_name.as_bytes().into(), add_all_currencies, )) } else { self.script(stdlib::encode_create_parent_vasp_account_script( coin_type.type_tag(), sliding_nonce, parent_auth_key.derived_address(), parent_auth_key.prefix().to_vec(), human_name.as_bytes().into(), add_all_currencies, )) } } pub fn create_recovery_address(&self) -> TransactionBuilder { if self.is_script_function_enabled() { self.payload(stdlib::encode_create_recovery_address_script_function()) } else { self.script(stdlib::encode_create_recovery_address_script()) } } pub fn rotate_authentication_key(&self, new_key: AuthenticationKey) -> TransactionBuilder { if self.is_script_function_enabled() { self.payload(stdlib::encode_rotate_authentication_key_script_function( new_key.to_vec(), )) } else { self.script(stdlib::encode_rotate_authentication_key_script( new_key.to_vec(), )) } } pub fn rotate_authentication_key_with_recovery_address( &self, recovery_address: AccountAddress, to_recover: AccountAddress, new_key: AuthenticationKey, ) -> TransactionBuilder { if self.is_script_function_enabled() { self.payload( stdlib::encode_rotate_authentication_key_with_recovery_address_script_function( recovery_address, to_recover, new_key.to_vec(), ), ) } else { self.script( stdlib::encode_rotate_authentication_key_with_recovery_address_script( recovery_address, to_recover, new_key.to_vec(), ), ) } } pub fn rotate_dual_attestation_info( &self, new_url: Vec<u8>, new_key: Vec<u8>, ) -> TransactionBuilder { if self.is_script_function_enabled() { self.payload(stdlib::encode_rotate_dual_attestation_info_script_function( new_url, new_key, )) } else { self.script(stdlib::encode_rotate_dual_attestation_info_script( new_url, new_key, )) } } pub fn publish_shared_ed25519_public_key(&self, public_key: Vec<u8>) -> TransactionBuilder { if self.is_script_function_enabled() { self.payload( stdlib::encode_publish_shared_ed25519_public_key_script_function(public_key), ) } else { self.script(stdlib::encode_publish_shared_ed25519_public_key_script( public_key, )) } } pub fn publish_rotate_ed25519_public_key(&self, public_key: Vec<u8>) -> TransactionBuilder { if self.is_script_function_enabled() { self.payload( stdlib::encode_rotate_shared_ed25519_public_key_script_function(public_key), ) } else { self.script(stdlib::encode_rotate_shared_ed25519_public_key_script( public_key, )) } } pub fn update_diem_version(&self, sliding_nonce: u64, major: u64) -> TransactionBuilder { if self.is_script_function_enabled() { self.payload(stdlib::encode_update_diem_version_script_function( sliding_nonce, major, )) } else { self.script(stdlib::encode_update_diem_version_script( sliding_nonce, major, )) } } pub fn remove_validator_and_reconfigure( &self, sliding_nonce: u64, validator_name: Vec<u8>, validator_address: AccountAddress, ) -> TransactionBuilder { if self.is_script_function_enabled() { self.payload( stdlib::encode_remove_validator_and_reconfigure_script_function( sliding_nonce, validator_name, validator_address, ), ) } else { self.script(stdlib::encode_remove_validator_and_reconfigure_script( sliding_nonce, validator_name, validator_address, )) } } pub fn add_diem_id_domain( &self, address: AccountAddress, domain: Vec<u8>, ) -> TransactionBuilder { 
self.payload(stdlib::encode_add_diem_id_domain_script_function( address, domain, )) } pub fn remove_diem_id_domain( &self, address: AccountAddress, domain: Vec<u8>, ) -> TransactionBuilder { self.payload(stdlib::encode_remove_diem_id_domain_script_function( address, domain, )) } // // Internal Helpers // fn script(&self, script: Script) -> TransactionBuilder { self.payload(TransactionPayload::Script(script)) } fn transaction_builder(&self, payload: TransactionPayload) -> TransactionBuilder { TransactionBuilder { sender: None, sequence_number: None, payload, max_gas_amount: self.max_gas_amount, gas_unit_price: self.gas_unit_price, gas_currency_code: self.gas_currency.into(), expiration_timestamp_secs: self.expiration_timestamp(), chain_id: self.chain_id, } } fn expiration_timestamp(&self) -> u64 { std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap() .as_secs() + self.transaction_expiration_time } fn is_script_function_enabled(&self) -> bool { self.diem_version >= 2 } } pub struct DualAttestationMessage { message: Box<[u8]>, } impl DualAttestationMessage { pub fn new<M: Into<Vec<u8>>>(metadata: M, reciever: AccountAddress, amount: u64) -> Self { let mut message = metadata.into(); bcs::serialize_into(&mut message, &reciever).unwrap(); bcs::serialize_into(&mut message, &amount).unwrap(); message.extend(b"@@$$DIEM_ATTEST$$@@"); Self { message: message.into(), } } pub fn message(&self) -> &[u8] { &self.message } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub enum Currency { XDX, XUS, } impl Currency { pub fn as_str(&self) -> &str { match self { Currency::XDX => XDX_NAME, Currency::XUS => XUS_NAME, } } pub fn type_tag(&self) -> TypeTag { match self { Currency::XDX => xdx_type_tag(), Currency::XUS => xus_tag(), } } } impl PartialEq<str> for Currency { fn eq(&self, other: &str) -> bool { self.as_str().eq(other) } } impl PartialEq<Currency> for str { fn eq(&self, other: &Currency) -> bool { other.as_str().eq(self) } } impl PartialEq<String> for Currency { fn eq(&self, other: &String) -> bool { self.as_str().eq(other) } } impl PartialEq<Currency> for String { fn eq(&self, other: &Currency) -> bool { other.as_str().eq(self) } } impl From<Currency> for String { fn from(currency: Currency) -> Self { currency.as_str().to_owned() } } impl fmt::Display for Currency { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(self.as_str()) } }
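// Editor's sketch (not part of the original module): putting the factory together
// for a simple payment. The sender/payee addresses and sequence number are assumed
// to come from the caller, `ChainId::test()` stands in for whatever network you
// target, and signing the resulting `RawTransaction` is out of scope here.
#[allow(dead_code)]
fn example_peer_to_peer(
    sender: AccountAddress,
    sender_sequence_number: u64,
    payee: AccountAddress,
) -> RawTransaction {
    TransactionFactory::new(ChainId::test())
        .with_gas_unit_price(1)
        .peer_to_peer(Currency::XUS, payee, 1_000)
        .sender(sender)
        .sequence_number(sender_sequence_number)
        .build()
}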
29.577629
98
0.583507
ab15547f4ee70d5fbd3acb6e08fc23cb9ca39230
3,271
// Copyright 2021 The Hypatia Authors // All rights reserved // // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file or at // https://opensource.org/licenses/MIT. //! Task State Segment support. //! //! In the host, we only support the 64-bit TSS. use core::arch::asm; use crate::segment; use crate::tss::TSS; /// Support the x86_64 64-bit Global Descriptor Table. /// /// We waste a few bytes per CPU by allocating a 4KiB page for /// the GDT, then we map that at the known GDT location in the /// per-CPU virtual memory segment, but we pad that out to 64KiB /// by mapping the zero page repeatedly beyond the end of the /// GDT proper. /// /// We do this, in part, because VMX unconditionally resets the /// limit on the GDT to 65535 on VM exit and we don't want to /// reset the segment descriptor each time, nor have hardware /// accidentally doing strange things because of segmentation. #[repr(C, align(4096))] pub struct GDT { null: segment::Descriptor, hypertext: segment::Descriptor, _hyperdata: segment::Descriptor, _userdata: segment::Descriptor, _usertext: segment::Descriptor, _unused: segment::Descriptor, // For alignment. task: segment::TaskStateDescriptor, } impl GDT { /// Returns a new GDT with a task segment descriptor that refers /// to the given TSS. pub fn new(task_state: &TSS) -> GDT { GDT { null: segment::Descriptor::null(), hypertext: segment::Descriptor::code64(), _hyperdata: segment::Descriptor::empty(), _userdata: segment::Descriptor::empty(), _usertext: segment::Descriptor::empty(), _unused: segment::Descriptor::empty(), task: task_state.descriptor(), } } /// Returns the code selector for %cs pub const fn code_selector() -> u16 { 1 << 3 } /// Returns the task selector for %tr pub const fn task_selector() -> u16 { 6 << 3 } /// Loads the GDTR with this GDT by building a descriptor on the /// stack and then invoking the LGDT instruction on that descriptor. /// /// # Safety /// /// Called on a valid GDT. unsafe fn lgdt(&self) { let base = self as *const _ as usize as u64; const LIMIT: u16 = core::mem::size_of::<GDT>() as u16 - 1; asm!(r#" subq $16, %rsp; movq {}, 8(%rsp); movq ${}, 6(%rsp); lgdt 6(%rsp); addq $16, %rsp; "#, in(reg) base, const LIMIT, options(att_syntax)); } /// Loads the %tr register with a selector referring to a GDT's /// TSS descriptor. /// /// # Safety /// /// Private function that's called from a public function that /// ensures that a valid GDT with a task descriptor in the correct /// position is loaded before this is invoked. unsafe fn ltr(selector: u16) { asm!("ltr {:x};", in(reg) selector); } /// Loads this GDT and sets the task register to refer to its /// TSS descriptor. /// /// # Safety /// /// Must be called on a valid, initialized GDT. pub unsafe fn load(&self) { self.lgdt(); Self::ltr(Self::task_selector()); } }
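/// Editor's sketch (not in the original file): the intended bring-up order on a
/// CPU. The GDT must out-live the time it is loaded in GDTR, so it is assumed to
/// sit at a stable per-CPU address; `&'static` stands in for that here.
///
/// # Safety
/// Must be called exactly once per CPU, with a GDT built from that CPU's TSS.
#[allow(dead_code)]
pub unsafe fn example_install(gdt: &'static GDT) {
    // LGDT, then LTR with the fixed task selector (slot 6 above).
    gdt.load();
    // Subsequent far transfers into hypervisor text use GDT::code_selector() as %cs.
}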
31.152381
72
0.610517
08de48fcee539e992e5ffb777fcf8cc7b8c0cd1b
40
mustang::can_run_this!();

fn main() {}
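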
10
25
0.625
50f55a1a0c8e97aa0581535b58fc1dd009131e9a
8,251
#![feature(test)] extern crate test; use std::collections::HashMap; use std::iter::Iterator; use std::rc::Rc; mod examples; #[derive(Clone)] pub struct CSP<K,V> where K: std::cmp::Eq, K: std::hash::Hash, V: std::clone::Clone { vars: HashMap<K, DVar<V>>, constrs: Vec<(K, K, Rc<Fn(&V,&V) -> bool>)> } pub struct CSPSolution<K,V> where K: std::cmp::Eq, K: std::hash::Hash, V: std::clone::Clone { problem_stack: Vec<CSP<K,V>>, variables: Vec<K>, nvars: usize, branches: Vec<usize>, done: bool, } #[derive(Clone)] struct DVar<V> where V: std::clone::Clone { options: Vec<V>, } impl<K,V> CSP<K,V> where K: std::cmp::Eq, K: std::clone::Clone, K: std::hash::Hash, V: std::clone::Clone, { fn new() -> CSP<K,V> { CSP { vars: HashMap::new(), constrs: Vec::new() } } fn add_var(&mut self, key: K, options: Vec<V>) { let var = DVar { options: options }; self.vars.insert(key, var); } fn add_constr(&mut self, key1: K, key2: K, constr: Rc<Fn(&V, &V) -> bool>) { self.constrs.push((key1, key2, constr)); } fn reduce(&mut self) -> Option<bool> { let mut reduced = false; let mut did_some = false; let mut first = true; while did_some || first { first = false; did_some = false; for &(ref x, ref y, ref cf) in self.constrs.iter() { let mut goodopts = Vec::new(); { let xvar = self.vars.get(&x).unwrap(); let nopts = xvar.options.len(); for xo in xvar.options.iter() { if self.vars.get(&y).unwrap().options.iter() .any(|y| cf(xo, y)) { goodopts.push(xo.clone()); } } if goodopts.len() == 0 { return None }; if goodopts.len() < nopts { reduced = true; did_some = true }; } self.vars.get_mut(&x).unwrap().options = goodopts; } } Some(reduced) } fn solutions(&mut self) -> CSPSolution<K,V> { let _ = self.reduce(); CSPSolution::new(self) } } impl<K,V> CSPSolution<K,V> where K: std::cmp::Eq, K: std::clone::Clone, K: std::hash::Hash, V: std::clone::Clone, { fn new(csp: &CSP<K,V>) -> CSPSolution<K,V> { let vars: Vec<K> = csp.vars.keys().map(|x| (*x).clone()).collect(); let nvars = vars.len(); let mut stack = Vec::with_capacity(nvars + 1); stack.push(csp.clone()); let mut ret = CSPSolution { problem_stack: stack, variables: vars, nvars: nvars, branches: vec![0; nvars], done: false, }; if !ret.find_consistent(0) { ret.done = true } ret } fn find_consistent(&mut self, start: usize) -> bool { let mut cur = start; loop { if cur >= self.nvars { break } let _ = self.problem_stack.drain((cur + 1)..); let mut csp = self.problem_stack.last().unwrap().clone(); csp.vars.get_mut(&self.variables[cur]).unwrap().restrict(self.branches[cur]); if csp.reduce().is_none() { cur = match self.incr_branches(cur) { None => return false, Some(s) => s, } } else { self.problem_stack.push(csp); cur += 1; } } true } fn incr_branches(&mut self, last: usize) -> Option<usize> { for cur in (last + 1)..self.nvars { self.branches[cur] = 0 } let mut cur = last; while self.branches[cur] + 1 == self.problem_stack[cur].vars.get(&self.variables[cur]).unwrap().options.len() { self.branches[cur] = 0; if cur > 0 { cur -= 1 } else { self.done = true; return None } } self.branches[cur] += 1; Some(cur) } fn incr_consistent(&mut self) -> bool { let last = self.nvars - 1; match self.incr_branches(last) { None => false, Some(s) => self.find_consistent(s), } } fn result(&self) -> HashMap<K,V> { let mut map = HashMap::new(); for (k, v) in self.problem_stack.last().unwrap().vars.iter() { let _ = map.insert((*k).clone(), v.options[0].clone()); } map } } impl<K,V> Iterator for CSPSolution<K,V> where K: std::cmp::Eq, K: std::clone::Clone, K: std::hash::Hash, V: std::clone::Clone, { type Item = 
HashMap<K,V>; fn next(&mut self) -> Option<Self::Item> { if self.done { return None } let res = Some(self.result()); self.done = !self.incr_consistent(); res } } impl <V> DVar<V> where V: std::clone::Clone { fn restrict(&mut self, which: usize) { let opt = self.options[which].clone(); self.options.clear(); self.options.push(opt); } fn set(&mut self, what: &V) { self.options.clear(); self.options.push(what.clone()); } } #[cfg(test)] mod tests { use super::CSP; use test::Bencher; use super::examples; use std::rc::Rc; #[test] fn simple_reduce_test() { let mut csp = CSP::new(); csp.add_var(1, vec![1,2]); csp.add_var(2, vec![1,2]); csp.add_constr(1, 2, Rc::new(|x,_| *x == 1)); csp.reduce(); let mut nsols = 0; for m in csp.solutions() { nsols += 1; if nsols > 10 { assert!(false) } assert!(m[&1] == 1); assert!(m[&2] == 1 || m[&2] == 2); } assert!(nsols == 2); } #[bench] fn eight_queens(b: &mut Bencher) { let mut csp = examples::n_queens(8); assert!(!csp.reduce().unwrap()); assert!(csp.vars.values().all(|d| d.options.len() == 8)); assert!(!csp.reduce().unwrap()); assert!(csp.vars.values().all(|d| d.options.len() == 8)); b.iter(|| { let mut nsols = 0; for _ in csp.solutions() { nsols += 1; if nsols > 1000 { break } } assert!(nsols == 92); }); } #[bench] fn sudoku_1(b: &mut Bencher) { let mut csp = examples::sudoku(); csp.vars.get_mut(&(2,2)).unwrap().set(&9); csp.vars.get_mut(&(2,3)).unwrap().set(&6); csp.vars.get_mut(&(2,4)).unwrap().set(&8); csp.vars.get_mut(&(2,6)).unwrap().set(&2); csp.vars.get_mut(&(2,7)).unwrap().set(&7); csp.vars.get_mut(&(2,8)).unwrap().set(&4); csp.vars.get_mut(&(3,2)).unwrap().set(&2); csp.vars.get_mut(&(3,8)).unwrap().set(&6); csp.vars.get_mut(&(4,2)).unwrap().set(&3); csp.vars.get_mut(&(4,4)).unwrap().set(&2); csp.vars.get_mut(&(4,6)).unwrap().set(&4); csp.vars.get_mut(&(4,8)).unwrap().set(&5); csp.vars.get_mut(&(6,2)).unwrap().set(&5); csp.vars.get_mut(&(6,4)).unwrap().set(&1); csp.vars.get_mut(&(6,6)).unwrap().set(&9); csp.vars.get_mut(&(6,8)).unwrap().set(&3); csp.vars.get_mut(&(7,2)).unwrap().set(&6); csp.vars.get_mut(&(7,8)).unwrap().set(&8); csp.vars.get_mut(&(8,2)).unwrap().set(&7); csp.vars.get_mut(&(8,3)).unwrap().set(&3); csp.vars.get_mut(&(8,4)).unwrap().set(&6); csp.vars.get_mut(&(8,6)).unwrap().set(&8); csp.vars.get_mut(&(8,7)).unwrap().set(&2); csp.vars.get_mut(&(8,8)).unwrap().set(&9); println!(""); b.iter(|| { assert!(csp.clone().reduce().unwrap()) }); let _ = csp.reduce(); for i in 1..10 { for j in 1..10 { print!("{} ", csp.vars.get(&(i,j)).unwrap().options.len()); } println!(""); } println!(""); println!("{:?}", csp.vars.get(&(18,1)).unwrap().options); assert!(false); b.iter(|| { let mut nsols = 0; for _ in csp.solutions() { nsols += 1; assert!(false) } assert!(nsols == 1); }); } }
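// Small illustrative use of the solver above (mirrors simple_reduce_test);
// it assumes it lives in the same module, since CSP's methods are crate-private.
fn csp_example() {
    let mut csp = CSP::new();
    csp.add_var("x", vec![1, 2]);
    csp.add_var("y", vec![1, 2]);
    csp.add_constr("x", "y", Rc::new(|x, _| *x == 1));
    for solution in csp.solutions() {
        // Each yielded solution is a HashMap<K, V>; the constraint pins x to 1.
        assert_eq!(solution["x"], 1);
    }
}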
29.679856
89
0.481639
08ab7f4bb169d047fdc0fad08a2e98271aa2cf16
2,686
use crc_all::Crc; use nrf52840_hal::twim::{Error, Instance, Twim}; pub struct SensorData { pub co2: f32, pub temperature: f32, pub humidity: f32, } pub const DEFAULT_ADDRESS: u8 = 0x61; pub struct SCD30<T: Instance>(Twim<T>); impl<T> SCD30<T> where T: Instance, { pub fn init(i2c2: Twim<T>) -> Self { SCD30(i2c2) } pub fn get_firmware_version(&mut self) -> Result<[u8; 2], Error> { //This is the command we write to the sensor for getting the firmware version, //The command is made up of 2 bytes //0xd1 is the command for reading the firmware version let command: [u8; 2] = [0xd1, 0x00]; let mut rd_buffer = [0u8; 2]; self.0.write(DEFAULT_ADDRESS, &command)?; self.0.read(DEFAULT_ADDRESS, &mut rd_buffer)?; let major = u8::from_be(rd_buffer[0]); let minor = u8::from_be(rd_buffer[1]); Ok([major, minor]) } pub fn start_continuous_measurement(&mut self, pressure: u16) -> Result<(), Error> { let mut command: [u8; 5] = [0x00, 0x10, 0x00, 0x00, 0x00]; let argument_bytes = &pressure.to_be_bytes(); command[2] = argument_bytes[0]; command[3] = argument_bytes[1]; let mut crc = Crc::<u8>::new(0x31, 8, 0xff, 0x00, false); crc.update(&pressure.to_be_bytes()); command[4] = crc.finish(); self.0.write(DEFAULT_ADDRESS, &command)?; Ok(()) } pub fn data_ready(&mut self) -> Result<bool, Error> { let command: [u8; 2] = [0x02, 0x02]; let mut rd_buffer = [0u8; 3]; self.0.write(DEFAULT_ADDRESS, &command)?; self.0.read(DEFAULT_ADDRESS, &mut rd_buffer)?; Ok(u16::from_be_bytes([rd_buffer[0], rd_buffer[1]]) == 1) } pub fn read_measurement(&mut self) -> Result<SensorData, Error> { let command: [u8; 2] = [0x03, 0x00]; let mut rd_buffer = [0u8; 18]; self.0.write(DEFAULT_ADDRESS, &command)?; self.0.read(DEFAULT_ADDRESS, &mut rd_buffer)?; let data = SensorData { co2: f32::from_bits(u32::from_be_bytes([ rd_buffer[0], rd_buffer[1], rd_buffer[3], rd_buffer[4], ])), temperature: f32::from_bits(u32::from_be_bytes([ rd_buffer[6], rd_buffer[7], rd_buffer[9], rd_buffer[10], ])), humidity: f32::from_bits(u32::from_be_bytes([ rd_buffer[12], rd_buffer[13], rd_buffer[15], rd_buffer[16], ])), }; Ok(data) } }
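// Hypothetical polling loop (not in the original file); `twim` is assumed to be
// an already-configured nrf52840 TWIM peripheral.
fn read_co2_blocking<T: Instance>(twim: Twim<T>) -> Result<SensorData, Error> {
    let mut sensor = SCD30::init(twim);
    // Passing 0 mBar leaves ambient-pressure compensation disabled.
    sensor.start_continuous_measurement(0)?;
    while !sensor.data_ready()? {
        // Real firmware would delay or yield here instead of busy-polling.
    }
    sensor.read_measurement()
}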
28.574468
88
0.544676
debfe80e832183e9656136dce35f29e290078e11
11,657
use super::stacker::{Addr, MemoryArena, TermHashMap}; use crate::fieldnorm::FieldNormReaders; use crate::postings::recorder::{BufferLender, NothingRecorder, Recorder, TFAndPositionRecorder, TermFrequencyRecorder, TermScoreRecorder}; use crate::postings::UnorderedTermId; use crate::postings::{FieldSerializer, InvertedIndexSerializer}; use crate::schema::IndexRecordOption; use crate::schema::{Field, FieldEntry, FieldType, Schema, Term}; use crate::termdict::TermOrdinal; use crate::tokenizer::TokenStream; use crate::tokenizer::{Token, MAX_TOKEN_LEN}; use crate::DocId; use fnv::FnvHashMap; use std::collections::HashMap; use std::io; use std::marker::PhantomData; use std::ops::DerefMut; fn posting_from_field_entry(field_entry: &FieldEntry) -> Box<dyn PostingsWriter> { match *field_entry.field_type() { FieldType::Str(ref text_options) => text_options .get_indexing_options() .map(|indexing_options| match indexing_options.index_option() { IndexRecordOption::Basic => { SpecializedPostingsWriter::<NothingRecorder>::new_boxed() } IndexRecordOption::WithFreqs => { SpecializedPostingsWriter::<TermFrequencyRecorder>::new_boxed() } IndexRecordOption::WithFreqsAndPositions => { SpecializedPostingsWriter::<TFAndPositionRecorder>::new_boxed() } IndexRecordOption::WithScore => { SpecializedPostingsWriter::<TermScoreRecorder>::new_boxed() } }) .unwrap_or_else(|| SpecializedPostingsWriter::<NothingRecorder>::new_boxed()), FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) | FieldType::HierarchicalFacet => SpecializedPostingsWriter::<NothingRecorder>::new_boxed(), FieldType::Bytes => { // FieldType::Bytes cannot actually be indexed. // TODO fix during the indexer refactoring described in #276 SpecializedPostingsWriter::<NothingRecorder>::new_boxed() } } } pub struct MultiFieldPostingsWriter { heap: MemoryArena, schema: Schema, term_index: TermHashMap, per_field_postings_writers: Vec<Box<dyn PostingsWriter>>, } fn make_field_partition( term_offsets: &[(&[u8], Addr, UnorderedTermId)], ) -> Vec<(Field, usize, usize)> { let term_offsets_it = term_offsets .iter() .map(|(key, _, _)| Term::wrap(key).field()) .enumerate(); let mut prev_field_opt = None; let mut fields = vec![]; let mut offsets = vec![]; for (offset, field) in term_offsets_it { if Some(field) != prev_field_opt { prev_field_opt = Some(field); fields.push(field); offsets.push(offset); } } offsets.push(term_offsets.len()); let mut field_offsets = vec![]; for i in 0..fields.len() { field_offsets.push((fields[i], offsets[i], offsets[i + 1])); } field_offsets } impl MultiFieldPostingsWriter { /// Create a new `MultiFieldPostingsWriter` given /// a schema and a heap. 
pub fn new(schema: &Schema, table_bits: usize) -> MultiFieldPostingsWriter { let term_index = TermHashMap::new(table_bits); let per_field_postings_writers: Vec<_> = schema .fields() .map(|(_, field_entry)| posting_from_field_entry(field_entry)) .collect(); MultiFieldPostingsWriter { heap: MemoryArena::new(), schema: schema.clone(), term_index, per_field_postings_writers, } } pub fn mem_usage(&self) -> usize { self.term_index.mem_usage() + self.heap.mem_usage() } pub fn index_text( &mut self, doc: DocId, field: Field, token_stream: &mut dyn TokenStream, ) -> u32 { let postings_writer = self.per_field_postings_writers[field.field_id() as usize].deref_mut(); postings_writer.index_text( &mut self.term_index, doc, field, token_stream, &mut self.heap, ) } pub fn subscribe(&mut self, doc: DocId, term: &Term) -> UnorderedTermId { let postings_writer = self.per_field_postings_writers[term.field().field_id() as usize].deref_mut(); postings_writer.subscribe(&mut self.term_index, doc, 0u32, 0u32, term, &mut self.heap) } /// Serialize the inverted index. /// It pushes all term, one field at a time, towards the /// postings serializer. pub fn serialize( &self, serializer: &mut InvertedIndexSerializer, fieldnorm_readers: FieldNormReaders, ) -> crate::Result<HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>> { let mut term_offsets: Vec<(&[u8], Addr, UnorderedTermId)> = self.term_index.iter().collect(); term_offsets.sort_unstable_by_key(|&(k, _, _)| k); let mut unordered_term_mappings: HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>> = HashMap::new(); let field_offsets = make_field_partition(&term_offsets); for (field, start, stop) in field_offsets { let field_entry = self.schema.get_field_entry(field); match *field_entry.field_type() { FieldType::Str(_) | FieldType::HierarchicalFacet => { // populating the (unordered term ord) -> (ordered term ord) mapping // for the field. let unordered_term_ids = term_offsets[start..stop] .iter() .map(|&(_, _, bucket)| bucket); let mapping: FnvHashMap<UnorderedTermId, TermOrdinal> = unordered_term_ids .enumerate() .map(|(term_ord, unord_term_id)| { (unord_term_id as UnorderedTermId, term_ord as TermOrdinal) }) .collect(); unordered_term_mappings.insert(field, mapping); } FieldType::U64(_) | FieldType::I64(_) | FieldType::F64(_) | FieldType::Date(_) => {} FieldType::Bytes => {} } let postings_writer = &self.per_field_postings_writers[field.field_id() as usize]; let fieldnorm_reader = fieldnorm_readers.get_field(field); let mut field_serializer = serializer.new_field( field, postings_writer.total_num_tokens(), fieldnorm_reader, )?; postings_writer.serialize( &term_offsets[start..stop], &mut field_serializer, &self.term_index.heap, &self.heap, )?; field_serializer.close()?; } Ok(unordered_term_mappings) } } /// The `PostingsWriter` is in charge of receiving documenting /// and building a `Segment` in anonymous memory. /// /// `PostingsWriter` writes in a `MemoryArena`. pub trait PostingsWriter { /// Record that a document contains a term at a given position. /// /// * doc - the document id /// * pos - the term position (expressed in tokens) /// * term - the term /// * heap - heap used to store the postings informations as well as the terms /// in the hashmap. fn subscribe( &mut self, term_index: &mut TermHashMap, doc: DocId, pos: u32, score: u32, term: &Term, heap: &mut MemoryArena, ) -> UnorderedTermId; /// Serializes the postings on disk. /// The actual serialization format is handled by the `PostingsSerializer`. 
fn serialize( &self, term_addrs: &[(&[u8], Addr, UnorderedTermId)], serializer: &mut FieldSerializer<'_>, term_heap: &MemoryArena, heap: &MemoryArena, ) -> io::Result<()>; /// Tokenize a text and subscribe all of its token. fn index_text( &mut self, term_index: &mut TermHashMap, doc_id: DocId, field: Field, token_stream: &mut dyn TokenStream, heap: &mut MemoryArena, ) -> u32 { let mut term = Term::for_field(field); let mut sink = |token: &Token| { // We skip all tokens with a len greater than u16. if token.text.len() <= MAX_TOKEN_LEN { term.set_text(token.text.as_str()); self.subscribe(term_index, doc_id, token.position as u32, token.score as u32, &term, heap); } else { info!( "A token exceeding MAX_TOKEN_LEN ({}>{}) was dropped. Search for \ MAX_TOKEN_LEN in the documentation for more information.", token.text.len(), MAX_TOKEN_LEN ); } }; token_stream.process(&mut sink) } fn total_num_tokens(&self) -> u64; } /// The `SpecializedPostingsWriter` is just here to remove dynamic /// dispatch to the recorder information. pub(crate) struct SpecializedPostingsWriter<Rec: Recorder + 'static> { total_num_tokens: u64, _recorder_type: PhantomData<Rec>, } impl<Rec: Recorder + 'static> SpecializedPostingsWriter<Rec> { /// constructor pub fn new() -> SpecializedPostingsWriter<Rec> { SpecializedPostingsWriter { total_num_tokens: 0u64, _recorder_type: PhantomData, } } /// Builds a `SpecializedPostingsWriter` storing its data in a heap. pub fn new_boxed() -> Box<dyn PostingsWriter> { Box::new(SpecializedPostingsWriter::<Rec>::new()) } } impl<Rec: Recorder + 'static> PostingsWriter for SpecializedPostingsWriter<Rec> { fn subscribe( &mut self, term_index: &mut TermHashMap, doc: DocId, position: u32, score: u32, term: &Term, heap: &mut MemoryArena, ) -> UnorderedTermId { debug_assert!(term.as_slice().len() >= 4); self.total_num_tokens += 1; term_index.mutate_or_create(term, |opt_recorder: Option<Rec>| { if let Some(mut recorder) = opt_recorder { let current_doc = recorder.current_doc(); if current_doc != doc { recorder.close_doc(heap); recorder.new_doc(doc, heap); } recorder.record_position(position, score, heap); recorder } else { let mut recorder = Rec::new(); recorder.new_doc(doc, heap); recorder.record_position(position, score, heap); recorder } }) as UnorderedTermId } fn serialize( &self, term_addrs: &[(&[u8], Addr, UnorderedTermId)], serializer: &mut FieldSerializer<'_>, termdict_heap: &MemoryArena, heap: &MemoryArena, ) -> io::Result<()> { let mut buffer_lender = BufferLender::default(); for &(term_bytes, addr, _) in term_addrs { let recorder: Rec = termdict_heap.read(addr); let term_doc_freq = recorder.term_doc_freq().unwrap_or(0u32); serializer.new_term(&term_bytes[4..], term_doc_freq)?; recorder.serialize(&mut buffer_lender, serializer, heap)?; serializer.close_term()?; } Ok(()) } fn total_num_tokens(&self) -> u64 { self.total_num_tokens } }
36.201863
138
0.584027
56d6e18428d89863c91eb1a8bf8a7bcc1dd31b29
296
pub use self::debug_marker::DebugMarker;
pub use self::debug_report::DebugReport;
pub use self::debug_utils::DebugUtils;
pub use self::metal_surface::MetalSurface;
pub use self::tooling_info::ToolingInfo;

mod debug_marker;
mod debug_report;
mod debug_utils;
mod metal_surface;
mod tooling_info;
24.666667
42
0.810811
5b1aa399f59c5c74b86d014b40b12ee4bafa00dd
2,724
use super::{BaseRocksSecondaryIndex, Chunk, IndexId, RocksSecondaryIndex, RocksTable, TableId}; use crate::base_rocks_secondary_index; use crate::metastore::{IdRow, MetaStoreEvent}; use crate::rocks_table_impl; use byteorder::{BigEndian, WriteBytesExt}; use rocksdb::DB; use serde::{Deserialize, Deserializer}; use std::io::Cursor; impl Chunk { pub fn new(partition_id: u64, row_count: usize) -> Chunk { Chunk { partition_id, row_count: row_count as u64, uploaded: false, active: false, last_used: None, } } pub fn get_row_count(&self) -> u64 { self.row_count } pub fn get_full_name(&self, chunk_id: u64) -> String { chunk_file_name(chunk_id) } pub fn get_partition_id(&self) -> u64 { self.partition_id } pub fn set_uploaded(&self, uploaded: bool) -> Chunk { Chunk { partition_id: self.partition_id, row_count: self.row_count, uploaded, active: uploaded, last_used: self.last_used.clone(), } } pub fn deactivate(&self) -> Chunk { Chunk { partition_id: self.partition_id, row_count: self.row_count, uploaded: self.uploaded, active: false, last_used: self.last_used.clone(), } } pub fn uploaded(&self) -> bool { self.uploaded } pub fn active(&self) -> bool { self.active } } pub fn chunk_file_name(chunk_id: u64) -> String { format!("{}.chunk.parquet", chunk_id) } #[derive(Clone, Copy, Debug)] pub(crate) enum ChunkRocksIndex { PartitionId = 1, } rocks_table_impl!(Chunk, ChunkRocksTable, TableId::Chunks, { vec![Box::new(ChunkRocksIndex::PartitionId)] }); base_rocks_secondary_index!(Chunk, ChunkRocksIndex); #[derive(Hash, Clone, Debug)] pub enum ChunkIndexKey { ByPartitionId(u64), } impl RocksSecondaryIndex<Chunk, ChunkIndexKey> for ChunkRocksIndex { fn typed_key_by(&self, row: &Chunk) -> ChunkIndexKey { match self { ChunkRocksIndex::PartitionId => ChunkIndexKey::ByPartitionId(row.partition_id), } } fn key_to_bytes(&self, key: &ChunkIndexKey) -> Vec<u8> { match key { ChunkIndexKey::ByPartitionId(partition_id) => { let mut buf = Cursor::new(Vec::new()); buf.write_u64::<BigEndian>(*partition_id).unwrap(); buf.into_inner() } } } fn is_unique(&self) -> bool { match self { ChunkRocksIndex::PartitionId => false, } } fn get_id(&self) -> IndexId { *self as IndexId } }
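// Sketch of the chunk life cycle implied by the methods above (not part of
// the original file): create, mark uploaded, then deactivate.
fn chunk_lifecycle_example() {
    let chunk = Chunk::new(42, 1_000);
    assert!(!chunk.uploaded() && !chunk.active());

    let uploaded = chunk.set_uploaded(true);
    assert!(uploaded.uploaded() && uploaded.active());

    let retired = uploaded.deactivate();
    assert!(!retired.active());
    assert_eq!(retired.get_full_name(7), "7.chunk.parquet");
}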
24.990826
95
0.59141
8feb009b623b4639de32d5af0f57e4eee990af2c
12,042
//! Thread safe scheduler use crate::queue::Notifier; use crate::NotificationId; use crossbeam_queue::SegQueue; use std::cmp::Ordering as CmpOrdering; use std::collections::{BTreeSet, HashSet}; use std::fmt::Debug; use std::hash::{Hash, Hasher}; use std::ops::{Add, Sub}; use std::sync::atomic::{AtomicBool, AtomicU32, Ordering}; use std::sync::{Arc, RwLock}; use std::thread::JoinHandle; use std::{ fmt, thread, time::{Duration, Instant}, }; /// Schedules notification deliveries #[derive(Debug)] pub struct NotificationScheduler { notifier: Arc<dyn Notifier>, scheduler: Arc<Scheduler>, } impl NotificationScheduler { /// Creates a new scheduler that uses the provided notifier to deliver notifications pub fn new(notifier: Arc<dyn Notifier>, scheduler: Arc<Scheduler>) -> NotificationScheduler { NotificationScheduler { notifier, scheduler, } } /// Schedules recurring notification deliveries with fixed intervals pub fn notify_with_fixed_interval<I: Into<Option<Duration>>>( &self, id: NotificationId, interval: Duration, initial_delay: I, name: Option<String>, ) -> ScheduleEntryId where I: Into<Option<Duration>>, { let notifier = Arc::clone(&self.notifier); let entry = ScheduleEntry::with_interval(interval, initial_delay, name, move || { let _ = notifier.notify(id); }); let id = entry.id; self.scheduler.schedule(entry); id } /// Schedules a one-time notification delivery pub fn notify_once_after_delay( &self, id: NotificationId, delay: Duration, name: Option<String>, ) -> ScheduleEntryId { let notifier = Arc::clone(&self.notifier); let entry = ScheduleEntry::one_time(delay, name, move || { let _ = notifier.notify(id); }); let id = entry.id; self.scheduler.schedule(entry); id } /// Cancels future notification(s) pub fn cancel(&self, id: ScheduleEntryId) { self.scheduler.cancel(id); } } type Callback = dyn Fn() + Send + Sync + 'static; /// Entry associated with callback #[derive(Clone)] pub struct ScheduleEntry { start: Instant, /// The interval with which to run the callback. 
No interval means only one-time run interval: Option<Duration>, callback: Arc<Callback>, /// The assigned name of the entry for debugging purposes pub name: Option<String>, /// Entry Id pub id: ScheduleEntryId, } impl ScheduleEntry { /// Creates an entry to run the callback repeatedly with a fixed delay pub fn with_interval<I, F>( interval: Duration, initial_delay: I, name: Option<String>, callback: F, ) -> ScheduleEntry where I: Into<Option<Duration>>, F: Fn() + Send + Sync + 'static, { let now = Instant::now(); ScheduleEntry { start: initial_delay.into().map(|d| now.add(d)).unwrap_or(now), interval: Some(interval), callback: Arc::new(callback), name, id: ScheduleEntryId::gen_next(), } } /// Creates an entry to run the callback only once after a given delay pub fn one_time<F>(delay: Duration, name: Option<String>, callback: F) -> ScheduleEntry where F: Fn() + Send + Sync + 'static, { ScheduleEntry { start: Instant::now().add(delay), interval: None, callback: Arc::new(callback), name, id: ScheduleEntryId::gen_next(), } } } impl fmt::Debug for ScheduleEntry { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ScheduleEntry") .field("start", &self.start) .field("interval", &self.interval) .field("name", &self.name) .field("id", &self.id) .finish() } } impl Hash for ScheduleEntry { fn hash<H: Hasher>(&self, hasher: &mut H) { self.start.hash(hasher); self.id.hash(hasher); } } impl Eq for ScheduleEntry {} impl PartialEq for ScheduleEntry { fn eq(&self, other: &Self) -> bool { self.start == other.start && self.id == other.id } } impl Ord for ScheduleEntry { fn cmp(&self, other: &Self) -> CmpOrdering { self.start.cmp(&other.start).then(self.id.cmp(&other.id)) } } impl PartialOrd for ScheduleEntry { fn partial_cmp(&self, other: &Self) -> Option<CmpOrdering> { Some(self.cmp(other)) } } static NEXT_SCHEDULE_ENTRY_ID: AtomicU32 = AtomicU32::new(1); /// Id associated with an entry #[derive(Copy, Clone, Debug, PartialEq, Ord, PartialOrd, Eq, Hash)] pub struct ScheduleEntryId(u32); impl ScheduleEntryId { /// Generates next `ScheduleEntryId`, which is guaranteed to be unique pub fn gen_next() -> ScheduleEntryId { let id = NEXT_SCHEDULE_ENTRY_ID.fetch_add(1, Ordering::SeqCst); ScheduleEntryId(id) } /// Returns id pub fn id(&self) -> u32 { self.0 } } /// Scheduler Status #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] pub enum SchedulerStatus { /// Currently executing an entry Active, /// Waiting for new entries to be scheduled Parked, /// Waiting to execute entries in the queue at the scheduled intervals ParkedTimeout, } /// Single-threaded scheduler that prioritizes "cancels" over schedule executions, hence multiple queues #[derive(Debug)] pub struct Scheduler { shutdown: Arc<AtomicBool>, thread_handle: JoinHandle<()>, schedule_queue: Arc<SegQueue<ScheduleEntry>>, cancel_queue: Arc<SegQueue<ScheduleEntryId>>, name: String, status: Arc<RwLock<SchedulerStatus>>, entry_count: Arc<AtomicU32>, } impl Default for Scheduler { fn default() -> Scheduler { Scheduler::new(None) } } // Helps distinguish different scheduler creations static SCHEDULER_THREAD_ID: AtomicU32 = AtomicU32::new(1); impl Scheduler { /// Returns the name of the scheduler pub fn name(&self) -> &str { self.name.as_str() } /// Schedules entry for execution(s) pub fn schedule(&self, entry: ScheduleEntry) { self.schedule_queue.push(entry); self.thread_handle.thread().unpark(); } /// Cancels future execution(s) pub fn cancel(&self, id: ScheduleEntryId) { self.cancel_queue.push(id); 
self.thread_handle.thread().unpark(); } /// Returns the scheduler's current status pub fn status(&self) -> SchedulerStatus { *(self.status.read().unwrap()) } /// Number of current entries pub fn entry_count(&self) -> u32 { self.entry_count.load(Ordering::SeqCst) } /// Creates a scheduler pub fn new(name: Option<String>) -> Scheduler { let t_id = SCHEDULER_THREAD_ID.fetch_add(1, Ordering::SeqCst); let name_prefix = "mio-misc-scheduler"; let name = name .map(|n| format!("{}-{}-{}", name_prefix, n, t_id)) .unwrap_or_else(|| format!("{}-{}", name_prefix, t_id)); let name_clone = name.clone(); let shut_down = Arc::new(AtomicBool::new(false)); let shutdown_clone = Arc::clone(&shut_down); let entry_count = Arc::new(AtomicU32::new(0)); let entry_count_clone = Arc::clone(&entry_count); let schedule_queue = Arc::new(SegQueue::new()); let schedule_queue_clone = Arc::clone(&schedule_queue); let cancel_queue = Arc::new(SegQueue::new()); let cancel_queue_clone = Arc::clone(&cancel_queue); let status = Arc::new(RwLock::new(SchedulerStatus::Active)); let status_clone = Arc::clone(&status); let thread_handle = thread::Builder::new() .name(name.clone()) .spawn(move || { let mut entries: BTreeSet<ScheduleEntry> = BTreeSet::new(); let mut entries_to_cancel: HashSet<ScheduleEntryId> = HashSet::new(); while !shut_down.load(Ordering::SeqCst) { // cancel requests take precedence while let Some(entry_id) = cancel_queue.pop() { trace!( "{}: cancelling scheduler entry with id {:?};", name, entry_id ); let _ = entries_to_cancel.insert(entry_id); } if let Some(entry) = schedule_queue.pop() { trace!("{}: scheduling entry; {:?};", name, entry); if entries.insert(entry) { entry_count.fetch_add(1, Ordering::SeqCst); } } if let Some(entry) = entries.iter().cloned().next() { let now = Instant::now(); // time to execute a callback ? if now.ge(&entry.start) { entries.remove(&entry); // entry still relevant ? if !entries_to_cancel.contains(&entry.id) { trace!("{}: executing scheduler entry; {:?}", name, entry); let cb = Arc::clone(&entry.callback); cb(); if let Some(interval) = entry.interval { // add back let updated_entry = ScheduleEntry { start: Instant::now().add(interval), interval: entry.interval, callback: entry.callback, name: entry.name, id: entry.id, }; entries.insert(updated_entry); } } else { // not executing and not scheduling a new entry trace!("{}: cancelling scheduler entry; {:?}", name, entry); if entries_to_cancel.remove(&entry.id) { entry_count.fetch_sub(1, Ordering::SeqCst); } } } else { // park until the nearest time when we need to execute a function let timeout_dur = entry.start.sub(now); trace!("{}: parking scheduler for {:?}", name, timeout_dur); *status.write().unwrap() = SchedulerStatus::ParkedTimeout; thread::park_timeout(timeout_dur); *status.write().unwrap() = SchedulerStatus::Active; } } else { // there's no function to execute, so park indefinitely instead of spinning idly trace!("{}: parking scheduler until being un-parked", name); *status.write().unwrap() = SchedulerStatus::Parked; thread::park(); *status.write().unwrap() = SchedulerStatus::Active; } } }) .unwrap(); Scheduler { shutdown: shutdown_clone, thread_handle, schedule_queue: schedule_queue_clone, cancel_queue: cancel_queue_clone, name: name_clone, status: status_clone, entry_count: entry_count_clone, } } } impl Drop for Scheduler { fn drop(&mut self) { self.shutdown.store(true, Ordering::SeqCst); self.thread_handle.thread().unpark(); } }
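// Hypothetical usage (not in the original file): schedule a one-shot entry and
// a recurring one, then cancel the recurring entry by id.
fn scheduler_example() {
    let scheduler = Scheduler::new(Some("example".to_string()));

    scheduler.schedule(ScheduleEntry::one_time(
        Duration::from_millis(10),
        Some("one-shot".to_string()),
        || println!("ran once"),
    ));

    let ticker = ScheduleEntry::with_interval(
        Duration::from_secs(1),
        None::<Duration>, // no initial delay: the first run is scheduled for "now"
        Some("ticker".to_string()),
        || println!("tick"),
    );
    let id = ticker.id;
    scheduler.schedule(ticker);
    scheduler.cancel(id);
}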
34.405714
104
0.542933
69f6e41fb69d81bba54b5d746f8d88fda7b7da31
13,357
//! Windows impl of mio-enabled serial ports. use mio::{Evented, Poll, PollOpt, Ready, Token}; use mio_named_pipes::NamedPipe; use serialport::prelude::*; use serialport::windows::COMPort; use std::ffi::OsStr; use std::io::{self, Read, Write}; use std::mem; use std::os::windows::ffi::OsStrExt; use std::os::windows::io::{AsRawHandle, FromRawHandle, RawHandle}; use std::path::Path; use std::ptr; use std::time::Duration; use winapi::um::commapi::SetCommTimeouts; use winapi::um::fileapi::*; use winapi::um::handleapi::INVALID_HANDLE_VALUE; use winapi::um::winbase::{COMMTIMEOUTS, FILE_FLAG_OVERLAPPED}; use winapi::um::winnt::{FILE_ATTRIBUTE_NORMAL, GENERIC_READ, GENERIC_WRITE, HANDLE}; /// Windows serial port pub struct Serial { inner: COMPort, pipe: NamedPipe, } impl Serial { /// Opens a COM port at the specified path pub fn from_path<T: AsRef<Path>>(path: T, settings: &SerialPortSettings) -> io::Result<Self> { let mut name = Vec::<u16>::new(); name.extend(OsStr::new("\\\\.\\").encode_wide()); name.extend(path.as_ref().as_os_str().encode_wide()); name.push(0); let handle = unsafe { CreateFileW( name.as_ptr(), GENERIC_READ | GENERIC_WRITE, 0, ptr::null_mut(), OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL | FILE_FLAG_OVERLAPPED, 0 as HANDLE, ) }; if handle != INVALID_HANDLE_VALUE { let handle = unsafe { mem::transmute(handle) }; // Construct NamedPipe and COMPort from Handle let pipe = unsafe { NamedPipe::from_raw_handle(handle) }; let mut serial = unsafe { COMPort::from_raw_handle(handle) }; serial.set_all(settings)?; override_comm_timeouts(handle)?; Ok(Serial { inner: serial, pipe: pipe, }) } else { Err(io::Error::last_os_error()) } } } impl SerialPort for Serial { /// Returns a struct with the current port settings fn settings(&self) -> SerialPortSettings { self.inner.settings() } /// Return the name associated with the serial port, if known. fn name(&self) -> Option<String> { self.inner.name() } /// Returns the current baud rate. /// /// This function returns `None` if the baud rate could not be determined. This may occur if /// the hardware is in an uninitialized state. Setting a baud rate with `set_baud_rate()` /// should initialize the baud rate to a supported value. fn baud_rate(&self) -> ::Result<u32> { self.inner.baud_rate() } /// Returns the character size. /// /// This function returns `None` if the character size could not be determined. This may occur /// if the hardware is in an uninitialized state or is using a non-standard character size. /// Setting a baud rate with `set_char_size()` should initialize the character size to a /// supported value. fn data_bits(&self) -> ::Result<::DataBits> { self.inner.data_bits() } /// Returns the flow control mode. /// /// This function returns `None` if the flow control mode could not be determined. This may /// occur if the hardware is in an uninitialized state or is using an unsupported flow control /// mode. Setting a flow control mode with `set_flow_control()` should initialize the flow /// control mode to a supported value. fn flow_control(&self) -> ::Result<::FlowControl> { self.inner.flow_control() } /// Returns the parity-checking mode. /// /// This function returns `None` if the parity mode could not be determined. This may occur if /// the hardware is in an uninitialized state or is using a non-standard parity mode. Setting /// a parity mode with `set_parity()` should initialize the parity mode to a supported value. fn parity(&self) -> ::Result<::Parity> { self.inner.parity() } /// Returns the number of stop bits. 
/// /// This function returns `None` if the number of stop bits could not be determined. This may /// occur if the hardware is in an uninitialized state or is using an unsupported stop bit /// configuration. Setting the number of stop bits with `set_stop-bits()` should initialize the /// stop bits to a supported value. fn stop_bits(&self) -> ::Result<::StopBits> { self.inner.stop_bits() } /// Returns the current timeout. fn timeout(&self) -> Duration { Duration::from_secs(0) } // Port settings setters /// Applies all settings for a struct. This isn't guaranteed to involve only /// a single call into the driver, though that may be done on some /// platforms. fn set_all(&mut self, settings: &SerialPortSettings) -> ::Result<()> { self.inner.set_all(settings)?; override_comm_timeouts(self.inner.as_raw_handle())?; Ok(()) } /// Sets the baud rate. /// /// ## Errors /// /// If the implementation does not support the requested baud rate, this function may return an /// `InvalidInput` error. Even if the baud rate is accepted by `set_baud_rate()`, it may not be /// supported by the underlying hardware. fn set_baud_rate(&mut self, baud_rate: u32) -> ::Result<()> { self.inner.set_baud_rate(baud_rate) } /// Sets the character size. fn set_data_bits(&mut self, data_bits: ::DataBits) -> ::Result<()> { self.inner.set_data_bits(data_bits) } /// Sets the flow control mode. fn set_flow_control(&mut self, flow_control: ::FlowControl) -> ::Result<()> { self.inner.set_flow_control(flow_control) } /// Sets the parity-checking mode. fn set_parity(&mut self, parity: ::Parity) -> ::Result<()> { self.inner.set_parity(parity) } /// Sets the number of stop bits. fn set_stop_bits(&mut self, stop_bits: ::StopBits) -> ::Result<()> { self.inner.set_stop_bits(stop_bits) } /// Sets the timeout for future I/O operations. This parameter is ignored but /// required for trait completeness. fn set_timeout(&mut self, _: Duration) -> ::Result<()> { Ok(()) } // Functions for setting non-data control signal pins /// Sets the state of the RTS (Request To Send) control signal. /// /// Setting a value of `true` asserts the RTS control signal. `false` clears the signal. /// /// ## Errors /// /// This function returns an error if the RTS control signal could not be set to the desired /// state on the underlying hardware: /// /// * `NoDevice` if the device was disconnected. /// * `Io` for any other type of I/O error. fn write_request_to_send(&mut self, level: bool) -> ::Result<()> { self.inner.write_request_to_send(level) } /// Writes to the Data Terminal Ready pin /// /// Setting a value of `true` asserts the DTR control signal. `false` clears the signal. /// /// ## Errors /// /// This function returns an error if the DTR control signal could not be set to the desired /// state on the underlying hardware: /// /// * `NoDevice` if the device was disconnected. /// * `Io` for any other type of I/O error. fn write_data_terminal_ready(&mut self, level: bool) -> ::Result<()> { self.inner.write_data_terminal_ready(level) } // Functions for reading additional pins /// Reads the state of the CTS (Clear To Send) control signal. /// /// This function returns a boolean that indicates whether the CTS control signal is asserted. /// /// ## Errors /// /// This function returns an error if the state of the CTS control signal could not be read /// from the underlying hardware: /// /// * `NoDevice` if the device was disconnected. /// * `Io` for any other type of I/O error. 
fn read_clear_to_send(&mut self) -> ::Result<bool> { self.inner.read_clear_to_send() } /// Reads the state of the Data Set Ready control signal. /// /// This function returns a boolean that indicates whether the DSR control signal is asserted. /// /// ## Errors /// /// This function returns an error if the state of the DSR control signal could not be read /// from the underlying hardware: /// /// * `NoDevice` if the device was disconnected. /// * `Io` for any other type of I/O error. fn read_data_set_ready(&mut self) -> ::Result<bool> { self.inner.read_data_set_ready() } /// Reads the state of the Ring Indicator control signal. /// /// This function returns a boolean that indicates whether the RI control signal is asserted. /// /// ## Errors /// /// This function returns an error if the state of the RI control signal could not be read from /// the underlying hardware: /// /// * `NoDevice` if the device was disconnected. /// * `Io` for any other type of I/O error. fn read_ring_indicator(&mut self) -> ::Result<bool> { self.inner.read_ring_indicator() } /// Reads the state of the Carrier Detect control signal. /// /// This function returns a boolean that indicates whether the CD control signal is asserted. /// /// ## Errors /// /// This function returns an error if the state of the CD control signal could not be read from /// the underlying hardware: /// /// * `NoDevice` if the device was disconnected. /// * `Io` for any other type of I/O error. fn read_carrier_detect(&mut self) -> ::Result<bool> { self.inner.read_carrier_detect() } /// Gets the number of bytes available to be read from the input buffer. /// /// # Errors /// /// This function may return the following errors: /// /// * `NoDevice` if the device was disconnected. /// * `Io` for any other type of I/O error. fn bytes_to_read(&self) -> ::Result<u32> { self.inner.bytes_to_read() } /// Get the number of bytes written to the output buffer, awaiting transmission. /// /// # Errors /// /// This function may return the following errors: /// /// * `NoDevice` if the device was disconnected. /// * `Io` for any other type of I/O error. fn bytes_to_write(&self) -> ::Result<u32> { self.inner.bytes_to_write() } /// Discards all bytes from the serial driver's input buffer and/or output buffer. /// /// # Errors /// /// This function may return the following errors: /// /// * `NoDevice` if the device was disconnected. /// * `Io` for any other type of I/O error. fn clear(&self, buffer_to_clear: ClearBuffer) -> ::Result<()> { self.inner.clear(buffer_to_clear) } // Misc methods /// Attempts to clone the `SerialPort`. This allow you to write and read simultaneously from the /// same serial connection. Please note that if you want a real asynchronous serial port you /// should look at [mio-serial](https://crates.io/crates/mio-serial) or /// [tokio-serial](https://crates.io/crates/tokio-serial). /// /// Also, you must be very carefull when changing the settings of a cloned `SerialPort` : since /// the settings are cached on a per object basis, trying to modify them from two different /// objects can cause some nasty behavior. /// /// # Errors /// /// This function returns an error if the serial port couldn't be cloned. 
fn try_clone(&self) -> ::Result<Box<SerialPort>> { self.inner.try_clone() } } impl Read for Serial { fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> { self.pipe.read(bytes) } } impl Write for Serial { fn write(&mut self, bytes: &[u8]) -> io::Result<usize> { self.pipe.write(bytes) } fn flush(&mut self) -> io::Result<()> { self.pipe.flush() } } impl Evented for Serial { fn register( &self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt, ) -> io::Result<()> { self.pipe.register(poll, token, interest, opts) } fn reregister( &self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt, ) -> io::Result<()> { self.pipe.reregister(poll, token, interest, opts) } fn deregister(&self, poll: &Poll) -> io::Result<()> { self.pipe.deregister(poll) } } /// Overrides timeout value set by serialport-rs so that the read end will /// never wake up with 0-byte payload. fn override_comm_timeouts(handle: RawHandle) -> io::Result<()> { let mut timeouts = COMMTIMEOUTS { // wait at most 1ms between two bytes (0 means no timeout) ReadIntervalTimeout: 1, // disable "total" timeout to wait at least 1 byte forever ReadTotalTimeoutMultiplier: 0, ReadTotalTimeoutConstant: 0, // write timeouts are just copied from serialport-rs WriteTotalTimeoutMultiplier: 0, WriteTotalTimeoutConstant: 0, }; let r = unsafe { SetCommTimeouts(handle, &mut timeouts) }; if r == 0 { return Err(io::Error::last_os_error()); } Ok(()) }
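// Hypothetical set-up code (not in the original file): open a COM port with the
// caller's settings and register it for readable edge-triggered events.
fn open_registered(poll: &Poll, settings: &SerialPortSettings) -> io::Result<Serial> {
    let port = Serial::from_path("COM3", settings)?;
    poll.register(&port, Token(0), Ready::readable(), PollOpt::edge())?;
    Ok(port)
}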
34.336761
100
0.620798
eb6d50df25d281e3565c32bd0612d9199750a971
16,722
use std::cmp; use std::io::prelude::*; use std::io; use std::sync::mpsc::channel; use std::thread; use std::time::Duration; use tempdir::TempDir; use {TryRead, TryWrite}; use mio::{Token, Ready, PollOpt, Poll, Events}; use iovec::IoVec; use mio_uds_windows::{net, UnixListener, UnixStream}; #[test] fn accept() { struct H { hit: bool, listener: UnixListener, shutdown: bool } let dir = TempDir::new("uds").unwrap(); let l = UnixListener::bind(dir.path().join("foo")).unwrap(); let addr = l.local_addr().unwrap();; let t = thread::spawn(move || { net::UnixStream::connect(&addr.as_pathname().unwrap()).unwrap(); }); let poll = Poll::new().unwrap(); poll.register(&l, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); let mut events = Events::with_capacity(128); let mut h = H { hit: false, listener: l, shutdown: false }; while !h.shutdown { poll.poll(&mut events, None).unwrap(); for event in &events { h.hit = true; assert_eq!(event.token(), Token(1)); assert!(event.readiness().is_readable()); assert!(h.listener.accept().is_ok()); h.shutdown = true; } } assert!(h.hit); assert!(h.listener.accept().unwrap().is_none()); t.join().unwrap(); } #[test] fn connect() { struct H { hit: u32, shutdown: bool } let dir = TempDir::new("uds").unwrap(); let l = net::UnixListener::bind(dir.path().join("foo")).unwrap(); let addr = l.local_addr().unwrap(); let (tx, rx) = channel(); let (tx2, rx2) = channel(); let t = thread::spawn(move || { let s = l.accept().unwrap(); rx.recv().unwrap(); drop(s); tx2.send(()).unwrap(); }); let poll = Poll::new().unwrap(); let s = UnixStream::connect(&addr.as_pathname().unwrap()).unwrap(); poll.register(&s, Token(1), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); let mut events = Events::with_capacity(128); let mut h = H { hit: 0, shutdown: false }; while !h.shutdown { poll.poll(&mut events, None).unwrap(); for event in &events { assert_eq!(event.token(), Token(1)); match h.hit { 0 => assert!(event.readiness().is_writable()), 1 => assert!(event.readiness().is_readable()), _ => panic!(), } h.hit += 1; h.shutdown = true; } } assert_eq!(h.hit, 1); tx.send(()).unwrap(); rx2.recv().unwrap(); h.shutdown = false; while !h.shutdown { poll.poll(&mut events, None).unwrap(); for event in &events { assert_eq!(event.token(), Token(1)); match h.hit { 0 => assert!(event.readiness().is_writable()), 1 => assert!(event.readiness().is_readable()), _ => panic!(), } h.hit += 1; h.shutdown = true; } } assert_eq!(h.hit, 2); t.join().unwrap(); } #[test] fn read() { const N: usize = 16 * 1024 * 1024; struct H { amt: usize, socket: UnixStream, shutdown: bool } let dir = TempDir::new("uds").unwrap(); let l = net::UnixListener::bind(dir.path().join("foo")).unwrap(); let addr = l.local_addr().unwrap();; let t = thread::spawn(move || { let mut s = l.accept().unwrap().0; let b = [0; 1024]; let mut amt = 0; while amt < N { amt += s.write(&b).unwrap(); } }); let poll = Poll::new().unwrap(); let s = UnixStream::connect(&addr.as_pathname().unwrap()).unwrap(); poll.register(&s, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); let mut events = Events::with_capacity(128); let mut h = H { amt: 0, socket: s, shutdown: false }; while !h.shutdown { poll.poll(&mut events, None).unwrap(); for event in &events { assert_eq!(event.token(), Token(1)); let mut b = [0; 1024]; loop { if let Some(amt) = h.socket.try_read(&mut b).unwrap() { h.amt += amt; } else { break } if h.amt >= N { h.shutdown = true; break } } } } t.join().unwrap(); } #[test] fn read_bufs() { const N: usize = 16 * 1024 * 1024; let dir = 
TempDir::new("uds").unwrap(); let l = net::UnixListener::bind(dir.path().join("foo")).unwrap(); let addr = l.local_addr().unwrap();; let t = thread::spawn(move || { let mut s = l.accept().unwrap().0; let b = [1; 1024]; let mut amt = 0; while amt < N { amt += s.write(&b).unwrap(); } }); let poll = Poll::new().unwrap(); let mut events = Events::with_capacity(128); let s = UnixStream::connect(&addr.as_pathname().unwrap()).unwrap(); poll.register(&s, Token(1), Ready::readable(), PollOpt::level()).unwrap(); let b1 = &mut [0; 10][..]; let b2 = &mut [0; 383][..]; let b3 = &mut [0; 28][..]; let b4 = &mut [0; 8][..]; let b5 = &mut [0; 128][..]; let mut b: [&mut IoVec; 5] = [ b1.into(), b2.into(), b3.into(), b4.into(), b5.into(), ]; let mut so_far = 0; loop { for buf in b.iter_mut() { for byte in buf.as_mut_bytes() { *byte = 0; } } poll.poll(&mut events, None).unwrap(); match s.read_bufs(&mut b) { Ok(0) => { assert_eq!(so_far, N); break } Ok(mut n) => { so_far += n; for buf in b.iter() { let buf = buf.as_bytes(); for byte in buf[..cmp::min(n, buf.len())].iter() { assert_eq!(*byte, 1); } n = n.saturating_sub(buf.len()); if n == 0 { break } } assert_eq!(n, 0); } Err(e) => assert_eq!(e.kind(), io::ErrorKind::WouldBlock), } } t.join().unwrap(); } #[test] fn write() { const N: usize = 16 * 1024 * 1024; struct H { amt: usize, socket: UnixStream, shutdown: bool } let dir = TempDir::new("uds").unwrap(); let l = net::UnixListener::bind(dir.path().join("foo")).unwrap(); let addr = l.local_addr().unwrap();; let t = thread::spawn(move || { let mut s = l.accept().unwrap().0; let mut b = [0; 1024]; let mut amt = 0; while amt < N { amt += s.read(&mut b).unwrap(); } }); let poll = Poll::new().unwrap(); let s = UnixStream::connect(&addr.as_pathname().unwrap()).unwrap(); poll.register(&s, Token(1), Ready::writable(), PollOpt::edge()).unwrap(); let mut events = Events::with_capacity(128); let mut h = H { amt: 0, socket: s, shutdown: false }; while !h.shutdown { poll.poll(&mut events, None).unwrap(); for event in &events { assert_eq!(event.token(), Token(1)); let b = [0; 1024]; loop { if let Some(amt) = h.socket.try_write(&b).unwrap() { h.amt += amt; } else { break } if h.amt >= N { h.shutdown = true; break } } } } t.join().unwrap(); } #[test] fn write_bufs() { const N: usize = 16 * 1024 * 1024; let dir = TempDir::new("uds").unwrap(); let l = net::UnixListener::bind(dir.path().join("foo")).unwrap(); let addr = l.local_addr().unwrap();; let t = thread::spawn(move || { let mut s = l.accept().unwrap().0; let mut b = [0; 1024]; let mut amt = 0; while amt < N { for byte in b.iter_mut() { *byte = 0; } let n = s.read(&mut b).unwrap(); amt += n; for byte in b[..n].iter() { assert_eq!(*byte, 1); } } }); let poll = Poll::new().unwrap(); let mut events = Events::with_capacity(128); let s = UnixStream::connect(&addr.as_pathname().unwrap()).unwrap(); poll.register(&s, Token(1), Ready::writable(), PollOpt::level()).unwrap(); let b1 = &[1; 10][..]; let b2 = &[1; 383][..]; let b3 = &[1; 28][..]; let b4 = &[1; 8][..]; let b5 = &[1; 128][..]; let b: [&IoVec; 5] = [ b1.into(), b2.into(), b3.into(), b4.into(), b5.into(), ]; let mut so_far = 0; while so_far < N { poll.poll(&mut events, None).unwrap(); match s.write_bufs(&b) { Ok(n) => so_far += n, Err(e) => assert_eq!(e.kind(), io::ErrorKind::WouldBlock), } } t.join().unwrap(); } #[test] fn connect_then_close() { struct H { listener: UnixListener, shutdown: bool } let dir = TempDir::new("uds").unwrap(); let poll = Poll::new().unwrap(); let l = 
UnixListener::bind(dir.path().join("foo")).unwrap(); let addr = l.local_addr().unwrap();; let s = UnixStream::connect(&addr.as_pathname().unwrap()).unwrap(); poll.register(&l, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); poll.register(&s, Token(2), Ready::readable(), PollOpt::edge()).unwrap(); let mut events = Events::with_capacity(128); let mut h = H { listener: l, shutdown: false }; while !h.shutdown { poll.poll(&mut events, None).unwrap(); for event in &events { if event.token() == Token(1) { let s = h.listener.accept().unwrap().unwrap().0; poll.register(&s, Token(3), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); drop(s); } else if event.token() == Token(2) { h.shutdown = true; } } } } #[test] fn listen_then_close() { let poll = Poll::new().unwrap(); let dir = TempDir::new("uds").unwrap(); let l = UnixListener::bind(dir.path().join("foo")).unwrap(); poll.register(&l, Token(1), Ready::readable(), PollOpt::edge()).unwrap(); drop(l); let mut events = Events::with_capacity(128); poll.poll(&mut events, Some(Duration::from_millis(100))).unwrap(); for event in &events { if event.token() == Token(1) { panic!("recieved ready() on a closed UnixListener") } } } fn assert_send<T: Send>() { } fn assert_sync<T: Sync>() { } #[test] fn test_uds_sockets_are_send() { assert_send::<UnixListener>(); assert_send::<UnixStream>(); assert_sync::<UnixListener>(); assert_sync::<UnixStream>(); } #[test] fn bind_twice_bad() { let dir = TempDir::new("uds").unwrap(); let l1 = UnixListener::bind(dir.path().join("foo")).unwrap(); let addr = l1.local_addr().unwrap();; assert!(UnixListener::bind(&addr.as_pathname().unwrap()).is_err()); } #[test] fn multiple_writes_immediate_success() { const N: usize = 16; let dir = TempDir::new("uds").unwrap(); let l = net::UnixListener::bind(dir.path().join("foo")).unwrap(); let addr = l.local_addr().unwrap();; let t = thread::spawn(move || { let mut s = l.accept().unwrap().0; let mut b = [0; 1024]; let mut amt = 0; while amt < 1024*N { for byte in b.iter_mut() { *byte = 0; } let n = s.read(&mut b).unwrap(); amt += n; for byte in b[..n].iter() { assert_eq!(*byte, 1); } } }); let poll = Poll::new().unwrap(); let mut s = UnixStream::connect(&addr.as_pathname().unwrap()).unwrap(); poll.register(&s, Token(1), Ready::writable(), PollOpt::level()).unwrap(); let mut events = Events::with_capacity(16); // Wait for our UDS stream to connect 'outer: loop { poll.poll(&mut events, None).unwrap(); for event in events.iter() { if event.token() == Token(1) && event.readiness().is_writable() { break 'outer } } } for _ in 0..N { s.write(&[1; 1024]).unwrap(); } t.join().unwrap(); } #[test] fn connection_reset_by_peer() { let poll = Poll::new().unwrap(); let mut events = Events::with_capacity(16); let mut buf = [0u8; 16]; let dir = TempDir::new("uds").unwrap(); // Create listener let l = UnixListener::bind(dir.path().join("foo")).unwrap(); let addr = l.local_addr().unwrap();; // Connect client let client = net::UnixStream::connect(&addr.as_pathname().unwrap()).unwrap(); // Convert to Mio stream let client = UnixStream::from_stream(client).unwrap(); // Register server poll.register(&l, Token(0), Ready::readable(), PollOpt::edge()).unwrap(); // Register interest in the client poll.register(&client, Token(1), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); // Wait for listener to be ready let mut server; 'outer: loop { poll.poll(&mut events, None).unwrap(); for event in &events { if event.token() == Token(0) { match l.accept() { Ok(Some((sock, _))) => { 
server = sock; break 'outer; } Ok(None) => {} Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} Err(e) => panic!("unexpected error {:?}", e), } } } } // Close the connection drop(client); // Wait a moment thread::sleep(Duration::from_millis(100)); // Register interest in the server socket poll.register(&server, Token(3), Ready::readable(), PollOpt::edge()).unwrap(); loop { poll.poll(&mut events, None).unwrap(); for event in &events { if event.token() == Token(3) { assert!(event.readiness().is_readable()); match server.read(&mut buf) { Ok(0) | Err(_) => {}, Ok(x) => panic!("expected empty buffer but read {} bytes", x), } return; } } } } #[test] fn connect_error() { let poll = Poll::new().unwrap(); let dir = TempDir::new("uds").unwrap(); // This test is structured differently from the test // 'test_tcp::connect_error' in the mio codebase because // UnixStream::connect() seems to behave differently from // TcpStream::connect() in this case. Specifically, an error // with kind == io::ErrorKind::ConnectionRefused is returned // from poll.register() rather than poll.poll(). Is that ok? let l = UnixStream::connect(&dir.path().join("foo")).unwrap(); let e = poll.register(&l, Token(0), Ready::writable(), PollOpt::edge()); assert!(e.is_err()); assert_eq!(e.err().unwrap().kind(), io::ErrorKind::ConnectionRefused); } #[test] fn write_error() { let poll = Poll::new().unwrap(); let mut events = Events::with_capacity(16); let (tx, rx) = channel(); let dir = TempDir::new("uds").unwrap(); let listener = net::UnixListener::bind(dir.path().join("foo")).unwrap(); let addr = listener.local_addr().unwrap();; let t = thread::spawn(move || { let (conn, _addr) = listener.accept().unwrap(); rx.recv().unwrap(); drop(conn); }); let mut s = UnixStream::connect(&addr.as_pathname().unwrap()).unwrap(); poll.register(&s, Token(0), Ready::readable() | Ready::writable(), PollOpt::edge()).unwrap(); let mut wait_writable = || { 'outer: loop { poll.poll(&mut events, None).unwrap(); for event in &events { if event.token() == Token(0) && event.readiness().is_writable() { break 'outer } } } }; wait_writable(); tx.send(()).unwrap(); t.join().unwrap(); let buf = [0; 1024]; loop { match s.write(&buf) { Ok(_) => {} Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { wait_writable() } Err(e) => { println!("good error: {}", e); break } } } }
27.963211
102
0.494977
08c37a2adf1c09721d127e21915a7d8b187cf794
4,206
use super::header::BlockHeader; use crate::{ consensus::difficulty::{canonical_difficulty, BlockDifficultyBombData}, models::{switch_is_active, BlockNumber, ChainSpec, SealVerificationParams, EMPTY_LIST_HASH}, }; pub fn verify_link_by_parent_hash(child: &BlockHeader, parent: &BlockHeader) -> bool { let given_parent_hash = child.parent_hash(); let expected_parent_hash = parent.hash(); given_parent_hash == expected_parent_hash } pub fn verify_link_block_nums(child: &BlockHeader, parent: &BlockHeader) -> bool { let given_block_num = child.number().0; let expected_block_num = parent.number().0 + 1; given_block_num == expected_block_num } pub fn verify_link_timestamps(child: &BlockHeader, parent: &BlockHeader) -> bool { let parent_timestamp = parent.timestamp(); let child_timestamp = child.timestamp(); parent_timestamp < child_timestamp } pub fn verify_link_difficulties( child: &BlockHeader, parent: &BlockHeader, chain_spec: &ChainSpec, ) -> bool { let (&byzantium_formula, &homestead_formula, difficulty_bomb) = match &chain_spec.consensus.seal_verification { SealVerificationParams::Ethash { byzantium_formula, homestead_formula, difficulty_bomb, .. } => (byzantium_formula, homestead_formula, difficulty_bomb), _ => { panic!("unsupported consensus engine"); } }; let given_child_difficulty = child.difficulty(); let expected_child_difficulty = canonical_difficulty( child.number(), child.timestamp(), parent.difficulty(), parent.timestamp(), parent.ommers_hash() != EMPTY_LIST_HASH, switch_is_active(byzantium_formula, child.number()), switch_is_active(homestead_formula, child.number()), difficulty_bomb .as_ref() .map(|bomb| BlockDifficultyBombData { delay_to: bomb.get_delay_to(child.number()), }), ); given_child_difficulty == expected_child_difficulty } pub fn verify_link_pow(_child: &BlockHeader, _parent: &BlockHeader) -> bool { // TODO: verify_link_pow true } fn enumerate_sequential_pairs( headers: &[BlockHeader], ) -> impl Iterator<Item = (&BlockHeader, &BlockHeader)> { let prev_it = headers.iter(); let next_it = headers.iter().skip(1); prev_it.zip(next_it) } /// Verify that all blocks in the slice are linked by the parent_hash field. pub fn verify_slice_is_linked_by_parent_hash(headers: &[BlockHeader]) -> bool { enumerate_sequential_pairs(headers) .all(|(parent, child)| verify_link_by_parent_hash(child, parent)) } /// Verify that block numbers start from the expected /// slice.start_block_num and increase sequentially. pub fn verify_slice_block_nums(headers: &[BlockHeader], start_block_num: BlockNumber) -> bool { if headers.is_empty() { return true; } for (parent, child) in enumerate_sequential_pairs(headers) { if !verify_link_block_nums(child, parent) { return false; } } // verify the first block number let first = &headers[0]; let first_block_num = first.number(); first_block_num == start_block_num } /// Verify that timestamps are in the past and increase monotonically. pub fn verify_slice_timestamps(headers: &[BlockHeader], max_timestamp: u64) -> bool { if headers.is_empty() { return true; } for (parent, child) in enumerate_sequential_pairs(headers) { if !verify_link_timestamps(child, parent) { return false; } } let last = headers.last().unwrap(); let last_timestamp = last.timestamp(); last_timestamp < max_timestamp } /// Verify that difficulty field is calculated properly. 
pub fn verify_slice_difficulties(headers: &[BlockHeader], chain_spec: &ChainSpec) -> bool { enumerate_sequential_pairs(headers) .all(|(parent, child)| verify_link_difficulties(child, parent, chain_spec)) } /// Verify the headers proof-of-work. pub fn verify_slice_pow(_headers: &[BlockHeader]) -> bool { // TODO: verify_slice_pow true }
33.11811
96
0.678079
21e63005267b99620cbc0a46556b1da4360e9e67
298
use std::cmp::PartialEq;

pub const WORLD_WIDTH: f64 = 800.0;
pub const WORLD_HEIGHT: f64 = 600.0;

#[derive(Debug, PartialEq)]
pub struct Position {
    pub x: f64,
    pub y: f64,
}

pub trait Positionable {
    fn set_position(&mut self, x: f64, y: f64);
    fn position(&self) -> &Position;
}
17.529412
47
0.647651
e24af3c07ff0ffb21597955646ae61ece302e29a
1,733
use clap::{App, Arg};

#[derive(Debug)]
struct AppArgs {
    help: bool,
    number: u32,
    opt_number: Option<u32>,
    width: u32,
    input: Vec<std::path::PathBuf>,
}

fn is_width(s: &str) -> Result<(), String> {
    let w: u32 = s.parse().map_err(|_| "not a number")?;
    if w != 0 {
        Ok(())
    } else {
        Err("width must be positive".to_string())
    }
}

fn main() {
    let matches = App::new("App")
        .arg(
            Arg::new("number")
                .long("number")
                .required(true)
                .help("Sets a number")
                .takes_value(true),
        )
        .arg(
            Arg::new("opt-number")
                .long("opt-number")
                .help("Sets an optional number")
                .takes_value(true),
        )
        .arg(
            Arg::new("width")
                .long("width")
                .default_value("10")
                .validator(is_width)
                .help("Sets width")
                .takes_value(true),
        )
        .arg(
            Arg::new("INPUT")
                .takes_value(true)
                .multiple_values(true)
                .allow_invalid_utf8(true),
        )
        .get_matches();

    let args = AppArgs {
        help: matches.is_present("help"),
        number: matches.value_of_t("number").unwrap(),
        opt_number: matches.value_of_t("opt-number").ok(),
        width: matches.value_of_t("width").unwrap(),
        input: matches
            .values_of_os("INPUT")
            .unwrap()
            .map(|s| s.into())
            .collect(),
    };

    if 10 < args.input.len() {
        println!("{:#?}", args.input.len());
    } else {
        println!("{:#?}", args);
    }
}
24.757143
58
0.446047
d731877aa1b8b3ef0016277ce8e10b4561bde21d
4,050
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use crate::IOStream; use crate::TlsAuthenticationMode; use crate::TlsCertificate; use crate::TlsConnection; use glib::object::Cast; use glib::object::IsA; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; use glib::StaticType; use std::boxed::Box as Box_; use std::fmt; use std::mem::transmute; use std::ptr; glib::wrapper! { pub struct TlsServerConnection(Interface<ffi::GTlsServerConnection, ffi::GTlsServerConnectionInterface>) @requires TlsConnection, IOStream; match fn { get_type => || ffi::g_tls_server_connection_get_type(), } } impl TlsServerConnection { pub fn new<P: IsA<IOStream>, Q: IsA<TlsCertificate>>( base_io_stream: &P, certificate: Option<&Q>, ) -> Result<TlsServerConnection, glib::Error> { unsafe { let mut error = ptr::null_mut(); let ret = ffi::g_tls_server_connection_new( base_io_stream.as_ref().to_glib_none().0, certificate.map(|p| p.as_ref()).to_glib_none().0, &mut error, ); if error.is_null() { Ok(from_glib_full(ret)) } else { Err(from_glib_full(error)) } } } } pub const NONE_TLS_SERVER_CONNECTION: Option<&TlsServerConnection> = None; pub trait TlsServerConnectionExt: 'static { fn get_property_authentication_mode(&self) -> TlsAuthenticationMode; fn set_property_authentication_mode(&self, authentication_mode: TlsAuthenticationMode); fn connect_property_authentication_mode_notify<F: Fn(&Self) + 'static>( &self, f: F, ) -> SignalHandlerId; } impl<O: IsA<TlsServerConnection>> TlsServerConnectionExt for O { fn get_property_authentication_mode(&self) -> TlsAuthenticationMode { unsafe { let mut value = glib::Value::from_type(<TlsAuthenticationMode as StaticType>::static_type()); glib::gobject_ffi::g_object_get_property( self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"authentication-mode\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `authentication-mode` getter") .unwrap() } } fn set_property_authentication_mode(&self, authentication_mode: TlsAuthenticationMode) { unsafe { glib::gobject_ffi::g_object_set_property( self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"authentication-mode\0".as_ptr() as *const _, glib::Value::from(&authentication_mode).to_glib_none().0, ); } } fn connect_property_authentication_mode_notify<F: Fn(&Self) + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_authentication_mode_trampoline<P, F: Fn(&P) + 'static>( this: *mut ffi::GTlsServerConnection, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) where P: IsA<TlsServerConnection>, { let f: &F = &*(f as *const F); f(&TlsServerConnection::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::authentication-mode\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_authentication_mode_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } } impl fmt::Display for TlsServerConnection { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("TlsServerConnection") } }
33.196721
143
0.594568
389feefbc744e496ba4356ae5317d4b8a927535d
7,033
use log::{debug, info}; use rand::prelude::*; use rhai::Scope; use serde_json::Value; use state::HostState; use std::{collections::HashMap, fmt}; pub use rhai::serde::*; pub use rhai::{ Array, Dynamic, Engine, EvalAltResult, ImmutableString, Map, Position, RegisterFn, RegisterResultFn, }; use serde::{Deserialize, Serialize}; mod functions; pub mod state; pub mod scheduler; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Default)] pub struct ScriptIdentifier { pub id: u128, pub name: String, } impl Into<ScriptIdentifier> for &str { fn into(self) -> ScriptIdentifier { let mut rng = rand::thread_rng(); ScriptIdentifier { id: rng.gen(), name: self.to_string(), } } } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Default)] pub struct Script { pub identifier: ScriptIdentifier, pub body: String, } impl Script { pub fn new(script_body: &str) -> Script { return Script { identifier: "Unnamed Script".into(), body: script_body.to_string(), }; } pub fn with_name<T: Into<ScriptIdentifier>>(id: T, script_body: &str) -> Script { return Script { identifier: id.into(), body: script_body.to_string(), }; } } /// Wrapper object for the underlying scripting engine. pub struct ScriptEngine<'a> { pub engine: Engine, pub scope: Scope<'a>, } #[derive(PartialEq, Clone, Serialize, Deserialize, Default)] pub struct ScriptEngineError { file: String, line: usize, error_text: String, } // Implement std::fmt::Display for AppError impl fmt::Display for ScriptEngineError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "An Error Occurred, Please Try Again!") // user-facing output } } // Implement std::fmt::Debug for AppError impl fmt::Debug for ScriptEngineError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "{{ file: {}, line: {} error: {} }}", self.file, self.line, self.error_text ) // programmer-facing output } } #[derive(Debug, Clone)] pub struct ScriptResult { is_error: bool, pub underlying: Value, } impl ScriptResult { pub fn to<T>(&self) -> Result<T, ScriptEngineError> where for<'de> T: Deserialize<'de>, { let bare_str = self.underlying.as_str().unwrap_or_default(); serde_json::from_value::<T>(self.underlying.clone()).map_err(|_err| ScriptEngineError { error_text: format!("Could not deserialize {} into struct", bare_str).into(), ..Default::default() }) } } pub trait ScriptEnginePlugin { fn init(&self, host: &mut ScriptEngine); } impl Default for ScriptEngine<'_> { fn default() -> Self { ScriptEngine { engine: Engine::new(), scope: Scope::new(), } } } impl<'a> ScriptEngine<'a> { pub fn with_default_plugins() -> ScriptEngine<'a> { let mut host = ScriptEngine::default(); host.add_plugin(HostState::new()) .add_plugin(functions::rand::Plugin); host } pub fn with_engine<T>(&mut self, func: T) where for<'fo> T: FnOnce(&'fo mut Engine), { func(&mut self.engine); } pub fn add_plugin<S: ScriptEnginePlugin>(&mut self, plugin: S) -> &mut ScriptEngine<'a> { plugin.init(self); self } pub fn execute(&mut self, script: Script) -> Result<ScriptResult, ScriptEngineError> { debug!("Start running script in Script Host:\n {:?}", script); let mut scope = self.scope.clone(); let result = self .engine .eval_with_scope::<Dynamic>(&mut scope, &script.body) .map_err(|err| ScriptEngineError { error_text: err.to_string(), line: err.position().position().unwrap_or_default(), file: script.identifier.name.clone(), }); // update the scope after running the script self.scope = scope; info!( "Done running script {} in Script Host", script.identifier.name ); match result { Ok(r) => { let 
bare_str = r.as_str().unwrap_or_default(); let val: Value = if r.is::<String>() { if let Ok(obj) = serde_json::from_str(bare_str) { obj } else { // support for returned raw strings serde_json::from_str(&format!("\"{}\"", bare_str)).unwrap() } } else { from_dynamic::<Value>(&r).map_err(|_err| ScriptEngineError { error_text: format!("Could not deserialize {} into struct", bare_str) .into(), ..Default::default() })? }; Ok(ScriptResult { is_error: false, underlying: serde_json::to_value(val).unwrap(), }) } Err(err) => { println!("Error running script {:?}", err); Ok(ScriptResult { is_error: true, underlying: serde_json::to_value(err.to_string()).unwrap(), }) } } } } #[cfg(test)] mod tests { use crate::*; use std::sync::Once; static START: Once = Once::new(); fn init() { START.call_once(|| { // run initialization here std::env::set_var("RUST_LOG", "trace"); pretty_env_logger::init(); }); } fn test() -> Result<Dynamic, Box<EvalAltResult>> { to_dynamic(User { id: 1 }) } #[derive(Serialize, Deserialize, PartialEq, Clone, Copy, Debug)] struct User { id: u32, } impl From<ScriptResult> for User { fn from(sr: ScriptResult) -> Self { sr.to().unwrap() } } #[tokio::test(threaded_scheduler)] async fn script_host() { init(); let mut sh = ScriptEngine::with_default_plugins(); let script = r#" print ("Hello" + " World"); "test" "#; let result = sh .execute(Script::with_name("test script", &script)) .unwrap(); assert_eq!("test", result.to::<String>().unwrap_or_default()); } #[tokio::test(threaded_scheduler)] async fn script_host_adv() { init(); let mut sh = ScriptEngine::with_default_plugins(); let script = r#" print ("Advanced Test"); #{ "id": 42 } "#; let result = sh .execute(Script::with_name("test script", &script)) .unwrap(); assert_eq!( User { id: 42 }, result.to::<User>().expect("to convert to a user") ); } }
26.640152
95
0.534338
f8ffa91e2e2b00fd4dbffcdbdb0b26c667fef5a6
2,062
#[macro_use] extern crate gfx; extern crate gfx_phase; use std::marker::PhantomData; gfx_parameters!( Params { u_Transform@ transform: [[f32; 4]; 4], u_Color@ color: [f32; 4], }); pub struct Material<R: gfx::Resources> { pub something: PhantomData<R>, //tex_color: gfx::handle::Texture<R>, //tex_blade: gfx::handle::Texture<R>, } impl<R: gfx::Resources> gfx_phase::Material for Material<R> {} pub trait ViewInfo: gfx_phase::ToDepth { fn get_transform(&self) -> [[f32; 4]; 4]; } pub struct Technique<R: gfx::Resources> { program: gfx::handle::Program<R>, state: gfx::DrawState, } impl<R: gfx::Resources> Technique<R> { pub fn new<F: gfx::Factory<R>>(factory: &mut F) -> Technique<R> { use gfx::traits::FactoryExt; Technique { program: factory.link_program( include_bytes!("../gpu/shader.glslv"), include_bytes!("../gpu/shader.glslf"), ).unwrap(), state: gfx::DrawState::new().depth(gfx::state::Comparison::LessEqual, true), } } } impl< R: gfx::Resources, V: ViewInfo, > gfx_phase::Technique<R, Material<R>, V> for Technique<R> { type Kernel = (); type Params = Params<R>; fn test(&self, _: &gfx::Mesh<R>, _: &Material<R>) -> Option<()> { Some(()) } fn compile<'a>(&'a self, kernel: (), space: &V) -> gfx_phase::TechResult<'a, R, Params<R>> { ( &self.program, Params { transform: space.get_transform(), color: [0.0; 4], _r: PhantomData, }, &self.state, None, ) } fn fix_params(&self, _mat: &Material<R>, _space: &V, _params: &mut Params<R>) { } } pub type Phase<R, V> = gfx_phase::CachedPhase<R, Material<R>, V, Technique<R>>; pub fn create< R: gfx::Resources, F: gfx::Factory<R>, V: ViewInfo, >(factory: &mut F) -> Phase<R, V> { gfx_phase::Phase::new("Fur", Technique::new(factory)) .with_cache() }
25.45679
88
0.550436
4b8f70a4973c4c6c9ca99d0ef76d2e2b457ab43d
1,979
use crate::build_solidity; use ethabi::Token; #[test] fn builtins() { let mut vm = build_solidity( r#" contract timestamp { function mr_now() public returns (uint64) { return block.timestamp; } function mr_slot() public returns (uint64) { return block.slot; } function mr_blocknumber() public returns (uint64) { return block.number; } function msg_data(uint32 x) public returns (bytes) { return msg.data; } function sig() public returns (bytes4) { return msg.sig; } }"#, ); vm.constructor("timestamp", &[], 0); let returns = vm.function("mr_now", &[], &[], 0, None); assert_eq!( returns, vec![Token::Uint(ethereum_types::U256::from(1620656423))] ); let returns = vm.function("mr_slot", &[], &[], 0, None); assert_eq!( returns, vec![Token::Uint(ethereum_types::U256::from(70818331))] ); let returns = vm.function("mr_blocknumber", &[], &[], 0, None); assert_eq!( returns, vec![Token::Uint(ethereum_types::U256::from(70818331))] ); let returns = vm.function( "msg_data", &[Token::Uint(ethereum_types::U256::from(0xdeadcafeu32))], &[], 0, None, ); if let Token::Bytes(v) = &returns[0] { println!("{}", hex::encode(v)); } assert_eq!( returns, vec![Token::Bytes( hex::decode("84da38e000000000000000000000000000000000000000000000000000000000deadcafe") .unwrap() )] ); let returns = vm.function("sig", &[], &[], 0, None); if let Token::FixedBytes(v) = &returns[0] { println!("{}", hex::encode(v)); } assert_eq!( returns, vec![Token::FixedBytes(hex::decode("00a7029b").unwrap())] ); }
24.432099
99
0.511369
eba5ee29996b1663801e30db588dfe067ba49253
2,054
use std::cell::RefCell;

#[derive(Debug,PartialEq)]
pub struct Node<'graph, T> {
    parent: RefCell<Option<&'graph Node<'graph, T>>>,
    children: RefCell<Vec<&'graph Node<'graph, T>>>,
    data: RefCell<T>,
}

impl<T> Node<'_, T> {
    pub fn new<'a>(data: T) -> Node<'a, T> {
        let node = Node{
            parent: RefCell::new(None),
            children: RefCell::new(vec![]),
            data: RefCell::new(data),
        };
        node
    }
}

pub fn add_child_to_parent<'b, T>(parent: &'b Node<'b, T>, child: &'b Node<'b, T>) {
    parent.children.borrow_mut().push(child);
    child.parent.replace(Some(parent));
}

#[cfg(test)]
mod test {
    use super::Node;
    use crate::trees::common::NodeData;
    use super::add_child_to_parent;

    #[test]
    fn test_new_node() {
        let data = NodeData{value: 0.5};
        let n = Node::new(data.clone());
        assert_eq!(n.data.borrow().value, data.value);
    }

    #[test]
    fn test_build_graph() {
        let p = Node::new(NodeData{value: 0.1});
        let c1 = Node::new(NodeData{value: 0.3});
        let c2 = Node::new(NodeData{value: 0.4});
        add_child_to_parent(&p, &c1);
        add_child_to_parent(&p, &c2);
    }

    #[test]
    fn test_replace_data() {
        let p = Node::new(NodeData{value: 0.1});
        let c1 = Node::new(NodeData{value: 0.3});
        add_child_to_parent(&p, &c1);
        assert_eq!(p.data.borrow().value, 0.1);
        p.data.borrow_mut().value = 0.6;
        assert_eq!(p.data.borrow().value, 0.6);
    }

    #[test]
    fn test_get_children_through_parent() {
        let p = Node::new(NodeData{value: 0.1});
        let c1 = Node::new(NodeData{value: 0.3});
        add_child_to_parent(&p, &c1);
        let first_child = p.children.borrow()[0];
        assert_eq!(first_child.data.borrow().value, 0.3);
        let c1c1 = Node::new(NodeData{value:0.5});
        add_child_to_parent(first_child, &c1c1);
        let grand_child = c1.children.borrow()[0];
        assert_eq!(grand_child.data.borrow().value, 0.5);
    }
}
27.026316
84
0.563778
ccace6d063700bb00ce62c410750cb6690b7ca80
2,353
use super::{cos_phi, cos_theta, same_hemisphere, sin_phi, sin_theta, Bxdf, BxdfSample, BxdfType};
use crate::{
    math::{Point2, Spectrum, Vec3},
    sampling::cosine_sample_hemisphere,
};

// Based on Physically Based Rendering 3rd ed.
// https://www.pbr-book.org/3ed-2018/Reflection_Models/Microfacet_Models#OrenndashNayarDiffuseReflection
pub struct OrenNayar {
    reflectance: Spectrum<f32>,
    a: f32,
    b: f32,
}

impl OrenNayar {
    /// Instead of pbrt's sigma degrees, expect sigma radians.
    pub fn new(reflectance: Spectrum<f32>, sigma: f32) -> Self {
        let sigma2 = sigma * sigma;
        let a = 1.0 - (sigma2 / (2.0 * (sigma2 + 0.33)));
        let b = 0.45 * sigma2 / (sigma2 + 0.09);
        Self { reflectance, a, b }
    }
}

impl Bxdf for OrenNayar {
    fn f(&self, wi: Vec3<f32>, wo: Vec3<f32>) -> Spectrum<f32> {
        let sin_theta_i = sin_theta(wi);
        let sin_theta_o = sin_theta(wo);
        let max_cos = if sin_theta_i > 1e-4 && sin_theta_o > 1e-4 {
            let sin_phi_i = sin_phi(wi);
            let cos_phi_i = cos_phi(wi);
            let sin_phi_o = sin_phi(wo);
            let cos_phi_o = cos_phi(wo);
            let d_cos = cos_phi_i * cos_phi_o + sin_phi_i * sin_phi_o;
            d_cos.max(0.0)
        } else {
            0.0
        };
        let (sin_alpha, tan_beta) = if cos_theta(wi).abs() > cos_theta(wo).abs() {
            (sin_theta_o, sin_theta_i / cos_theta(wi).abs())
        } else {
            (sin_theta_i, sin_theta_o / cos_theta(wo).abs())
        };
        self.reflectance
            * std::f32::consts::FRAC_1_PI
            * (self.a + self.b * max_cos * sin_alpha * tan_beta)
    }

    fn sample_f(&self, wo: Vec3<f32>, u: Point2<f32>) -> BxdfSample {
        let mut wi = cosine_sample_hemisphere(u);
        if wo.z < 0.0 {
            wi.z *= -1.0;
        };
        let pdf = self.pdf(wo, wi);
        let f = self.f(wo, wi);
        BxdfSample {
            wi,
            f,
            pdf,
            sample_type: self.flags(),
        }
    }

    fn pdf(&self, wo: Vec3<f32>, wi: Vec3<f32>) -> f32 {
        if same_hemisphere(wo, wi) {
            cos_theta(wi).abs() * std::f32::consts::FRAC_1_PI
        } else {
            0.0
        }
    }

    fn flags(&self) -> BxdfType {
        BxdfType::DIFFUSE | BxdfType::REFLECTION
    }
}
28.695122
104
0.542711
ff5c3d2ed8373a7fc7e56aafd80d2c7c99fc61a9
346
//! This crate configures memory allocator.
//!
//! The swc crates related to the node binding should depend on this crate.

#[cfg(all(
    not(debug_assertions),
    not(all(target_os = "linux", target_arch = "aarch64", target_env = "musl")),
))]
#[global_allocator]
static ALLOC: mimalloc_rust::GlobalMiMalloc = mimalloc_rust::GlobalMiMalloc;
31.454545
80
0.722543
876dbae4f810adc37e4973ddbe0e6ef350f3e4c7
4,968
// Copyright (c) The cargo-guppy Contributors // SPDX-License-Identifier: MIT OR Apache-2.0 use crate::context::ContextImpl; use anyhow::Result; use camino::{Utf8Path, Utf8PathBuf}; use fixtures::json::*; use hakari::{diffy::PatchFormatter, Hakari, HakariBuilder, HakariCargoToml, HakariOutputOptions}; use once_cell::sync::Lazy; use proptest::prelude::*; use proptest_ext::ValueGenerator; pub struct HakariTomlContext; impl<'g> ContextImpl<'g> for HakariTomlContext { type IterArgs = usize; type IterItem = (usize, HakariTomlItem<'g>); type Existing = HakariCargoToml; fn dir_name(fixture: &'g JsonFixture) -> Utf8PathBuf { fixture .abs_path() .parent() .expect("up to dirname of summary") .join("hakari") } fn file_name(fixture: &'g JsonFixture, &(count, _): &Self::IterItem) -> String { format!("{}-{}.toml", fixture.name(), count) } fn iter( fixture: &'g JsonFixture, &count: &Self::IterArgs, ) -> Box<dyn Iterator<Item = Self::IterItem> + 'g> { // Make a fresh generator for each output so that filtering by --fixtures continues to // produce deterministic results. let mut generator = ValueGenerator::from_seed(fixture.name()); let graph = fixture.graph(); // TODO: add tests for hakari id -- none of our fixtures appear to have a // workspace-hack or other Hakari package let hakari_builder_strategy = HakariBuilder::prop010_strategy(graph, Just(None)); let iter = (0..count).map(move |idx| { // The partial clones mean that a change to the algorithm in part of the strategy won't // affect the rest of it. let mut iter_generator = generator.partial_clone(); let mut builder = iter_generator .partial_clone() .generate(&hakari_builder_strategy); // The alternate fixture uses this registry. if fixture.name() == "metadata_alternate_registries" { builder.add_registries([("my-registry", METADATA_ALTERNATE_REGISTRY_URL)]); } let hakari = builder.compute(); let mut output_options = HakariOutputOptions::default(); output_options .set_builder_summary(true) .set_absolute_paths(true); let toml = hakari .to_toml_string(&output_options) .expect("to_toml_string worked"); (idx, HakariTomlItem { hakari, toml }) }); Box::new(iter) } fn parse_existing(path: &Utf8Path, contents: String) -> Result<Self::Existing> { Ok(HakariCargoToml::new_in_memory(path, contents)?) } fn is_changed((_, item): &Self::IterItem, existing: &Self::Existing) -> bool { existing.is_changed(&item.toml) } fn diff( _fixture: &'g JsonFixture, (_, item): &Self::IterItem, existing: Option<&Self::Existing>, ) -> String { static DEFAULT_EXISTING: Lazy<HakariCargoToml> = Lazy::new(|| { let contents = format!( "{}{}", HakariCargoToml::BEGIN_SECTION, HakariCargoToml::END_SECTION ); HakariCargoToml::new_in_memory("default", contents) .expect("contents are in correct format") }); let existing = existing.unwrap_or(&*DEFAULT_EXISTING); let diff = existing.diff_toml(&item.toml); let formatter = PatchFormatter::new(); format!("{}", formatter.fmt_patch(&diff)) // let package_id = guppy::PackageId::new( // "curl-sys 0.4.36+curl-7.71.1 (registry+https://github.com/rust-lang/crates.io-index)", // ); // let explain = item.hakari.explain(&package_id); // let explain = if let Ok(explain) = explain { // format!("{}", explain.display()) // } else { // "".to_owned() // }; // format!("{}\n\n{}", formatter.fmt_patch(&diff), explain) } fn write_to_string( fixture: &'g JsonFixture, (_, item): &Self::IterItem, out: &mut String, ) -> Result<()> { // XXX this should be unified with `DEFAULT_EXISTING` somehow, bleh let out_contents = format!( "# This file is @generated. 
To regenerate, run:\n\ # cargo run -p fixture-manager -- generate-hakari --fixture {}\n\ \n\ ### BEGIN HAKARI SECTION\n\ \n\ ### END HAKARI SECTION\n\ \n\ # This part of the file should be preserved at the end.\n", fixture.name() ); let new_toml = HakariCargoToml::new_in_memory("bogus", out_contents)?; Ok(new_toml.write_to_fmt(&item.toml, out)?) } } pub struct HakariTomlItem<'g> { #[allow(dead_code)] hakari: Hakari<'g>, toml: String, }
34.985915
101
0.57971
f90886c8e7e158020c8e86520c11fa408e68a42c
309
use crate::authentication::get_token::*;

#[derive(Serialize, Deserialize)]
pub struct RequestParameters {
    pub grant_type: String,
    pub client_id: String,
    pub code: String,
    pub code_verifier: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub redirect_uri: Option<String>,
}
25.75
53
0.708738
332fe51bc306dea067f09b0247cd67627007b948
414
// Take a look at the license at the top of the repository in the LICENSE file.

use crate::subclass::prelude::WidgetImpl;
use crate::Native;
use glib::subclass::prelude::*;

pub trait NativeImpl: WidgetImpl {}

unsafe impl<T: NativeImpl> IsImplementable<T> for Native {
    fn interface_init(_iface: &mut glib::Interface<Self>) {}

    fn instance_init(_instance: &mut glib::subclass::InitializingObject<T>) {}
}
29.571429
79
0.731884
14096de30fd7e25254adf363a4f2701a0c1186cb
10,637
#![allow(clippy::unused_unit)] use kompact::{prelude::*, serde_serialisers::*}; use kompact_examples::trusting::*; use serde::{Deserialize, Serialize}; use std::{ collections::HashSet, net::{IpAddr, Ipv4Addr, SocketAddr}, time::Duration, }; // ANCHOR: messages #[derive(Serialize, Deserialize, Debug, Clone, Copy)] struct CheckIn; impl SerialisationId for CheckIn { const SER_ID: SerId = 2345; } #[derive(Serialize, Deserialize, Debug, Clone)] struct UpdateProcesses(Vec<ActorPath>); impl SerialisationId for UpdateProcesses { const SER_ID: SerId = 3456; } // ANCHOR_END: messages // ANCHOR: state #[derive(ComponentDefinition)] struct BootstrapServer { ctx: ComponentContext<Self>, processes: HashSet<ActorPath>, } impl BootstrapServer { fn new() -> Self { BootstrapServer { ctx: ComponentContext::uninitialised(), processes: HashSet::new(), } } // ANCHOR_END: state // ANCHOR: behaviour fn broadcast_processess(&self) -> () { let procs: Vec<ActorPath> = self.processes.iter().cloned().collect(); let msg = UpdateProcesses(procs); self.processes.iter().for_each(|process| { process.tell((msg.clone(), Serde), self); }); } } ignore_lifecycle!(BootstrapServer); impl NetworkActor for BootstrapServer { type Deserialiser = Serde; type Message = CheckIn; fn receive(&mut self, source: Option<ActorPath>, _msg: Self::Message) -> Handled { if let Some(process) = source { if self.processes.insert(process) { self.broadcast_processess(); } } Handled::Ok } } // ANCHOR_END: behaviour // ANCHOR: ele_state #[derive(ComponentDefinition)] struct EventualLeaderElector { ctx: ComponentContext<Self>, omega_port: ProvidedPort<EventualLeaderDetection>, bootstrap_server: ActorPath, processes: Box<[ActorPath]>, candidates: HashSet<ActorPath>, period: Duration, delta: Duration, timer_handle: Option<ScheduledTimer>, leader: Option<ActorPath>, } impl EventualLeaderElector { fn new(bootstrap_server: ActorPath) -> Self { let minimal_period = Duration::from_millis(1); EventualLeaderElector { ctx: ComponentContext::uninitialised(), omega_port: ProvidedPort::uninitialised(), bootstrap_server, processes: Vec::new().into_boxed_slice(), candidates: HashSet::new(), period: minimal_period, delta: minimal_period, timer_handle: None, leader: None, } } // ANCHOR_END: ele_state fn select_leader(&mut self) -> Option<ActorPath> { let mut candidates: Vec<ActorPath> = self.candidates.drain().collect(); candidates.sort_unstable(); candidates.reverse(); // pick smallest instead of largest candidates.pop() } fn handle_timeout(&mut self, timeout_id: ScheduledTimer) -> Handled { match self.timer_handle.take() { Some(timeout) if timeout == timeout_id => { let new_leader = self.select_leader(); if new_leader != self.leader { self.period += self.delta; self.leader = new_leader; if let Some(ref leader) = self.leader { self.omega_port.trigger(Trust(leader.clone())); } self.cancel_timer(timeout); let new_timer = self.schedule_periodic(self.period, self.period, Self::handle_timeout); self.timer_handle = Some(new_timer); } else { // just put it back self.timer_handle = Some(timeout); } self.send_heartbeats(); Handled::Ok } Some(_) => Handled::Ok, // just ignore outdated timeouts None => { warn!(self.log(), "Got unexpected timeout: {:?}", timeout_id); Handled::Ok } // can happen during restart or teardown } } fn send_heartbeats(&self) { self.processes.iter().for_each(|process| { process.tell((Heartbeat, Serde), self); }); } } impl ComponentLifecycle for EventualLeaderElector { fn on_start(&mut self) -> Handled { // ANCHOR: checkin 
self.bootstrap_server.tell((CheckIn, Serde), self); // ANCHOR_END: checkin self.period = self.ctx.config()["omega"]["initial-period"] .as_duration() .expect("initial period"); self.delta = self.ctx.config()["omega"]["delta"] .as_duration() .expect("delta"); let timeout = self.schedule_periodic(self.period, self.period, Self::handle_timeout); self.timer_handle = Some(timeout); Handled::Ok } fn on_stop(&mut self) -> Handled { if let Some(timeout) = self.timer_handle.take() { self.cancel_timer(timeout); } Handled::Ok } fn on_kill(&mut self) -> Handled { self.on_stop() } } // Doesn't have any requests ignore_requests!(EventualLeaderDetection, EventualLeaderElector); // ANCHOR: actor impl Actor for EventualLeaderElector { type Message = Never; fn receive_local(&mut self, _msg: Self::Message) -> Handled { unreachable!(); } fn receive_network(&mut self, msg: NetMessage) -> Handled { let sender = msg.sender; match_deser! { (msg.data) { msg(_heartbeat): Heartbeat [using Serde] => { self.candidates.insert(sender); }, msg(UpdateProcesses(processes)): UpdateProcesses [using Serde] => { info!( self.log(), "Received new process set with {} processes", processes.len() ); self.processes = processes.into_boxed_slice(); }, } }; Handled::Ok } } // ANCHOR_END: actor // ANCHOR: main pub fn main() { let args: Vec<String> = std::env::args().collect(); match args.len() { 2 => { let bootstrap_port: u16 = args[1].parse().expect("port number"); let bootstrap_socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), bootstrap_port); let system = run_server(bootstrap_socket); system.await_termination(); // gotta quit it from command line } 3 => { let bootstrap_port: u16 = args[1].parse().expect("port number"); let bootstrap_socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), bootstrap_port); let client_port: u16 = args[2].parse().expect("port number"); let client_socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), client_port); let system = run_client(bootstrap_socket, client_socket); system.await_termination(); // gotta quit it from command line } x => panic!("Expected either 1 argument (the port for the bootstrap server to bind on) or 2 arguments (boostrap server and client port), but got {} instead!", x-1), } } // ANCHOR_END: main // ANCHOR: server const BOOTSTRAP_PATH: &str = "bootstrap"; pub fn run_server(socket: SocketAddr) -> KompactSystem { let mut cfg = KompactConfig::default(); cfg.load_config_file("./application.conf"); cfg.system_components(DeadletterBox::new, NetworkConfig::new(socket).build()); let system = cfg.build().expect("KompactSystem"); let (bootstrap, bootstrap_registration) = system.create_and_register(BootstrapServer::new); let bootstrap_service_registration = system.register_by_alias(&bootstrap, BOOTSTRAP_PATH); let _bootstrap_unique = bootstrap_registration .wait_expect(Duration::from_millis(1000), "bootstrap never registered"); let bootstrap_service = bootstrap_service_registration .wait_expect(Duration::from_millis(1000), "bootstrap never registered"); system.start(&bootstrap); let printer = system.create(TrustPrinter::new); let (detector, registration) = system.create_and_register(|| EventualLeaderElector::new(bootstrap_service)); biconnect_components::<EventualLeaderDetection, _, _>(&detector, &printer).expect("connection"); let _path = registration.wait_expect(Duration::from_millis(1000), "detector never registered"); system.start(&printer); system.start(&detector); system } // ANCHOR_END: server // ANCHOR: client pub fn run_client(bootstrap_socket: SocketAddr, client_socket: 
SocketAddr) -> KompactSystem { let mut cfg = KompactConfig::default(); cfg.load_config_file("./application.conf"); cfg.system_components( DeadletterBox::new, NetworkConfig::new(client_socket).build(), ); let system = cfg.build().expect("KompactSystem"); let bootstrap_service: ActorPath = NamedPath::with_socket( Transport::Tcp, bootstrap_socket, vec![BOOTSTRAP_PATH.into()], ) .into(); let printer = system.create(TrustPrinter::new); let (detector, registration) = system.create_and_register(|| EventualLeaderElector::new(bootstrap_service)); biconnect_components::<EventualLeaderDetection, _, _>(&detector, &printer).expect("connection"); let _path = registration.wait_expect(Duration::from_millis(1000), "detector never registered"); system.start(&printer); system.start(&detector); system } // ANCHOR_END: client #[cfg(test)] mod tests { use super::*; const SERVER_SOCKET: &str = "127.0.0.1:12345"; const CLIENT_SOCKET: &str = "127.0.0.1:0"; #[test] fn test_bootstrapping() { let server_socket: SocketAddr = SERVER_SOCKET.parse().unwrap(); let server_system = run_server(server_socket); let client_socket: SocketAddr = CLIENT_SOCKET.parse().unwrap(); let mut clients_systems: Vec<KompactSystem> = (0..3) .map(|_i| run_client(server_socket, client_socket)) .collect(); // let them settle std::thread::sleep(Duration::from_millis(1000)); // shut down systems one by one for sys in clients_systems.drain(..) { std::thread::sleep(Duration::from_millis(1000)); sys.shutdown().expect("shutdown"); } std::thread::sleep(Duration::from_millis(1000)); server_system.shutdown().expect("shutdown"); } }
33.768254
172
0.612015
4a197bc042b220a6447336dc2adc09481896ffaf
1,106
use codec::{Decode, Encode, MaxEncodedLen};
use scale_info::TypeInfo;

use common_traits::TokenMetadata;
#[cfg(feature = "std")]
use serde::{Deserialize, Serialize};
use sp_std::vec::Vec;

#[derive(
    Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, MaxEncodedLen,
)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub enum CurrencyId {
    Native,
    // Karura KSM
    KSM,
    // Karura Dollar
    KUSD,
}

impl TokenMetadata for CurrencyId {
    fn name(&self) -> Vec<u8> {
        match self {
            CurrencyId::Native => b"Native currency".to_vec(),
            CurrencyId::KUSD => b"Karura Dollar".to_vec(),
            CurrencyId::KSM => b"Kusama".to_vec(),
        }
    }

    fn symbol(&self) -> Vec<u8> {
        match self {
            CurrencyId::Native => b"IMBU".to_vec(),
            CurrencyId::KUSD => b"KUSD".to_vec(),
            CurrencyId::KSM => b"KSM".to_vec(),
        }
    }

    fn decimals(&self) -> u8 {
        match self {
            CurrencyId::Native => 12,
            CurrencyId::KUSD | CurrencyId::KSM => 12,
        }
    }
}
25.136364
96
0.573237
1ddd3c042637143555d78f715f733fc4fbd6cfba
88
// pp-exact

// The next line should not be expanded
mod issue_12590_b;

fn main() { }
11
39
0.670455
b9e4ffa84cdea1d6b21b61b0f3fa3d2b333e0386
1,942
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// Check that traits with various kinds of associated items cause
// dropck to inject extra region constraints.

#![allow(non_camel_case_types)]

trait HasSelfMethod { fn m1(&self) { } }
trait HasMethodWithSelfArg { fn m2(x: &Self) { } }
trait HasType { type Something; }

impl HasSelfMethod for i32 { }
impl HasMethodWithSelfArg for i32 { }
impl HasType for i32 { type Something = (); }

impl<'a,T> HasSelfMethod for &'a T { }
impl<'a,T> HasMethodWithSelfArg for &'a T { }
impl<'a,T> HasType for &'a T { type Something = (); }

// e.g., `impl_drop!(Send, D_Send)` expands to:
// ```rust
// struct D_Send<T:Send>(T);
// impl<T:Send> Drop for D_Send<T> { fn drop(&mut self) { } }
// ```
macro_rules! impl_drop {
    ($Bound:ident, $Id:ident) => {
        struct $Id<T:$Bound>(T);
        impl <T:$Bound> Drop for $Id<T> { fn drop(&mut self) { } }
    }
}

impl_drop!{HasSelfMethod, D_HasSelfMethod}
impl_drop!{HasMethodWithSelfArg, D_HasMethodWithSelfArg}
impl_drop!{HasType, D_HasType}

fn f_sm() {
    let (_d, d1);
    d1 = D_HasSelfMethod(1);
    _d = D_HasSelfMethod(&d1);
}
//~^^ ERROR `d1` does not live long enough
fn f_mwsa() {
    let (_d, d1);
    d1 = D_HasMethodWithSelfArg(1);
    _d = D_HasMethodWithSelfArg(&d1);
}
//~^^ ERROR `d1` does not live long enough
fn f_t() {
    let (_d, d1);
    d1 = D_HasType(1);
    _d = D_HasType(&d1);
}
//~^^ ERROR `d1` does not live long enough

fn main() {
    f_sm();
    f_mwsa();
    f_t();
}
28.558824
68
0.647786
89829a57c10499ae2e9f2d6437dcb554d958fb6a
1,650
/* pub fn write_datafile<T:Datafile,W:Writer>(df: &T, writer: &mut W) -> Result<IoResult<()>,()> { let compressed_data: Vec<Vec<u8>> = try!(result::collect(df.data_iter().map(|maybe_x| maybe_x.map(|x| { zlib::compress_vec(x).unwrap() (item_type.start, item_type.num) } } /* pub fn write_datafile<T:Datafile,W:Writer>(df: &T, writer: &mut W) -> Result<IoResult<()>,()> { let compressed_data: Vec<Vec<u8>> = result::collect(df.data_iter().map(|maybe_x| maybe_x.map(|x| { zlib::compress_vec(x).unwrap() })))?; let size_items = df.items().fold(0, |s, i| { s + i.data.len() * mem::size_of::<i32>() + mem::size_of::<DatafileItemHeader>() }); let size_data = compressed_data.iter().fold(0, |s, d| s + d.len()); DatafileHeaderVersion { magic: DATAFILE_MAGIC, version: 4, }.write(writer)?; DatafileHeader { _size: unimplemented!(), _swaplen: unimplemented!(), num_item_types: df.item_types().len(), num_items: df.items().len(), num_data: df.data_iter().len(), size_items: size_items, size_data: size_data, }.write(writer)?; for &type_id in df.item_types() { let (start, num) = df.item_type_indexes_start_num(type_id); DatafileItemType { type_id: type_id.as_i32().unwrap(), start: start.as_i32().unwrap(), num: num.as_i32().unwrap(), }.write(writer)?; } for DatafileItem { type_id, id, data } in df.items() { DatafileItemHeader::new(type_id, id, data.len()).write(writer)?; } unimplemented!(); Ok(Ok(())) } */ */
31.132075
107
0.580606
ccf80ec423042f020f96e049b83e63f9704e46d9
3,666
mod io { use git_features::pipe; use std::io::{BufRead, ErrorKind, Read, Write}; #[test] fn threaded_read_to_end() { let (mut writer, mut reader) = git_features::pipe::unidirectional(0); let message = "Hello, world!"; std::thread::spawn(move || { writer .write_all(message.as_bytes()) .expect("writes to work if reader is present") }); let mut received = String::new(); reader.read_to_string(&mut received).unwrap(); assert_eq!(&received, message); } #[test] fn lack_of_reader_fails_with_broken_pipe() { let (mut writer, _) = pipe::unidirectional(None); assert_eq!( writer.write_all(b"must fail").unwrap_err().kind(), ErrorKind::BrokenPipe ); } #[test] fn line_reading_one_by_one() { let (mut writer, mut reader) = pipe::unidirectional(2); writer.write_all(b"a\n").expect("success"); writer.write_all(b"b\nc").expect("success"); drop(writer); let mut buf = String::new(); for expected in &["a\n", "b\n", "c"] { buf.clear(); assert_eq!(reader.read_line(&mut buf).expect("success"), expected.len()); assert_eq!(buf, *expected); } } #[test] fn line_reading() { let (mut writer, reader) = pipe::unidirectional(2); writer.write_all(b"a\n").expect("success"); writer.write_all(b"b\nc\n").expect("success"); drop(writer); assert_eq!( reader.lines().flat_map(Result::ok).collect::<Vec<_>>(), vec!["a", "b", "c"] ) } #[test] fn writer_can_inject_errors() { let (writer, mut reader) = pipe::unidirectional(1); writer .channel .send(Err(std::io::Error::new(std::io::ErrorKind::Other, "the error"))) .expect("send success"); let mut buf = [0]; assert_eq!( reader.read(&mut buf).unwrap_err().to_string(), "the error", "using Read trait, errors are propagated" ); writer .channel .send(Err(std::io::Error::new(std::io::ErrorKind::Other, "the error"))) .expect("send success"); assert_eq!( reader.fill_buf().unwrap_err().to_string(), "the error", "using BufRead trait, errors are propagated" ); } #[test] fn continue_on_empty_writes() { let (mut writer, mut reader) = pipe::unidirectional(2); writer.write(&[]).expect("write successful and non-blocking"); let input = b"hello"; writer .write(input) .expect("second write works as well as there is capacity"); let mut buf = vec![0u8; input.len()]; assert_eq!(reader.read(&mut buf).expect("read succeeds"), input.len()); assert_eq!(buf, &input[..]); } #[test] fn small_reads() { const BLOCK_SIZE: usize = 20; let block_count = 20; let (mut writer, mut reader) = pipe::unidirectional(Some(4)); std::thread::spawn(move || { for _ in 0..block_count { let data = &[0; BLOCK_SIZE]; writer.write_all(data).unwrap(); } }); let mut small_read_buf = [0; BLOCK_SIZE / 2]; let mut bytes_read = 0; while let Ok(size) = reader.read(&mut small_read_buf) { if size == 0 { break; } bytes_read += size; } assert_eq!(block_count * BLOCK_SIZE, bytes_read); } }
31.333333
85
0.525368
bbcc62dc18cb77f9f11531b14e933110ee042539
3,748
use crate::error::Result; use crate::expr::{Expression, Sort, Variable}; use crate::util::CompactIterator; use std::collections::BTreeSet; use std::fmt; #[derive(Clone, Debug, Hash, Eq, PartialEq)] pub enum Cache { Fetch(usize), // Fetch N bits into the cache Evict(usize), // Evict N bits from the cache } impl fmt::Display for Cache { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Fetch(bit_width) => write!(f, "(cache-fetch {})", bit_width), Self::Evict(bit_width) => write!(f, "(cache-evict {})", bit_width), } } } impl Cache { pub fn variable() -> Variable { let mut var = Variable::new("_cache", Sort::cache()); var.set_rollback_persistent(true); var } pub fn fetch(bit_width: usize, cache: Expression, addr: Expression) -> Result<Expression> { cache.sort().expect_cache()?; addr.sort().expect_word()?; Ok(Expression::new( Self::Fetch(bit_width).into(), vec![cache, addr], Sort::cache(), )) } pub fn evict(bit_width: usize, cache: Expression, addr: Expression) -> Result<Expression> { cache.sort().expect_cache()?; addr.sort().expect_word()?; Ok(Expression::new( Self::Evict(bit_width).into(), vec![cache, addr], Sort::cache(), )) } } pub enum CacheAddresses { EvictedFromFullCache(BTreeSet<u64>), FetchedIntoEmptyCache(BTreeSet<u64>), } #[derive(Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)] pub struct CacheValue { addresses: BTreeSet<u64>, // Holds evicted addresses if default is cached, or fetched addresses if default is not cached. default_is_cached: bool, } impl CacheValue { pub fn empty() -> Self { Self { addresses: BTreeSet::new(), default_is_cached: false, } } pub fn full() -> Self { Self { addresses: BTreeSet::new(), default_is_cached: true, } } pub fn fetch(&mut self, addr: u64) { if self.default_is_cached { // Remove evicted self.addresses.remove(&addr); } else { // Add fetched self.addresses.insert(addr); } } pub fn evict(&mut self, addr: u64) { if self.default_is_cached { // Add evicted self.addresses.insert(addr); } else { // Removed fetched self.addresses.remove(&addr); } } pub fn is_cached(&self, addr: u64) -> bool { if self.default_is_cached { let evicted = self.addresses.contains(&addr); !evicted } else { self.addresses.contains(&addr) } } pub fn addresses(&self) -> CacheAddresses { if self.default_is_cached { CacheAddresses::EvictedFromFullCache(self.addresses.clone()) } else { CacheAddresses::FetchedIntoEmptyCache(self.addresses.clone()) } } } impl fmt::Display for CacheValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if self.default_is_cached { write!(f, "⊤ ∖ ")?; } write!(f, "{{")?; let mut is_first = true; for (addr_start, addr_end) in self.addresses.iter().compact(|&a, &b| a + 1 == *b) { if !is_first { write!(f, ", ")?; } if addr_start == addr_end { write!(f, "0x{:X}", addr_start)?; } else { write!(f, "0x{:X}…0x{:X}", addr_start, addr_end)?; } is_first = false; } write!(f, "}}") } }
27.357664
125
0.534685
296615e15ffa065cc1081035dfbff1e806a4697c
12,273
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use stack::Stack; use std::uint; use std::mem::transmute; use std::rt::stack; use std::raw; #[cfg(target_arch = "x86_64")] use std::simd; use libc; // FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing // SSE regs. It would be marginally better not to do this. In C++ we // use an attribute on a struct. // FIXME #7761: It would be nice to define regs as `Box<Option<Registers>>` // since the registers are sometimes empty, but the discriminant would // then misalign the regs again. pub struct Context { /// Hold the registers while the task or scheduler is suspended regs: Box<Registers>, /// Lower bound and upper bound for the stack stack_bounds: Option<(uint, uint)>, } pub type InitFn = extern "C" fn(uint, *mut (), *mut ()) -> !; impl Context { pub fn empty() -> Context { Context { regs: new_regs(), stack_bounds: None, } } /// Create a new context that will resume execution by running proc() /// /// The `init` function will be run with `arg` and the `start` procedure /// split up into code and env pointers. It is required that the `init` /// function never return. /// /// FIXME: this is basically an awful the interface. The main reason for /// this is to reduce the number of allocations made when a green /// task is spawned as much as possible pub fn new(init: InitFn, arg: uint, start: proc():Send, stack: &mut Stack) -> Context { let sp: *const uint = stack.end(); let sp: *mut uint = sp as *mut uint; // Save and then immediately load the current context, // which we will then modify to call the given function when restored let mut regs = new_regs(); initialize_call_frame(&mut *regs, init, arg, unsafe { transmute(start) }, sp); // Scheduler tasks don't have a stack in the "we allocated it" sense, // but rather they run on pthreads stacks. We have complete control over // them in terms of the code running on them (and hopefully they don't // overflow). Additionally, their coroutine stacks are listed as being // zero-length, so that's how we detect what's what here. let stack_base: *const uint = stack.start(); let bounds = if sp as libc::uintptr_t == stack_base as libc::uintptr_t { None } else { Some((stack_base as uint, sp as uint)) }; return Context { regs: regs, stack_bounds: bounds, } } /* Switch contexts Suspend the current execution context and resume another by saving the registers values of the executing thread to a Context then loading the registers from a previously saved Context. */ pub fn swap(out_context: &mut Context, in_context: &Context) { rtdebug!("swapping contexts"); let out_regs: &mut Registers = match out_context { &Context { regs: box ref mut r, .. } => r }; let in_regs: &Registers = match in_context { &Context { regs: box ref r, .. } => r }; rtdebug!("noting the stack limit and doing raw swap"); unsafe { // Right before we switch to the new context, set the new context's // stack limit in the OS-specified TLS slot. 
This also means that // we cannot call any more rust functions after record_stack_bounds // returns because they would all likely fail due to the limit being // invalid for the current task. Lucky for us `rust_swap_registers` // is a C function so we don't have to worry about that! match in_context.stack_bounds { Some((lo, hi)) => stack::record_rust_managed_stack_bounds(lo, hi), // If we're going back to one of the original contexts or // something that's possibly not a "normal task", then reset // the stack limit to 0 to make morestack never fail None => stack::record_rust_managed_stack_bounds(0, uint::MAX), } rust_swap_registers(out_regs, in_regs); } } } #[link(name = "context_switch", kind = "static")] extern { fn rust_swap_registers(out_regs: *mut Registers, in_regs: *const Registers); } // Register contexts used in various architectures // // These structures all represent a context of one task throughout its // execution. Each struct is a representation of the architecture's register // set. When swapping between tasks, these register sets are used to save off // the current registers into one struct, and load them all from another. // // Note that this is only used for context switching, which means that some of // the registers may go unused. For example, for architectures with // callee/caller saved registers, the context will only reflect the callee-saved // registers. This is because the caller saved registers are already stored // elsewhere on the stack (if it was necessary anyway). // // Additionally, there may be fields on various architectures which are unused // entirely because they only reflect what is theoretically possible for a // "complete register set" to show, but user-space cannot alter these registers. // An example of this would be the segment selectors for x86. // // These structures/functions are roughly in-sync with the source files inside // of src/rt/arch/$arch. The only currently used function from those folders is // the `rust_swap_registers` function, but that's only because for now segmented // stacks are disabled. #[cfg(target_arch = "x86")] #[repr(C)] struct Registers { eax: u32, ebx: u32, ecx: u32, edx: u32, ebp: u32, esi: u32, edi: u32, esp: u32, cs: u16, ds: u16, ss: u16, es: u16, fs: u16, gs: u16, eflags: u32, eip: u32 } #[cfg(target_arch = "x86")] fn new_regs() -> Box<Registers> { box Registers { eax: 0, ebx: 0, ecx: 0, edx: 0, ebp: 0, esi: 0, edi: 0, esp: 0, cs: 0, ds: 0, ss: 0, es: 0, fs: 0, gs: 0, eflags: 0, eip: 0 } } #[cfg(target_arch = "x86")] fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint, procedure: raw::Procedure, sp: *mut uint) { let sp = sp as *mut uint; // x86 has interesting stack alignment requirements, so do some alignment // plus some offsetting to figure out what the actual stack should be. let sp = align_down(sp); let sp = mut_offset(sp, -4); unsafe { *mut_offset(sp, 2) = procedure.env as uint }; unsafe { *mut_offset(sp, 1) = procedure.code as uint }; unsafe { *mut_offset(sp, 0) = arg as uint }; let sp = mut_offset(sp, -1); unsafe { *sp = 0 }; // The final return address regs.esp = sp as u32; regs.eip = fptr as u32; // Last base pointer on the stack is 0 regs.ebp = 0; } // windows requires saving more registers (both general and XMM), so the windows // register context must be larger. 
#[cfg(windows, target_arch = "x86_64")] #[repr(C)] struct Registers { gpr:[libc::uintptr_t, ..14], _xmm:[simd::u32x4, ..10] } #[cfg(not(windows), target_arch = "x86_64")] #[repr(C)] struct Registers { gpr:[libc::uintptr_t, ..10], _xmm:[simd::u32x4, ..6] } #[cfg(windows, target_arch = "x86_64")] fn new_regs() -> Box<Registers> { box() Registers { gpr:[0,..14], _xmm:[simd::u32x4(0,0,0,0),..10] } } #[cfg(not(windows), target_arch = "x86_64")] fn new_regs() -> Box<Registers> { box() Registers { gpr:[0,..10], _xmm:[simd::u32x4(0,0,0,0),..6] } } #[cfg(target_arch = "x86_64")] fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint, procedure: raw::Procedure, sp: *mut uint) { extern { fn rust_bootstrap_green_task(); } // Redefinitions from rt/arch/x86_64/regs.h static RUSTRT_RSP: uint = 1; static RUSTRT_IP: uint = 8; static RUSTRT_RBP: uint = 2; static RUSTRT_R12: uint = 4; static RUSTRT_R13: uint = 5; static RUSTRT_R14: uint = 6; static RUSTRT_R15: uint = 7; let sp = align_down(sp); let sp = mut_offset(sp, -1); // The final return address. 0 indicates the bottom of the stack unsafe { *sp = 0; } rtdebug!("creating call frame"); rtdebug!("fptr {:#x}", fptr as libc::uintptr_t); rtdebug!("arg {:#x}", arg); rtdebug!("sp {}", sp); // These registers are frobbed by rust_bootstrap_green_task into the right // location so we can invoke the "real init function", `fptr`. regs.gpr[RUSTRT_R12] = arg as libc::uintptr_t; regs.gpr[RUSTRT_R13] = procedure.code as libc::uintptr_t; regs.gpr[RUSTRT_R14] = procedure.env as libc::uintptr_t; regs.gpr[RUSTRT_R15] = fptr as libc::uintptr_t; // These registers are picked up by the regular context switch paths. These // will put us in "mostly the right context" except for frobbing all the // arguments to the right place. We have the small trampoline code inside of // rust_bootstrap_green_task to do that. regs.gpr[RUSTRT_RSP] = sp as libc::uintptr_t; regs.gpr[RUSTRT_IP] = rust_bootstrap_green_task as libc::uintptr_t; // Last base pointer on the stack should be 0 regs.gpr[RUSTRT_RBP] = 0; } #[cfg(target_arch = "arm")] type Registers = [libc::uintptr_t, ..32]; #[cfg(target_arch = "arm")] fn new_regs() -> Box<Registers> { box {[0, .. 32]} } #[cfg(target_arch = "arm")] fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint, procedure: raw::Procedure, sp: *mut uint) { extern { fn rust_bootstrap_green_task(); } let sp = align_down(sp); // sp of arm eabi is 8-byte aligned let sp = mut_offset(sp, -2); // The final return address. 0 indicates the bottom of the stack unsafe { *sp = 0; } // ARM uses the same technique as x86_64 to have a landing pad for the start // of all new green tasks. Neither r1/r2 are saved on a context switch, so // the shim will copy r3/r4 into r1/r2 and then execute the function in r5 regs[0] = arg as libc::uintptr_t; // r0 regs[3] = procedure.code as libc::uintptr_t; // r3 regs[4] = procedure.env as libc::uintptr_t; // r4 regs[5] = fptr as libc::uintptr_t; // r5 regs[13] = sp as libc::uintptr_t; // #52 sp, r13 regs[14] = rust_bootstrap_green_task as libc::uintptr_t; // #56 pc, r14 --> lr } #[cfg(target_arch = "mips")] #[cfg(target_arch = "mipsel")] type Registers = [libc::uintptr_t, ..32]; #[cfg(target_arch = "mips")] #[cfg(target_arch = "mipsel")] fn new_regs() -> Box<Registers> { box {[0, .. 
32]} } #[cfg(target_arch = "mips")] #[cfg(target_arch = "mipsel")] fn initialize_call_frame(regs: &mut Registers, fptr: InitFn, arg: uint, procedure: raw::Procedure, sp: *mut uint) { let sp = align_down(sp); // sp of mips o32 is 8-byte aligned let sp = mut_offset(sp, -2); // The final return address. 0 indicates the bottom of the stack unsafe { *sp = 0; } regs[4] = arg as libc::uintptr_t; regs[5] = procedure.code as libc::uintptr_t; regs[6] = procedure.env as libc::uintptr_t; regs[29] = sp as libc::uintptr_t; regs[25] = fptr as libc::uintptr_t; regs[31] = fptr as libc::uintptr_t; } fn align_down(sp: *mut uint) -> *mut uint { let sp = (sp as uint) & !(16 - 1); sp as *mut uint } // ptr::mut_offset is positive ints only #[inline] pub fn mut_offset<T>(ptr: *mut T, count: int) -> *mut T { use std::mem::size_of; (ptr as int + count * (size_of::<T>() as int)) as *mut T }
37.303951
84
0.630734
d5197475f35c196f3a5f466df0ea5a0189173db6
63
mod loading;
mod start;

pub use loading::*;
pub use start::*;
10.5
19
0.666667
188857a9821dec47690af33f04aeeb572c6ac4e9
15,098
//! Provides a linearizable register "shared memory" abstraction that can serve requests as long as //! a quorum of actors is available (e.g. 3 of 5). This code is based on the algorithm described //! in "[Sharing Memory Robustly in Message-Passing //! Systems](https://doi.org/10.1145/200836.200869)" by Attiya, Bar-Noy, and Dolev. "ABD" in the //! types refers to the author names. //! //! For a succinct overview of the algorithm, I recommend: //! http://muratbuffalo.blogspot.com/2012/05/replicatedfault-tolerant-atomic-storage.html use serde::{Deserialize, Serialize}; use std::borrow::Cow; use stateright::{Checker, Expectation, Model}; use stateright::actor::{ Actor, ActorModel, Id, majority, model_peers, Network, Out}; use stateright::actor::register::{ RegisterActor, RegisterMsg, RegisterMsg::*}; use stateright::semantics::LinearizabilityTester; use stateright::semantics::register::Register; use stateright::util::{HashableHashMap, HashableHashSet}; use std::fmt::Debug; use std::hash::Hash; type LogicalClock = u64; type RequestId = u64; type Seq = (LogicalClock, Id); type Value = char; #[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[derive(Serialize, Deserialize)] pub enum AbdMsg { Query(RequestId), AckQuery(RequestId, Seq, Value), Record(RequestId, Seq, Value), AckRecord(RequestId), } use AbdMsg::*; #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub struct AbdState { seq: Seq, val: Value, phase: Option<AbdPhase>, } #[derive(Clone, Debug, Eq, Hash, PartialEq)] enum AbdPhase { Phase1 { request_id: RequestId, requester_id: Id, write: Option<Value>, responses: HashableHashMap<Id, (Seq, Value)> }, Phase2 { request_id: RequestId, requester_id: Id, read: Option<Value>, acks: HashableHashSet<Id> }, } #[derive(Clone)] pub struct AbdActor { pub(crate) peers: Vec<Id>, } impl Actor for AbdActor { type Msg = RegisterMsg<RequestId, Value, AbdMsg>; type State = AbdState; fn on_start(&self, id: Id, _o: &mut Out<Self>) -> Self::State { AbdState { seq: (0, id), val: Value::default(), phase: None, } } fn on_msg(&self, id: Id, state: &mut Cow<Self::State>, src: Id, msg: Self::Msg, o: &mut Out<Self>) { match msg { Put(req_id, val) if state.phase.is_none() => { o.broadcast(&self.peers, &Internal(Query(req_id))); state.to_mut().phase = Some(AbdPhase::Phase1 { request_id: req_id, requester_id: src, write: Some(val), responses: { let mut responses = HashableHashMap::default(); responses.insert(id, (state.seq, state.val.clone())); responses }, }); } Get(req_id) if state.phase.is_none() => { o.broadcast(&self.peers, &Internal(Query(req_id))); state.to_mut().phase = Some(AbdPhase::Phase1 { request_id: req_id, requester_id: src, write: None, responses: { let mut responses = HashableHashMap::default(); responses.insert(id, (state.seq, state.val.clone())); responses }, }); } Internal(Query(req_id)) => { o.send(src, Internal(AckQuery(req_id, state.seq, state.val.clone()))); } Internal(AckQuery(expected_req_id, seq, val)) if matches!(state.phase, Some(AbdPhase::Phase1 { request_id, .. }) if request_id == expected_req_id) => { let mut state = state.to_mut(); if let Some(AbdPhase::Phase1 { request_id: req_id, requester_id: requester, write, responses, .. }) = &mut state.phase { responses.insert(src, (seq, val)); if responses.len() == majority(self.peers.len() + 1) { // Quorum reached. Move to phase 2. // Determine sequencer and value. let (_, (seq, val)) = responses.into_iter() // The following relies on the fact that sequencers are distinct. 
// Otherwise the chosen response can vary even when given the same // inputs due to the underlying `HashMap`'s random seed. .max_by_key(|(_, (seq, _))| seq) .unwrap(); let mut seq = *seq; let mut read = None; let val = if let Some(val) = std::mem::take(write) { seq = (seq.0 + 1, id); val } else { read = Some(val.clone()); val.clone() }; // A future optimization could skip the recording phase if the replicas // agree. o.broadcast(&self.peers, &Internal(Record(*req_id, seq, val.clone()))); // Self-send `Record`. if seq > state.seq { state.seq = seq; state.val = val; } // Self-send `AckRecord`. let mut acks = HashableHashSet::default(); acks.insert(id); state.phase = Some(AbdPhase::Phase2 { request_id: *req_id, requester_id: std::mem::take(requester), read, acks, }); } } } Internal(Record(req_id, seq, val)) => { o.send(src, Internal(AckRecord(req_id))); if seq > state.seq { let mut state = state.to_mut(); state.seq = seq; state.val = val; } } Internal(AckRecord(expected_req_id)) if matches!(state.phase, Some(AbdPhase::Phase2 { request_id, ref acks, .. }) if request_id == expected_req_id && !acks.contains(&src)) => { let mut state = state.to_mut(); if let Some(AbdPhase::Phase2 { request_id: req_id, requester_id: requester, read, acks, .. }) = &mut state.phase { acks.insert(src); if acks.len() == majority(self.peers.len() + 1) { let msg = if let Some(val) = read { GetOk(*req_id, std::mem::take(val)) } else { PutOk(*req_id) }; o.send(*requester, msg); state.phase = None; } } } _ => {} } } } #[derive(Clone)] struct AbdModelCfg { client_count: usize, server_count: usize, network: Network<<AbdActor as Actor>::Msg>, } impl AbdModelCfg { fn into_model(self) -> ActorModel< RegisterActor<AbdActor>, Self, LinearizabilityTester<Id, Register<Value>>> { ActorModel::new( self.clone(), LinearizabilityTester::new(Register(Value::default())) ) .actors((0..self.server_count) .map(|i| RegisterActor::Server(AbdActor { peers: model_peers(i, self.server_count), }))) .actors((0..self.client_count) .map(|_| RegisterActor::Client { put_count: 1, server_count: self.server_count, })) .init_network(self.network) .property(Expectation::Always, "linearizable", |_, state| { state.history.serialized_history().is_some() }) .property(Expectation::Sometimes, "value chosen", |_, state| { for env in state.network.iter_deliverable() { if let RegisterMsg::GetOk(_req_id, value) = env.msg { if *value != Value::default() { return true; } } } false }) .record_msg_in(RegisterMsg::record_returns) .record_msg_out(RegisterMsg::record_invocations) } } #[cfg(test)] #[test] fn can_model_linearizable_register() { use stateright::actor::ActorModelAction::Deliver; // BFS let checker = AbdModelCfg { client_count: 2, server_count: 2, network: Network::new_unordered_nonduplicating([]), } .into_model().checker().spawn_bfs().join(); checker.assert_properties(); checker.assert_discovery("value chosen", vec![ Deliver { src: Id::from(3), dst: Id::from(1), msg: Put(3, 'B') }, Deliver { src: Id::from(1), dst: Id::from(0), msg: Internal(Query(3)) }, Deliver { src: Id::from(0), dst: Id::from(1), msg: Internal(AckQuery(3, (0, Id::from(0)), '\u{0}')) }, Deliver { src: Id::from(1), dst: Id::from(0), msg: Internal(Record(3, (1, Id::from(1)), 'B')) }, Deliver { src: Id::from(0), dst: Id::from(1), msg: Internal(AckRecord(3)) }, Deliver { src: Id::from(1), dst: Id::from(3), msg: PutOk(3) }, Deliver { src: Id::from(3), dst: Id::from(0), msg: Get(6) }, Deliver { src: Id::from(0), dst: Id::from(1), msg: Internal(Query(6)) }, Deliver { src: Id::from(1), dst: 
Id::from(0), msg: Internal(AckQuery(6, (1, Id::from(1)), 'B')) }, Deliver { src: Id::from(0), dst: Id::from(1), msg: Internal(Record(6, (1, Id::from(1)), 'B')) }, Deliver { src: Id::from(1), dst: Id::from(0), msg: Internal(AckRecord(6)) }, ]); assert_eq!(checker.unique_state_count(), 544); // DFS let checker = AbdModelCfg { client_count: 2, server_count: 2, network: Network::new_unordered_nonduplicating([]), } .into_model().checker().spawn_dfs().join(); checker.assert_properties(); checker.assert_discovery("value chosen", vec![ Deliver { src: Id::from(3), dst: Id::from(1), msg: Put(3, 'B') }, Deliver { src: Id::from(1), dst: Id::from(0), msg: Internal(Query(3)) }, Deliver { src: Id::from(0), dst: Id::from(1), msg: Internal(AckQuery(3, (0, Id::from(0)), '\u{0}')) }, Deliver { src: Id::from(1), dst: Id::from(0), msg: Internal(Record(3, (1, Id::from(1)), 'B')) }, Deliver { src: Id::from(0), dst: Id::from(1), msg: Internal(AckRecord(3)) }, Deliver { src: Id::from(1), dst: Id::from(3), msg: PutOk(3) }, Deliver { src: Id::from(3), dst: Id::from(0), msg: Get(6) }, Deliver { src: Id::from(0), dst: Id::from(1), msg: Internal(Query(6)) }, Deliver { src: Id::from(1), dst: Id::from(0), msg: Internal(AckQuery(6, (1, Id::from(1)), 'B')) }, Deliver { src: Id::from(0), dst: Id::from(1), msg: Internal(Record(6, (1, Id::from(1)), 'B')) }, Deliver { src: Id::from(1), dst: Id::from(0), msg: Internal(AckRecord(6)) }, ]); assert_eq!(checker.unique_state_count(), 544); } fn main() -> Result<(), pico_args::Error> { use stateright::actor::spawn; use std::net::{SocketAddrV4, Ipv4Addr}; env_logger::init_from_env(env_logger::Env::default() .default_filter_or("info")); // `RUST_LOG=${LEVEL}` env variable to override let mut args = pico_args::Arguments::from_env(); match args.subcommand()?.as_deref() { Some("check") => { let client_count = args.opt_free_from_str()? .unwrap_or(2); let network = args.opt_free_from_str()? .unwrap_or(Network::new_unordered_nonduplicating([])) .into(); println!("Model checking a linearizable register with {} clients.", client_count); AbdModelCfg { client_count, server_count: 3, network, } .into_model().checker().threads(num_cpus::get()) .spawn_dfs().report(&mut std::io::stdout()); } Some("explore") => { let client_count = args.opt_free_from_str()? .unwrap_or(2); let address = args.opt_free_from_str()? .unwrap_or("localhost:3000".to_string()); let network = args.opt_free_from_str()? .unwrap_or(Network::new_unordered_nonduplicating([])) .into(); println!( "Exploring state space for linearizable register with {} clients on {}.", client_count, address); AbdModelCfg { client_count, server_count: 3, network, } .into_model().checker().threads(num_cpus::get()) .serve(address); } Some("spawn") => { let port = 3000; println!(" A server that implements a linearizable register."); println!(" You can interact with the server using netcat. 
Example:"); println!("$ nc -u localhost {}", port); println!("{}", serde_json::to_string(&RegisterMsg::Put::<RequestId, Value, ()>(1, 'X')).unwrap()); println!("{}", serde_json::to_string(&RegisterMsg::Get::<RequestId, Value, ()>(2)).unwrap()); println!(); let id0 = Id::from(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port + 0)); let id1 = Id::from(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port + 1)); let id2 = Id::from(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port + 2)); spawn( serde_json::to_vec, |bytes| serde_json::from_slice(bytes), vec![ (id0, AbdActor { peers: vec![id1, id2] }), (id1, AbdActor { peers: vec![id0, id2] }), (id2, AbdActor { peers: vec![id0, id1] }), ]).unwrap(); } _ => { println!("USAGE:"); println!(" ./linearizable-register check [CLIENT_COUNT] [NETWORK]"); println!(" ./linearizable-register explore [CLIENT_COUNT] [ADDRESS] [NETWORK]"); println!(" ./linearizable-register spawn"); println!("NETWORK: {}", Network::<<AbdActor as Actor>::Msg>::names().join(" | ")); } } Ok(()) }
41.822715
136
0.497086
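The test above exercises the model with a fixed configuration; the sketch below shows how the same checker pipeline could be reused with a caller-chosen client count. It assumes it sits in the same module as `AbdModelCfg` above (so the existing imports apply); `check_with_clients` is a hypothetical helper, and the chained calls mirror the ones already used in `can_model_linearizable_register`.

// Hypothetical helper (not part of the original file): re-run the BFS check
// with a caller-chosen number of clients against two servers.
fn check_with_clients(client_count: usize) {
    let checker = AbdModelCfg {
        client_count,
        server_count: 2,
        network: Network::new_unordered_nonduplicating([]),
    }
    .into_model()
    .checker()
    .spawn_bfs()
    .join();

    // Panics if the "linearizable" property is ever violated in the explored states.
    checker.assert_properties();
}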
62afaa2c6e84d7736c71df8fa8a59d9959ee9ac5
4,647
// This file is part of file-descriptors. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/file-descriptors/master/COPYRIGHT. No part of file-descriptors, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file. // Copyright © 2018-2019 The developers of file-descriptors. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/file-descriptors/master/COPYRIGHT. #[cfg(target_pointer_width = "32")] #[derive(Default, Debug, Clone)] #[repr(C)] pub(crate) struct msghdr { pub(crate) msg_name: *mut c_void, pub(crate) msg_namelen: socklen_t, pub(crate) msg_iov: *mut iovec, pub(crate) msg_iovlen: socklen_t, pub(crate) msg_control: *mut c_void, pub(crate) msg_controllen: socklen_t, pub(crate) msg_flags: c_int, } #[cfg(target_pointer_width = "64")] #[derive(Debug, Clone)] #[repr(C)] pub(crate) struct msghdr { pub(crate) msg_name: *mut c_void, pub(crate) msg_namelen: socklen_t, pub(crate) msg_iov: *mut iovec, #[cfg(target_endian = "little")] pub(crate) msg_iovlen: socklen_t, #[cfg(target_endian = "little")] __pad1: u32, #[cfg(target_endian = "big")] __pad1: u32, #[cfg(target_endian = "big")] pub(crate) msg_iovlen: socklen_t, pub(crate) msg_control: *mut c_void, #[cfg(target_endian = "little")] pub(crate) msg_controllen: socklen_t, #[cfg(target_endian = "little")] __pad2: u32, #[cfg(target_endian = "big")] __pad2: u32, #[cfg(target_endian = "big")] pub(crate) msg_controllen: socklen_t, pub(crate) msg_flags: c_int, } impl Default for msghdr { #[inline(always)] fn default() -> Self { unsafe { zeroed() } } } impl msghdr { #[cfg(target_pointer_width = "32")] pub(crate) fn new(msg_name: *mut c_void, msg_namelen: socklen_t, msg_iov: *mut iovec, msg_iovlen: socklen_t, msg_control: *mut c_void, msg_controllen: socklen_t, msg_flags: c_int) -> Self { Self { msg_name, msg_namelen, msg_iov, msg_iovlen, msg_control, msg_controllen, msg_flags, } } #[cfg(target_pointer_width = "64")] pub(crate) fn new(msg_name: *mut c_void, msg_namelen: socklen_t, msg_iov: *mut iovec, msg_iovlen: socklen_t, msg_control: *mut c_void, msg_controllen: socklen_t, msg_flags: c_int) -> Self { #[allow(deprecated)] Self { msg_name, msg_namelen, msg_iov, msg_iovlen, __pad1: unsafe { uninitialized() }, msg_control, msg_controllen, __pad2: unsafe { uninitialized() }, msg_flags, } } #[inline(always)] pub(crate) fn initialize_sole_header<T: Sized>(&mut self, cmsg_level: c_int, cmsg_type: c_int, array: &[T]) { let control_length = { let first_header_mut = self.first_header_mut(); let first_header = first_header_mut.unwrap(); first_header.initialize(cmsg_level, cmsg_type, array); first_header.cmsg_len }; // Sum of the length of all control messages in the buffer. self.msg_controllen = control_length; } #[allow(dead_code)] #[inline(always)] pub(crate) fn message_headers_iterator<'a>(&'a self) -> MessageHeadersIterator<'a> { MessageHeadersIterator { parent: self, next: self.first_header(), } } /// Equivalent to the lib c macro `CMSG_FIRSTHDR()`. #[inline(always)] pub(crate) fn first_header(&self) -> Option<&cmsghdr> { if likely!(self.msg_controllen >= cmsghdr::Size) { debug_assert!(!self.msg_control.is_null(), "msg_control is null but msg_controllen is positive"); Some(unsafe { & * (self.msg_control as *const cmsghdr) }) } else { None } } /// Equivalent to the lib c macro `CMSG_FIRSTHDR()`. 
#[inline(always)] pub(crate) fn first_header_mut(&mut self) -> Option<&mut cmsghdr> { let there_is_one_or_more_headers = self.msg_controllen >= cmsghdr::Size; if likely!(there_is_one_or_more_headers) { debug_assert!(!self.msg_control.is_null(), "msg_control is null but msg_controllen is positive"); Some(unsafe { &mut * (self.msg_control as *mut cmsghdr) }) } else { None } } #[inline(always)] pub(crate) fn end(&self) -> usize { (self.msg_control as usize) + (self.msg_controllen as usize) } // #[inline(always)] // fn __MHDR_END(&mut self) -> *mut c_uchar // { // ((self.msg_control as usize) + (self.msg_controllen as usize)) as *mut c_char // } // // #[inline(always)] // fn CMSG_FIRSTHDR(&mut self) -> *mut cmsghdr // { // if (self.msg_controllen as usize) >= size_of::<cmsghdr>() // { // self.msg_control as *mut cmsghdr // } // else // { // null_mut() // } // } }
27.497041
403
0.695933
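A small crate-internal sketch of how the `msghdr` helpers above compose; `empty_message_header` is a hypothetical function, and the zeroed lengths are chosen so that `first_header()` has nothing to return.

// Sketch: build a msghdr for a recvmsg() call that carries no name, no io
// vectors and no ancillary (control) data.
fn empty_message_header() -> msghdr {
    use std::ptr::null_mut;

    let header = msghdr::new(null_mut(), 0, null_mut(), 0, null_mut(), 0, 0);

    // With msg_controllen == 0 there can be no control message headers.
    debug_assert!(header.first_header().is_none());
    header
}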
f494a99884b924b2b3a5cdc5b2f0960a3e161d3c
3,402
/// [asciicast] deserializer. /// /// [asciicast]: https://github.com/asciinema/asciinema/tree/develop/doc use std::collections::HashMap; #[derive(serde::Deserialize, Debug)] pub struct Theme { pub fg: String, pub bg: String, pub palette: String, } #[derive(serde::Deserialize, Debug)] pub struct Header { pub version: u64, pub width: usize, pub height: usize, pub timestamp: Option<u128>, pub duration: Option<f64>, pub idle_time_limit: Option<f64>, pub command: Option<String>, pub title: Option<String>, pub env: Option<HashMap<String, String>>, pub theme: Option<Theme>, } #[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Copy, Clone)] pub enum EventType { Input, Output, } impl<'de> serde::de::Deserialize<'de> for EventType { fn deserialize<D>(deserializer: D) -> Result<EventType, D::Error> where D: serde::de::Deserializer<'de>, { match <char>::deserialize(deserializer) { Ok('i') => Ok(EventType::Input), Ok('o') => Ok(EventType::Output), Ok(x) => Err(serde::de::Error::invalid_value( serde::de::Unexpected::Char(x), &"an 'i' or 'o'", )), Err(e) => Err(e), } } } #[derive(serde::Deserialize, Debug)] pub struct Event(f64, EventType, String); impl Event { /// Create a new event. #[allow(dead_code)] pub fn new<T>(time: f64, etype: EventType, data: T) -> Event where T: ToString, { Event(time, etype, data.to_string()) } /// Get the time of the event. pub fn time(&self) -> f64 { self.0 } /// Get the event type. #[allow(dead_code)] pub fn event_type(&self) -> EventType { self.1 } /// Get the event data. pub fn event_data(&self) -> &str { self.2.as_str() } } #[cfg(test)] mod test { use super::*; #[test] fn header() { let json_str: &str = r#"{ "version": 2, "width": 80, "height": 24, "timestamp": 1504467315, "title": "Demo", "env": {"TERM": "xterm-256color", "SHELL": "/bin/zsh"} }"#; let header: Header = serde_json::from_str(json_str).unwrap(); assert_eq!(header.version, 2); assert_eq!(header.width, 80); assert_eq!(header.height, 24); assert_eq!(header.timestamp, Some(1504467315)); assert_eq!(header.title, Some("Demo".to_string())); let mut map: HashMap<String, String> = HashMap::new(); map.insert("TERM".into(), "xterm-256color".into()); map.insert("SHELL".into(), "/bin/zsh".into()); assert_eq!(header.env, Some(map)); assert!(header.theme.is_none()); } #[test] fn event() { let json_str: &str = r##"[ 0.248848, "o", "\u001b[1;31mHello \u001b[32mWorld!\u001b[0m\n" ]"##; let event: Event = serde_json::from_str(json_str).unwrap(); let expected: Event = Event::new( 0.248848, EventType::Output, "\u{001b}[1;31mHello \u{001b}[32mWorld!\u{001b}[0m\n", ); assert_eq!(event.event_type(), expected.event_type()); assert_eq!(event.event_data(), expected.event_data()); assert!((event.time() - expected.time()).abs() < 0.0000001); } }
27.216
72
0.547913
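A usage sketch for the deserializer above: asciicast v2 files are a JSON header line followed by one JSON event per line, so parsing a whole recording is a line-by-line loop. `parse_cast` is a hypothetical helper and assumes `serde_json` is available, as in the tests.

// Sketch: split a recording into its header and events.
fn parse_cast(input: &str) -> serde_json::Result<(Header, Vec<Event>)> {
    let mut lines = input.lines().filter(|l| !l.trim().is_empty());

    // The first non-empty line is the header object.
    let header: Header = serde_json::from_str(lines.next().unwrap_or("{}"))?;

    // Every remaining line is a `[time, type, data]` event triple.
    let events = lines
        .map(|line| serde_json::from_str::<Event>(line))
        .collect::<Result<Vec<_>, _>>()?;

    Ok((header, events))
}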
7657942cc9fc7c43b7a3275b82140d5e691af1ee
3,821
// Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2 use anyhow::Result; use starcoin_crypto::HashValue; use starcoin_state_api::{ChainState, ChainStateReader}; use starcoin_types::block::BlockIdAndNumber; use starcoin_types::startup_info::{ChainInfo, ChainStatus}; use starcoin_types::transaction::BlockTransactionInfo; use starcoin_types::{ block::{Block, BlockHeader, BlockInfo, BlockNumber}, transaction::Transaction, U256, }; use starcoin_vm_types::on_chain_resource::Epoch; use starcoin_vm_types::time::TimeService; use std::collections::HashMap; pub use starcoin_types::block::ExecutedBlock; pub struct VerifiedBlock(pub Block); pub type MintedUncleNumber = u64; pub trait ChainReader { fn info(&self) -> ChainInfo; fn status(&self) -> ChainStatus; /// Get latest block with block_info fn head_block(&self) -> ExecutedBlock; fn current_header(&self) -> BlockHeader; fn get_header(&self, hash: HashValue) -> Result<Option<BlockHeader>>; fn get_header_by_number(&self, number: BlockNumber) -> Result<Option<BlockHeader>>; fn get_block_by_number(&self, number: BlockNumber) -> Result<Option<Block>>; /// Get latest `count` blocks before `number`. if `number` is absent, use head block number. /// the block of `number` is inclusive. fn get_blocks_by_number(&self, number: Option<BlockNumber>, count: u64) -> Result<Vec<Block>>; fn get_block(&self, hash: HashValue) -> Result<Option<Block>>; /// Get block hash by block number, if not exist, return None fn get_hash_by_number(&self, number: BlockNumber) -> Result<Option<HashValue>>; fn get_transaction(&self, hash: HashValue) -> Result<Option<Transaction>>; /// Get transaction info by transaction's hash fn get_transaction_info(&self, txn_hash: HashValue) -> Result<Option<BlockTransactionInfo>>; /// get txn info at version in main chain. fn get_transaction_info_by_version(&self, version: u64) -> Result<Option<BlockTransactionInfo>>; fn chain_state_reader(&self) -> &dyn ChainStateReader; fn get_block_info(&self, block_id: Option<HashValue>) -> Result<Option<BlockInfo>>; fn get_total_difficulty(&self) -> Result<U256>; fn exist_block(&self, block_id: HashValue) -> Result<bool>; fn epoch(&self) -> &Epoch; /// Get block id vec by BlockNumber, `start_number`'s block id is include. fn get_block_ids( &self, start_number: BlockNumber, reverse: bool, max_size: u64, ) -> Result<Vec<HashValue>>; fn get_block_info_by_number(&self, number: BlockNumber) -> Result<Option<BlockInfo>>; fn time_service(&self) -> &dyn TimeService; fn fork(&self, block_id: HashValue) -> Result<Self> where Self: Sized; fn epoch_uncles(&self) -> &HashMap<HashValue, MintedUncleNumber>; /// Find two chain's ancestor fn find_ancestor(&self, another: &dyn ChainReader) -> Result<Option<BlockIdAndNumber>>; /// Verify block header and body, base current chain, but do not verify it execute state. fn verify(&self, block: Block) -> Result<VerifiedBlock>; /// Execute block and verify it execute state, and save result base current chain, but do not change current chain. fn execute(&self, block: VerifiedBlock) -> Result<ExecutedBlock>; } pub trait ChainWriter { fn can_connect(&self, executed_block: &ExecutedBlock) -> bool; /// Connect a executed block to current chain. fn connect(&mut self, executed_block: ExecutedBlock) -> Result<ExecutedBlock>; /// Verify, Execute and Connect block to current chain. fn apply(&mut self, block: Block) -> Result<ExecutedBlock>; fn chain_state(&mut self) -> &dyn ChainState; } /// `Chain` is a trait that defines a single Chain. 
pub trait Chain: ChainReader + ChainWriter {}
44.430233
119
0.712117
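To illustrate how the `ChainWriter` trait above is meant to be consumed, here is a minimal generic sketch; `apply_all` is a hypothetical helper and simply leans on `apply`, which verifies, executes and connects each block.

// Sketch: apply a batch of blocks in order, stopping at the first failure.
fn apply_all<C: ChainWriter>(chain: &mut C, blocks: Vec<Block>) -> Result<Vec<ExecutedBlock>> {
    let mut executed = Vec::with_capacity(blocks.len());
    for block in blocks {
        executed.push(chain.apply(block)?);
    }
    Ok(executed)
}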
647953da809233cebee3cc80118a59222d33714c
2,893
use crate::ast::Node;
use std::collections::BTreeMap;

#[derive(Debug)]
pub enum ConstantType {
    ConstValue(String),
    EnumValue { enum_name: String, variant: String },
}

impl std::fmt::Display for ConstantType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::ConstValue(s) => write!(f, "{}", s),
            Self::EnumValue { enum_name, variant } => write!(f, "{}::{}", enum_name, variant),
        }
    }
}

#[derive(Debug)]
pub struct ConstantIndex(pub BTreeMap<String, ConstantType>);

impl ConstantIndex {
    /// Build an index of all consts / enums for use in the union switches.
    pub(crate) fn new<'a>(ast: &'a Node<'a>) -> ConstantIndex {
        let mut case_values = BTreeMap::new();

        if let Node::Root(r) = ast {
            for item in r.iter() {
                match item {
                    Node::Constant(vs) => {
                        // Map constants to themselves, they do not require namespacing.
                        if case_values
                            .insert(
                                vs[0].ident_str().to_string(),
                                ConstantType::ConstValue(vs[1].ident_str().to_string()),
                            )
                            .is_some()
                        {
                            panic!("duplicate case keys {}", vs[0].ident_str());
                        }
                    }
                    Node::Enum(e) => {
                        // Enums require namespacing.
                        //
                        // `NFS_OK` defined in a hypothetical `Status` enum must
                        // become `Status::NFS_OK`.
                        for v in e.variants.iter() {
                            if case_values
                                .insert(
                                    v.name.as_str().to_string(),
                                    ConstantType::EnumValue {
                                        enum_name: e.name.to_string(),
                                        variant: v.name.as_str().to_string(),
                                    },
                                )
                                .is_some()
                            {
                                panic!("duplicate case keys {}", v.name.as_str());
                            }
                        }
                    }
                    _ => continue,
                }
            }
        }

        ConstantIndex(case_values)
    }

    /// Returns the constant value as a string for `name`.
    pub fn get<T: AsRef<str>>(&self, name: T) -> Option<&ConstantType> {
        self.0.get(name.as_ref())
    }

    /// Iterates over the types in the constant index.
    pub fn iter(&self) -> impl std::iter::Iterator<Item = (&String, &ConstantType)> {
        self.0.iter()
    }
}
36.620253
94
0.414103
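A short sketch of how the index above can be consumed when rendering union switch arms; `case_label` is a hypothetical helper that relies on the `Display` impl to namespace enum variants.

// Sketch: resolve a case key to its generated label, falling back to the raw
// name when it is neither a known constant nor an enum variant.
fn case_label(index: &ConstantIndex, name: &str) -> String {
    match index.get(name) {
        Some(constant) => constant.to_string(), // e.g. "Status::NFS_OK"
        None => name.to_string(),
    }
}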
f86f9cb4f3e2e34b3d762a9ea09ab27c827c02dd
2,479
use crate::data::Readout;
use colored::Colorize;
use libmacchina::traits::ReadoutError;

fn split_failed_items<'a>(
    failed_items: &'a [&Readout],
) -> (Vec<&'a Readout<'a>>, Vec<&'a Readout<'a>>) {
    let err_items: Vec<_> = failed_items
        .iter()
        .filter(|p| !matches!(p.1.as_ref().err(), Some(ReadoutError::Warning(_))))
        .copied()
        .collect();

    let warn_items: Vec<_> = failed_items
        .iter()
        .filter(|p| matches!(p.1.as_ref().err(), Some(ReadoutError::Warning(_))))
        .copied()
        .collect();

    (err_items, warn_items)
}

fn print_errors<'a>(err_items: &[&'a Readout<'a>]) {
    if err_items.is_empty() {
        println!("🎉 You are good to go! No failures detected.");
    }

    for failed_item in err_items {
        let key = failed_item.0;
        let error = failed_item.1.as_ref().err().unwrap().to_string();

        println!(
            "Readout \"{}\" failed with message: {}",
            key.to_string().bright_blue(),
            error.bright_red()
        );
    }
}

fn print_warnings<'a>(warn_items: &[&'a Readout<'a>], total_failed_items: usize) {
    if warn_items.is_empty() {
        return;
    }

    let warn_len = warn_items.len().to_string().bright_yellow();
    let err_len = total_failed_items.to_string().bright_red();
    println!(
        "\n{} of the {} unsuccessful read(s) resulted in a warning:",
        warn_len, err_len
    );

    for warn_item in warn_items {
        let key = warn_item.0;
        let warn = warn_item.1.as_ref().err().unwrap().to_string();

        println!(
            "Readout \"{}\" threw a warning with message: {}",
            key.to_string().bright_blue(),
            warn.yellow()
        );
    }
}

pub(crate) fn print_doctor(data: &[Readout]) {
    let failed_items: Vec<_> = data.iter().filter(|p| p.1.is_err()).collect();
    let (err_items, warn_items) = split_failed_items(&failed_items);

    println!(
        "Let's check your system for {}... Here's a summary:\n",
        "errors".bright_red()
    );
    println!(
        "We've collected {} {}, including {} {} and {} read(s) which resulted in a {}.",
        data.len().to_string().bright_green(),
        "readouts".bright_green(),
        err_items.len().to_string().bright_red(),
        "failed read(s)".bright_red(),
        warn_items.len(),
        "warning".bright_yellow()
    );

    print_errors(&err_items);
    print_warnings(&warn_items, failed_items.len());
}
28.825581
88
0.573215
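The error/warning split above hinges on one predicate; pulling it out makes the intent explicit. `is_warning` is a hypothetical helper assumed to live in the same module, so `Readout` and `ReadoutError` are already in scope.

// Sketch: a readout counts as a warning only when it failed with
// `ReadoutError::Warning`; every other error is treated as fatal.
fn is_warning(readout: &Readout) -> bool {
    matches!(readout.1.as_ref().err(), Some(ReadoutError::Warning(_)))
}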
147c223048fc884bf8ce50b4fec296a84fadd1fb
4,005
use anyhow::Context; use std::collections::HashMap; use http::Uri; use tonic::{ metadata::{Ascii, MetadataValue}, transport::{Channel, ClientTlsConfig}, }; use crate::error::Result; use crate::proto; use crate::proto::seabird::seabird_client::SeabirdClient; #[derive(Clone, Debug)] pub struct ClientConfig { pub url: String, pub token: String, } pub type InnerClient = tonic::codegen::InterceptedService<tonic::transport::Channel, AuthHeaderInterceptor>; // Client represents the running bot. #[derive(Debug)] pub struct Client { config: ClientConfig, inner: SeabirdClient<InnerClient>, } #[derive(Debug)] pub struct AuthHeaderInterceptor { auth_header: MetadataValue<Ascii>, } impl tonic::service::Interceptor for AuthHeaderInterceptor { fn call( &mut self, mut req: tonic::Request<()>, ) -> std::result::Result<tonic::Request<()>, tonic::Status> { req.metadata_mut() .insert("authorization", self.auth_header.clone()); Ok(req) } } impl Client { pub async fn new(config: ClientConfig) -> Result<Self> { let uri: Uri = config.url.parse().context("failed to parse seabird URL")?; let mut channel_builder = Channel::builder(uri.clone()); match uri.scheme_str() { None | Some("https") => { channel_builder = channel_builder.tls_config(ClientTlsConfig::new())?; } _ => {} } let channel = channel_builder .connect() .await .context("Failed to connect to seabird")?; let auth_header: MetadataValue<Ascii> = format!("Bearer {}", config.token).parse()?; let seabird_client = SeabirdClient::with_interceptor(channel, AuthHeaderInterceptor { auth_header }); Ok(Client { config, inner: seabird_client, }) } pub async fn perform_private_action( &mut self, user_id: impl Into<String>, text: impl Into<String>, tags: Option<HashMap<String, String>>, ) -> Result<()> { self.inner .perform_private_action(proto::PerformPrivateActionRequest { user_id: user_id.into(), text: text.into(), tags: tags.unwrap_or_else(|| HashMap::new()), }) .await?; Ok(()) } pub async fn perform_action( &mut self, channel_id: impl Into<String>, text: impl Into<String>, tags: Option<HashMap<String, String>>, ) -> Result<()> { self.inner .perform_action(proto::PerformActionRequest { channel_id: channel_id.into(), text: text.into(), tags: tags.unwrap_or_else(|| HashMap::new()), }) .await?; Ok(()) } pub async fn send_message( &mut self, channel_id: impl Into<String>, text: impl Into<String>, tags: Option<HashMap<String, String>>, ) -> Result<()> { self.inner .send_message(proto::SendMessageRequest { channel_id: channel_id.into(), text: text.into(), tags: tags.unwrap_or_else(|| HashMap::new()), }) .await?; Ok(()) } pub async fn send_private_message( &mut self, user_id: impl Into<String>, text: impl Into<String>, tags: Option<HashMap<String, String>>, ) -> Result<()> { self.inner .send_private_message(proto::SendPrivateMessageRequest { user_id: user_id.into(), text: text.into(), tags: tags.unwrap_or_else(|| HashMap::new()), }) .await?; Ok(()) } pub fn inner_ref(&self) -> &'_ SeabirdClient<InnerClient> { &self.inner } pub fn inner_mut_ref(&mut self) -> &'_ mut SeabirdClient<InnerClient> { &mut self.inner } }
27.431507
92
0.558801
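A usage sketch for the client above, assuming a tokio (or compatible async) runtime; `announce` is a hypothetical function, the URL is a placeholder, and the token is read from an environment variable purely for illustration.

// Sketch: connect once and send a single channel message with no tags.
async fn announce(channel_id: &str) -> Result<()> {
    let mut client = Client::new(ClientConfig {
        url: "https://seabird.example.com".to_string(), // placeholder endpoint
        token: std::env::var("SEABIRD_TOKEN").unwrap_or_default(),
    })
    .await?;

    client
        .send_message(channel_id, "hello from the client sketch", None)
        .await?;
    Ok(())
}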
f487e552bc3fbaf4cdffc16ef8325b26d4ecdab1
19,085
use crate::model::ParsingContext; use crate::pb::*; use tract_hir::internal::*; use tract_hir::ops; use tract_ndarray::prelude::*; pub fn gru( _ctx: &ParsingContext, pb: &NodeProto, ) -> TractResult<(Box<dyn InferenceOp>, Vec<String>)> { let mut gru = GRU::default(); let mut options = crate::model::optional_inputs(pb).skip(3); gru.optional_bias_input = options.next().unwrap(); gru.optional_sequence_lens_input = options.next().unwrap(); gru.optional_initial_h_input = options.next().unwrap(); let mut options = crate::model::optional_outputs(pb); gru.optional_y_output = options.next().unwrap(); gru.optional_y_h_output = options.next().unwrap(); Ok((Box::new(gru), vec![])) } #[derive(Debug, Clone, new, Hash)] pub struct GRU { pub optional_bias_input: Option<usize>, pub optional_sequence_lens_input: Option<usize>, pub optional_initial_h_input: Option<usize>, pub optional_y_output: Option<usize>, pub optional_y_h_output: Option<usize>, pub f: Box<dyn TypedOp>, pub g: Box<dyn TypedOp>, pub linear_before_reset: bool, } tract_linalg::impl_dyn_hash!(GRU); impl Default for GRU { fn default() -> GRU { GRU { optional_bias_input: None, optional_sequence_lens_input: None, optional_initial_h_input: None, optional_y_output: None, optional_y_h_output: None, f: Box::new(ops::nn::sigmoid()), g: Box::new(ops::math::tanh()), linear_before_reset: false, } } } impl Op for GRU { fn name(&self) -> Cow<str> { "GRU".into() } fn validation(&self) -> Validation { Validation::Rounding } op_onnx!(); not_a_typed_op!(); } impl InferenceRulesOp for GRU { fn rules<'r, 'p: 'r, 's: 'r>( &'s self, s: &mut Solver<'r>, inputs: &'p [TensorProxy], outputs: &'p [TensorProxy], ) -> TractResult<()> { let input_count = 3 + self.optional_bias_input.is_some() as usize + self.optional_sequence_lens_input.is_some() as usize + self.optional_initial_h_input.is_some() as usize; check_input_arity(&inputs, input_count)?; let output_count = self.optional_y_output.is_some() as usize + self.optional_y_h_output.is_some() as usize; check_output_arity(&outputs, output_count)?; s.equals(&inputs[0].datum_type, &inputs[1].datum_type)?; s.equals(&inputs[0].datum_type, &inputs[2].datum_type)?; s.equals(&inputs[0].datum_type, &outputs[0].datum_type)?; s.equals(&inputs[0].rank, 3)?; s.equals(&inputs[1].rank, 3)?; s.equals(&inputs[2].rank, 3)?; s.equals(&inputs[1].shape[0], &inputs[2].shape[0])?; // num_directions s.equals(&inputs[1].shape[1], &inputs[2].shape[1])?; // 4*hidden_size s.equals(&inputs[2].shape[1], 3 * inputs[2].shape[2].bex())?; // hidden_size if let Some(bias) = self.optional_bias_input { s.equals(&inputs[bias].datum_type, &inputs[0].datum_type)?; s.equals(&inputs[bias].rank, 2)?; s.equals(&inputs[bias].shape[0], &inputs[2].shape[0])?; // num_directions s.equals(&inputs[bias].shape[1], 6 * inputs[2].shape[2].bex())?; // 6 * hidden_size } if let Some(seq_len) = self.optional_sequence_lens_input { s.equals(&inputs[seq_len].rank, 1)?; s.equals(&inputs[seq_len].shape[0], &inputs[0].shape[1])?; // batch_size } if let Some(initial_h) = self.optional_initial_h_input { s.equals(&inputs[initial_h].datum_type, &inputs[0].datum_type)?; s.equals(&inputs[initial_h].rank, 3)?; s.equals(&inputs[initial_h].shape[0], &inputs[1].shape[0])?; // num_directions s.equals(&inputs[initial_h].shape[1], &inputs[0].shape[1])?; // batch_size s.equals(&inputs[initial_h].shape[2], &inputs[2].shape[2])?; // hidden_size } if let Some(y) = self.optional_y_output { s.equals(&outputs[y].datum_type, &inputs[0].datum_type)?; s.equals(&outputs[y].rank, 4)?; 
s.equals(&outputs[y].shape[0], &inputs[0].shape[0])?; // seq_lenght s.equals(&outputs[y].shape[1], &inputs[1].shape[0])?; // num_directions s.equals(&outputs[y].shape[2], &inputs[0].shape[1])?; // batch_size s.equals(&outputs[y].shape[3], &inputs[2].shape[2])?; // hidden_size } if let Some(y_h) = self.optional_y_h_output { s.equals(&outputs[y_h].datum_type, &inputs[0].datum_type)?; s.equals(&outputs[y_h].rank, 3)?; s.equals(&outputs[y_h].shape[0], &inputs[1].shape[0])?; // num_directions s.equals(&outputs[y_h].shape[1], &inputs[0].shape[1])?; // batch_size s.equals(&outputs[y_h].shape[2], &inputs[2].shape[2])?; // hidden_size } Ok(()) } fn nboutputs(&self) -> TractResult<usize> { Ok(self.optional_y_output.is_some() as usize + self.optional_y_h_output.is_some() as usize) } as_op!(); #[allow(non_snake_case)] fn to_typed( &self, _source: &InferenceModel, node: &InferenceNode, target: &mut TypedModel, mapping: &HashMap<OutletId, OutletId>, ) -> TractResult<TVec<OutletId>> { use ops::{array, math, matmul, scan}; let x_fact = target.outlet_fact(mapping[&node.inputs[0]])?.clone(); let r_fact = target.outlet_fact(mapping[&node.inputs[2]])?; let b_size = x_fact.shape.dim(1).to_integer().unwrap() as usize; let h_size = r_fact.shape.dim(2).to_integer().unwrap() as usize; // FIXME: bidi let mut body = TypedModel::default(); let mut outer_inputs = vec![]; let mut input_mapping = vec![]; macro_rules! target_wire { ($name: ident = $op: expr, $($param: expr),*) => { let $name = target.wire_node( format!("{}-{}", node.name, stringify!($name)), $op, [$($param),*].as_ref())?[0]; } }; macro_rules! wire { ($name: ident = $op: expr, $($param: expr),*) => { let $name = body.wire_node( format!("{}-{}", node.name, stringify!($name)), $op, [$($param),*].as_ref())?[0]; } }; // X: onnx interface: [seq_length, batch_size, input_size] // scan outer interface: idem // scann inner interface: [chunk=1, batch_size, input_size] // onnx inner interface: [batch_size, input_size] outer_inputs.push(mapping[&node.inputs[0]]); input_mapping.push(scan::InputMapping::Scan { slot: 0, axis: 0, chunk: 1.to_dim() }); let mut x_source_fact = x_fact.clone(); x_source_fact.shape.set_dim(0, 1.to_dim())?; let x_source = body.add_source("x_source", x_source_fact)?.into(); wire!(Xt = AxisOp::Rm(0), x_source); // W: onnx interface: [num_directions, 3*hidden_size, input_size] // scan interfaces: [3*hidden_size, input_size] target_wire!(w = AxisOp::Rm(0), mapping[&node.inputs[1]]); outer_inputs.push(w); input_mapping.push(scan::InputMapping::Full { slot: 1 }); let W = body.add_source("w", target.outlet_fact(w)?.clone())?.into(); // R: onnx interface: [num_directions, 3*hidden_size, hidden_size] // scan interfaces: [3*hidden_size, hidden_size] target_wire!(r = AxisOp::Rm(0), mapping[&node.inputs[2]]); outer_inputs.push(r); input_mapping.push(scan::InputMapping::Full { slot: 2 }); let R = body.add_source("r", target.outlet_fact(r)?.clone())?.into(); // B: onnx interface: [num_directions, 6*hidden_size] let b = if let Some(slot) = self.optional_bias_input { target_wire!(b = AxisOp::Rm(0), mapping[&node.inputs[slot]]); outer_inputs.push(b); input_mapping.push(scan::InputMapping::Full { slot }); let b = body.add_source("b", target.outlet_fact(b)?.clone())?.into(); Some(b) } else { None }; if let Some(slot) = self.optional_sequence_lens_input { outer_inputs.push(mapping[&node.inputs[slot]]); } // initial h, optional: onnx: [num_directions, batch_size, hidden_size] // scan outer: [chunk=1, batch_size, hidden_size] // scan inner: [chunk=1, batch_size, 
hidden_size] // onnx inner: [batch_size, hidden_size] let initializer = if let Some(initial_h_input) = self.optional_initial_h_input { target_wire!(h = AxisOp::Rm(0), mapping[&node.inputs[initial_h_input]]); target_wire!(h_chunk = AxisOp::Add(0), h); outer_inputs.push(h_chunk); scan::StateInitializer::FromInput(initial_h_input) } else { scan::StateInitializer::Value( tract_ndarray::Array3::<f32>::zeros((1, b_size, h_size)).into_arc_tensor(), ) }; input_mapping.push(scan::InputMapping::State { initializer }); let h_source = body .add_source( "h_source", TypedFact::dt_shape(x_fact.datum_type, [1, b_size, h_size].as_ref())?, )? .into(); wire!(Ht_1 = AxisOp::Rm(0), h_source); wire!(Rz = array::Slice::new(0, 0 * h_size, 1 * h_size), R); wire!(Rr = array::Slice::new(0, 1 * h_size, 2 * h_size), R); wire!(Rh = array::Slice::new(0, 2 * h_size, 3 * h_size), R); wire!(Wz = array::Slice::new(0, 0 * h_size, 1 * h_size), W); wire!(Wr = array::Slice::new(0, 1 * h_size, 2 * h_size), W); wire!(Wh = array::Slice::new(0, 2 * h_size, 3 * h_size), W); // zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz) wire!(Xt_WzT = matmul::MatMul::default().with_b_trans(true), Xt, Wz); wire!(Ht_1_RzT = matmul::MatMul::default().with_b_trans(true), Ht_1, Rz); wire!(zt0 = math::add::bin_typed(), Xt_WzT, Ht_1_RzT); let mut zt0 = zt0; if let Some(b) = b { wire!(Wbz = array::Slice::new(0, 0 * h_size, 1 * h_size), b); wire!(Rbz = array::Slice::new(0, 3 * h_size, 4 * h_size), b); wire!(Wbz_Rbz = math::add::bin_typed(), Wbz, Rbz); wire!(zt0_biased = math::add::bin_typed(), zt0, Wbz_Rbz); zt0 = zt0_biased }; wire!(zt = self.f.clone(), zt0); // rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr) wire!(Xt_WrT = matmul::MatMul::default().with_b_trans(true), Xt, Wr); wire!(Ht_1_RrT = matmul::MatMul::default().with_b_trans(true), Ht_1, Rr); wire!(rt0 = math::add::bin_typed(), Xt_WrT, Ht_1_RrT); let mut rt0 = rt0; if let Some(b) = b { wire!(Wbr = array::Slice::new(0, 1 * h_size, 2 * h_size), b); wire!(Rbr = array::Slice::new(0, 4 * h_size, 5 * h_size), b); wire!(Wbr_Rbr = math::add::bin_typed(), Wbr, Rbr); wire!(rt0_biased = math::add::bin_typed(), rt0, Wbr_Rbr); rt0 = rt0_biased }; wire!(rt = self.f.clone(), rt0); // ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0 // ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0 wire!(Xt_WhT = matmul::MatMul::default().with_b_trans(true), Xt, Wh); let rt_Ht_1_RhT = if self.linear_before_reset { wire!(Ht_1_RhT = matmul::MatMul::default().with_b_trans(true), Ht_1, Rh); wire!(rt_Ht_1_RhT = math::mul::bin_typed(), rt, Ht_1_RhT); rt_Ht_1_RhT } else { wire!(rt_Ht_1 = math::mul::bin_typed(), rt, Ht_1); wire!(rt_Ht_1_RhT = matmul::MatMul::default().with_b_trans(true), rt_Ht_1, Rh); rt_Ht_1_RhT }; wire!(ht0 = math::add::bin_typed(), Xt_WhT, rt_Ht_1_RhT); let mut ht0 = ht0; if let Some(b) = b { wire!(Wbh = array::Slice::new(0, 2 * h_size, 3 * h_size), b); wire!(Rbh = array::Slice::new(0, 5 * h_size, 6 * h_size), b); wire!(Wbh_Rbh = math::add::bin_typed(), Wbh, Rbh); wire!(ht0_biased = math::add::bin_typed(), ht0, Wbh_Rbh); ht0 = ht0_biased } wire!(ht = self.g.clone(), ht0); // Ht = (1 - zt) (.) ht + zt (.) 
Ht-1 let one: OutletId = body.add_const("one", tensor0(1f32))?.into(); wire!(one_sub_zt = math::sub::bin_typed(), one, zt); wire!(one_sub_zt_ht = math::mul::bin_typed(), one_sub_zt, ht); wire!(zt_Ht_1 = math::mul::bin_typed(), zt, Ht_1); wire!(Ht = math::add::bin_typed(), one_sub_zt_ht, zt_Ht_1); wire!(y_h = AxisOp::Add(0), Ht); body.set_output_outlets(&[y_h])?; let output_mapping = scan::OutputMapping { state: true, axis: 0, chunk: 1.to_dim(), full_dim_hint: None, last_value_slot: self.optional_y_h_output, full_slot: self.optional_y_output, }; let scan_outputs = target.wire_node( &*node.name, ops::scan::Scan::new( body, input_mapping, vec![output_mapping], self.optional_sequence_lens_input, )?, &outer_inputs, )?; let mut result = tvec!(); if let Some(slot) = self.optional_y_output { target_wire!(y = AxisOp::Add(0), scan_outputs[slot]); result.push(y); } if let Some(slot) = self.optional_y_h_output { result.push(scan_outputs[slot]); } Ok(result) } } impl StatelessOp for GRU { fn eval(&self, inputs: TVec<Arc<Tensor>>) -> TractResult<TVec<Arc<Tensor>>> { let x: ArrayView3<f32> = inputs[0].to_array_view::<f32>()?.into_dimensionality()?; // [seq_length, batch_size, input_size] let w: ArrayView3<f32> = inputs[1].to_array_view::<f32>()?.into_dimensionality()?; // [num_directions, 3*hidden_size, input_size] let r: ArrayView3<f32> = inputs[2].to_array_view::<f32>()?.into_dimensionality()?; // [num_directions, 3*hidden_size, hidden_size] let bias = if let Some(ix) = self.optional_bias_input { Some(inputs[ix].to_array_view::<f32>()?.into_dimensionality::<Ix2>()?) // [num_directions, 6*hidden_size] } else { None }; let seq_length = x.shape()[0]; let batch_size = x.shape()[1]; let num_directions = w.shape()[0]; let hidden_size = r.shape()[2]; let mut output_y = self .optional_y_output .map(|_| Array4::<f32>::zeros((seq_length, num_directions, batch_size, hidden_size))); let mut output_y_h = self .optional_y_h_output .map(|_| Array3::<f32>::zeros((num_directions, batch_size, hidden_size))); for dir in 0..num_directions { let w = w.index_axis_move(Axis(0), dir); let r = r.index_axis_move(Axis(0), dir); let mut ht = if let Some(ix) = self.optional_initial_h_input { inputs[ix] .to_array_view::<f32>()? .index_axis_move(Axis(0), dir) .to_owned() .into_dimensionality()? } else { Array2::<f32>::zeros((batch_size, hidden_size)).into() }; for ix in 0..seq_length { let ix = if dir == 0 { ix } else { seq_length - 1 - ix }; let x = x.index_axis_move(Axis(0), ix); // Xt*W_zrh^T + Wb_zrh let mut x_zrh = x.dot(&w.t()); // batch_size x 3*hidden_size if let Some(bias) = bias { x_zrh += &bias.slice(s!(dir, 0..3 * hidden_size)); } // Ht-1*R_zr let h_zr = ht.dot(&r.slice_axis(Axis(0), (0..2 * hidden_size).into()).t()); // batch_size x 3*hidden_size let x_zrh: Array3<f32> = x_zrh.into_shape((batch_size, 3, hidden_size))?; let h_zrh = h_zr.into_shape((batch_size, 2, hidden_size))?; let mut zt = x_zrh.index_axis(Axis(1), 0).to_owned() + h_zrh.index_axis(Axis(1), 0); if let Some(bias) = bias { zt += &bias.slice(s!(dir, 3 * hidden_size..4 * hidden_size)); } let zt: Array2<f32> = self .f .as_stateless() .unwrap() .eval(tvec!(zt.into_arc_tensor()))? .remove(0) .into_tensor() .into_array::<f32>()? .into_dimensionality()?; let mut rt = x_zrh.index_axis(Axis(1), 1).to_owned() + h_zrh.index_axis(Axis(1), 1); if let Some(bias) = bias { rt += &bias.slice(s!(dir, 4 * hidden_size..5 * hidden_size)); } let rt = self .f .as_stateless() .unwrap() .eval(tvec!(rt.into_arc_tensor()))? 
.remove(0) .into_tensor() .into_array::<f32>()?; let ht1: Array2<f32> = if self.linear_before_reset { let mut ht = ht.dot(&r.slice_axis(Axis(1), (2 * hidden_size..).into()).t()); if let Some(bias) = bias { ht += &bias.slice(s!(dir, 5 * hidden_size..6 * hidden_size)); } ht * rt + x_zrh.index_axis(Axis(1), 2) } else { let mut ht = ht.dot(&r.slice_axis(Axis(0), (2 * hidden_size..).into()).t()) * rt; if let Some(bias) = bias { ht += &bias.slice(s!(dir, 5 * hidden_size..6 * hidden_size)); } ht + x_zrh.index_axis(Axis(1), 2) }; let ht1 = self .g .as_stateless() .unwrap() .eval(tvec!(ht1.into_arc_tensor()))? .remove(0) .into_tensor() .into_array::<f32>()?; ht = (1.0 - &zt) * ht1 + ht * &zt; if let Some(ref mut o) = output_y { o.index_axis_mut(Axis(0), ix).index_axis_move(Axis(0), dir).assign(&ht); } } if let Some(ref mut o) = output_y_h { o.index_axis_mut(Axis(0), dir).assign(&ht); } } let mut outputs = tvec!(); outputs.extend(output_y.into_iter().map(|t| t.into_arc_tensor())); outputs.extend(output_y_h.into_iter().map(|t| t.into_arc_tensor())); Ok(outputs) } }
41.670306
138
0.534084
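Because all of the optional slot fields on `GRU` are public, a specific wiring can be expressed with struct-update syntax. The sketch below is illustrative only: `gru_with_bias_and_state` is hypothetical and the slot indices depend on which optional ONNX inputs and outputs are actually present on a given node.

// Sketch: a GRU that expects a bias tensor and an initial hidden state, and
// exposes only the final hidden state Y_h.
fn gru_with_bias_and_state() -> GRU {
    GRU {
        optional_bias_input: Some(3),      // illustrative slot numbers; they
        optional_initial_h_input: Some(4), // depend on the surrounding node
        optional_y_h_output: Some(0),
        ..GRU::default()
    }
}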
900a50c4ef0667d3fec59616a6f2c9be60b8335e
12,500
//! Measure dynamic memory usage of your types! //! //! ## About //! //! Memory-tracking is a common activity in large applications, particularly ones //! that receive data from a network and store it in memory. By monitoring how much //! memory is used by different areas of the application, memory pressure can be //! alleviated by ignoring new packets, or implementing random drop logic for DoS //! mitigation. //! //! Measuring memory use on the stack is easy, with [`std::mem::size_of`] and //! friends. Measuring memory allocated on the heap is more tricky. Applications can //! use a custom global allocator to track the memory usage of different areas. This //! isn't an option for reusable library code however, and the nearest alternative //! (using custom allocators for individual types) is currently only an experimental //! feature in nightly Rust ([`allocator_api`]). //! //! [`allocator_api`]: https://github.com/rust-lang/rust/issues/32838 //! //! This crate takes a different approach: it provides traits that library authors //! can use to expose dynamic memory usage information on their types. By composing //! these implementations, we gain the ability to query the amount of heap-allocated //! memory in use by specific instances of types at any point in time, without any //! changes to the way in which these types are constructed. //! //! ## Minimum Supported Rust Version //! //! Requires Rust **1.51** or newer. //! //! In the future, we reserve the right to change MSRV (i.e. MSRV is out-of-scope for this //! crate's SemVer guarantees), however when we do it will be accompanied by a minor //! version bump. //! //! ## Usage //! //! ``` //! # use std::collections::HashMap; //! use memuse::DynamicUsage; //! //! // Simple types don't allocate memory on the heap. //! assert_eq!(7u64.dynamic_usage(), 0); //! assert_eq!("I'm simple!".dynamic_usage(), 0); //! //! // When a type allocates memory, we can see it! //! assert_eq!(vec![7u64; 2].dynamic_usage(), 16); //! //! // We see the memory the type has allocated, even if it isn't being used. //! let empty: Vec<u32> = Vec::with_capacity(100); //! assert_eq!(empty.len(), 0); //! assert_eq!(empty.dynamic_usage(), 400); //! //! // For some types, we can't measure the exact memory usage, so we return a best //! // estimate. If you need precision, call `dynamic_usage_bounds` which returns a //! // lower bound, and (if known) an upper bound. //! let map: HashMap<u8, u64> = HashMap::with_capacity(27); //! let (lower, upper): (usize, Option<usize>) = map.dynamic_usage_bounds(); //! assert!(upper.is_none()); //! ``` #![forbid(unsafe_code)] // Catch documentation errors caused by code changes. #![deny(broken_intra_doc_links)] use core::mem; use std::collections::{BinaryHeap, LinkedList, VecDeque}; /// Trait for measuring the dynamic memory usage of types. pub trait DynamicUsage { /// Returns a best estimate of the amount of heap-allocated memory used by this type. /// /// For most types, this will return an exact value. However, for types that use a /// complex allocation strategy (such as a `HashMap`), `memuse` cannot provide an /// exact heap allocation value, as it does not have access to the internal details /// and can only infer allocations from observable properties (such as the number of /// elements in a collection, or constants extracted from the implementation of the /// type). In those cases, this method returns a "best estimate" inferred from the /// implemented behaviour of the type. 
As more crates implement this trait themselves, /// the estimates will become more precise. /// /// The value returned by this method will always fall between the bounds returned by /// [`DynamicUsage::dynamic_usage_bounds`]: /// /// ``` /// use std::collections::HashMap; /// use memuse::DynamicUsage; /// /// let a: HashMap<u8, u64> = HashMap::with_capacity(27); /// let usage = a.dynamic_usage(); /// let (lower, upper) = a.dynamic_usage_bounds(); /// /// assert!(lower <= usage); /// if let Some(upper) = upper { /// assert!(usage <= upper); /// } /// ``` fn dynamic_usage(&self) -> usize; /// Returns the lower and upper bounds on the amount of heap-allocated memory used by /// this type. /// /// The lower bound is always precise; a type cannot allocate fewer than zero bytes, /// and a collection cannot allocate fewer than the number of bytes required to store /// the entries it holds. /// /// The upper bound is only present if some property of the type ensures that its /// allocations do not exceed the bound, and is `None` otherwise (to indicate an /// unlimited upper bound). /// /// If the type's allocated memory is precisely known, then the lower and upper bounds /// will be equal. fn dynamic_usage_bounds(&self) -> (usize, Option<usize>); } // // Helper macros // macro_rules! impl_no_dynamic_usage { ($($type:ty),+) => { $( impl DynamicUsage for $type { #[inline(always)] fn dynamic_usage(&self) -> usize { 0 } #[inline(always)] fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) { (0, Some(0)) } } )+ }; } macro_rules! impl_iterable_dynamic_usage { ($type:ty, $base_usage:expr) => { impl<T: DynamicUsage> DynamicUsage for $type { fn dynamic_usage(&self) -> usize { $base_usage(self) + self.iter().map(DynamicUsage::dynamic_usage).sum::<usize>() } fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) { let base = $base_usage(self); let (lower, upper) = self.iter().map(DynamicUsage::dynamic_usage_bounds).fold( (0, Some(0)), |(acc_lower, acc_upper), (lower, upper)| { (acc_lower + lower, acc_upper.zip(upper).map(|(a, b)| a + b)) }, ); (base + lower, upper.map(|u| base + u)) } } }; } // // Primitives // impl_no_dynamic_usage!(()); impl_no_dynamic_usage!(i8, i16, i32, i64, i128, isize); impl_no_dynamic_usage!(u8, u16, u32, u64, u128, usize); impl_no_dynamic_usage!(f32, f64, bool); impl_no_dynamic_usage!(char, str); // Tuples are handled below (so they render more nicely in docs) impl<T: DynamicUsage, const N: usize> DynamicUsage for [T; N] { fn dynamic_usage(&self) -> usize { self.iter().map(DynamicUsage::dynamic_usage).sum::<usize>() } fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) { self.iter().map(DynamicUsage::dynamic_usage_bounds).fold( (0, Some(0)), |(acc_lower, acc_upper), (lower, upper)| { (acc_lower + lower, acc_upper.zip(upper).map(|(a, b)| a + b)) }, ) } } impl_iterable_dynamic_usage!([T], |_| 0); // // Structs // impl DynamicUsage for String { fn dynamic_usage(&self) -> usize { self.capacity() } fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) { let usage = self.capacity(); (usage, Some(usage)) } } // // Containers // impl<T: DynamicUsage> DynamicUsage for Box<T> { fn dynamic_usage(&self) -> usize { mem::size_of::<T>() + self.as_ref().dynamic_usage() } fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) { let box_size = mem::size_of::<T>(); let (inner_lower, inner_upper) = self.as_ref().dynamic_usage_bounds(); (box_size + inner_lower, inner_upper.map(|u| box_size + u)) } } impl<T: DynamicUsage> DynamicUsage for Option<T> { fn dynamic_usage(&self) -> usize { 
self.as_ref().map(DynamicUsage::dynamic_usage).unwrap_or(0) } fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) { self.as_ref() .map(DynamicUsage::dynamic_usage_bounds) .unwrap_or((0, Some(0))) } } impl<T: DynamicUsage, E: DynamicUsage> DynamicUsage for Result<T, E> { fn dynamic_usage(&self) -> usize { match self { Ok(t) => t.dynamic_usage(), Err(e) => e.dynamic_usage(), } } fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) { match self { Ok(t) => t.dynamic_usage_bounds(), Err(e) => e.dynamic_usage_bounds(), } } } // // Collections // impl_iterable_dynamic_usage!(Vec<T>, |c: &Vec<T>| c.capacity() * mem::size_of::<T>()); impl_iterable_dynamic_usage!(BinaryHeap<T>, |c: &BinaryHeap<T>| { // BinaryHeap<T> is a wrapper around Vec<T> c.capacity() * mem::size_of::<T>() }); impl_iterable_dynamic_usage!(LinkedList<T>, |c: &LinkedList<T>| { c.len() * mem::size_of::<T>() }); impl_iterable_dynamic_usage!(VecDeque<T>, |c: &VecDeque<T>| { // +1 since the ringbuffer always leaves one space empty. (c.capacity() + 1) * mem::size_of::<T>() }); #[cfg(feature = "nonempty")] impl_iterable_dynamic_usage!(nonempty::NonEmpty<T>, |c: &nonempty::NonEmpty<T>| { // NonEmpty<T> stores its head element separately from its tail Vec<T>. (c.capacity() - 1) * mem::size_of::<T>() }); // // Larger definitions (placed at the end so they render more nicely in docs). // mod hash; mod tuple; #[cfg(test)] mod tests { use super::*; #[test] fn standard_types() { assert_eq!(129u8.dynamic_usage(), 0); assert_eq!(3i128.dynamic_usage(), 0); assert_eq!(7.0f32.dynamic_usage(), 0); assert_eq!("foobar".dynamic_usage(), 0); assert_eq!(129u8.dynamic_usage_bounds(), (0, Some(0))); assert_eq!(3i128.dynamic_usage_bounds(), (0, Some(0))); assert_eq!(7.0f32.dynamic_usage_bounds(), (0, Some(0))); assert_eq!("foobar".dynamic_usage_bounds(), (0, Some(0))); } #[test] fn string() { assert_eq!(String::new().dynamic_usage(), 0); assert_eq!("foobar".to_string().dynamic_usage(), 6); assert_eq!(String::new().dynamic_usage_bounds(), (0, Some(0))); assert_eq!("foobar".to_string().dynamic_usage_bounds(), (6, Some(6))); } #[test] fn boxed() { let a: u64 = 7; assert_eq!(a.dynamic_usage(), 0); assert_eq!(a.dynamic_usage_bounds(), (0, Some(0))); let b: Box<u64> = Box::new(42); assert_eq!(b.dynamic_usage(), 8); assert_eq!(b.dynamic_usage_bounds(), (8, Some(8))); let capacity = 7; let c: Box<Vec<u16>> = Box::new(Vec::with_capacity(capacity)); let expected = mem::size_of::<Vec<u16>>() + capacity * mem::size_of::<u16>(); assert_eq!(c.dynamic_usage(), expected); assert_eq!(c.dynamic_usage_bounds(), (expected, Some(expected))); } #[test] fn option() { let a: Option<Vec<u8>> = None; let b: Option<Vec<u8>> = Some(vec![7u8; 4]); assert_eq!(a.dynamic_usage(), 0); assert_eq!(a.dynamic_usage_bounds(), (0, Some(0))); assert_eq!(b.dynamic_usage(), 4); assert_eq!(b.dynamic_usage_bounds(), (4, Some(4))); } #[test] fn array() { let a = [7; 42]; assert_eq!(a.dynamic_usage(), 0); assert_eq!(a.dynamic_usage_bounds(), (0, Some(0))); let mut b = [None, None, None, None]; assert_eq!(b.dynamic_usage(), 0); assert_eq!(b.dynamic_usage_bounds(), (0, Some(0))); b[0] = Some(vec![4u8; 20]); assert_eq!(b.dynamic_usage(), 20); assert_eq!(b.dynamic_usage_bounds(), (20, Some(20))); } #[test] fn vec() { let capacity = 7; let mut a = Vec::with_capacity(capacity); a.push(42u64); let expected = capacity * mem::size_of::<u64>(); assert_eq!(a.dynamic_usage(), expected); assert_eq!(a.dynamic_usage_bounds(), (expected, Some(expected))); } #[cfg(feature = "nonempty")] #[test] fn nonempty() { 
let a = nonempty::NonEmpty::new(42); assert_eq!(a.dynamic_usage(), 0); assert_eq!(a.dynamic_usage_bounds(), (0, Some(0))); const CAPACITY: usize = 7; let b = nonempty::NonEmpty::from_slice(&[27u128; CAPACITY]).unwrap(); let expected = (CAPACITY - 1) * mem::size_of::<u128>(); assert_eq!(b.dynamic_usage(), expected); assert_eq!(b.dynamic_usage_bounds(), (expected, Some(expected))); } }
33.602151
95
0.60856
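Implementing the trait above for your own types is mostly mechanical composition; the sketch below does so for a hypothetical `CacheEntry` type, reusing the `String` and `u64` impls already provided by the crate.

// Sketch: a custom type whose only heap allocation is its key's buffer.
struct CacheEntry {
    key: String,
    hits: u64,
}

impl DynamicUsage for CacheEntry {
    fn dynamic_usage(&self) -> usize {
        // `u64` contributes 0; the String reports its capacity.
        self.key.dynamic_usage() + self.hits.dynamic_usage()
    }

    fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) {
        // Both fields are measured exactly, so the bounds collapse to a point.
        let usage = self.dynamic_usage();
        (usage, Some(usage))
    }
}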
3869835e4fcc729a017c2d7f0d9690425d0b63c8
4,016
//! [Self adaptive instructions section](http://www.se.rit.edu/~swen-344/projects/selfadaptive/selfadaptive.html) use crate::{ adaptive::{get_load, get_num_servers_up, should_serve_adds_bf, Load, NumServers}, error::{err_to_rejection, Error}, state::{HttpsClient, State}, util, }; use chrono::Utc; use db::adaptive_health::{HealthRecord, NewHealthRecord}; use futures::future::Future; use log::info; use pool::PooledConn; use warp::{path, Filter, Rejection, Reply}; /// Api for serving the advertisement. /// /// # Arguments /// * state - State object reference required for accessing db connections, auth keys, /// and other stateful constructs. pub fn ad_api(state: &State) -> impl Filter<Extract = (impl Reply,), Error = Rejection> + Clone { info!("Attaching Ad Api"); let root = state.server_lib_root(); let ad_path = root.join("static/ad/rit_ad.png"); path("advertisement") .and(warp::get2()) .and(state.https_client()) .and_then(|client: HttpsClient| { // Get the stats asynchronously as a precondition to serving the request. let servers = get_num_servers_up(&client).map_err(Error::reject); let load = get_load(&client).map_err(Error::reject); servers.join(load) }) .untuple_one() // converts `(NumServers, Load)` to `NumServers, Load` .and(state.db()) .map(determine_and_record_ad_serving) .and_then(err_to_rejection) .untuple_one() // converts `()` to `` .and(warp::fs::file(ad_path)) // ad_path is immutable after startup, so restrictions related to `and_then` can be worked around by just using `and` } /// Api for accessing health information related to serving the advertisement. /// /// # Arguments /// * state - State object reference required for accessing db connections, auth keys, /// and other stateful constructs. pub fn health_api( state: &State, ) -> impl Filter<Extract = (impl Reply,), Error = Rejection> + Clone { info!("Attaching Health Api"); let all_health = warp::get2().and(state.db()).and_then(|conn: PooledConn| { HealthRecord::get_all(&conn) .map_err(Error::from_reject) .map(util::json) }); // requirements only ask for one week, so this isn't getting parameterized. let last_week_health = warp::get2() .and(path("week")) .and(state.db()) .and_then(|conn: PooledConn| { HealthRecord::get_last_7_days(&conn) .map_err(Error::from_reject) .map(util::json) }); path("health").and(all_health.or(last_week_health)) } /// Determines if the add should be served and records the result. /// /// # Arguments /// * available_servers - The number of servers that are available. /// * load - The "load" currently on those servers. /// * conn - The connection to the database. /// /// # Note /// It returns Ok(()) if the add should be served, and throws an 500 internal server error if it can't be sent. fn determine_and_record_ad_serving( available_servers: NumServers, load: Load, conn: PooledConn, ) -> Result<(), Error> { let should_send_advertisement = should_serve_adds_bf(load, available_servers); info!( "Add serving, load: {}, available_servers: {}, serving: {}", load.0, available_servers.0, should_send_advertisement ); let new_health_record = NewHealthRecord { available_servers: available_servers.0 as i32, load: load.0 as i32, did_serve: should_send_advertisement, time_recorded: Utc::now().naive_utc(), }; HealthRecord::create(new_health_record, &conn).map_err(Error::from)?; if should_send_advertisement { Ok(()) } else { Err(Error::internal_server_error(format!(r##"The server load was determined to be too high, and therefore the "advertisement" was not sent. 
load: {}, servers: {}"##, load.0, available_servers.0))) } }
37.185185
204
0.651394
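A sketch of how the two filters above might be mounted together; `serve_adaptive` is hypothetical, the bind address is a placeholder, and it assumes the blocking `run` of the warp version used here (the `get2`-era API).

// Sketch: combine the advertisement and health routes and serve them.
pub fn serve_adaptive(state: &State) {
    let routes = ad_api(state).or(health_api(state));
    warp::serve(routes).run(([127, 0, 0, 1], 8080)); // placeholder address/port
}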
f5adf7a081fd751947376c81f7d6051214a8bcb6
4,158
use super::{resolve_engine_binary, Engine, EngineError}; use std::{io::Write, path::{Path, PathBuf}, process::Command, result::Result}; use tempfile::TempPath; /// Contents of the signature policy file used by Buildah (normally /// present at /etc/containers/policy.json.) /// /// Our policy will be to default to accepting everything (which is /// also the default given by RPM installations of buildah). /// /// See https://www.mankier.com/5/containers-policy.json for further /// information. const SIGNATURE_POLICY: &str = include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/defaults/containers-policy.json")); #[derive(Debug)] pub(super) struct BuildahEngine { binary: PathBuf, /// Path to a signature policy file that we control, not /// `/etc/containers/policy.json`. /// /// The file will be removed when this struct is dropped. policy: TempPath, } #[derive(Debug, Fail)] enum BuildahError { #[fail(display = "Could not create signature policy file for Buildah: {}", _0)] SignaturePolicyError(std::io::Error), } impl From<BuildahError> for EngineError { fn from(b: BuildahError) -> EngineError { EngineError::EngineSpecificError(b.into()) } } impl BuildahEngine { pub fn new() -> Result<Self, EngineError> { let binary = resolve_engine_binary("buildah")?; let policy = Self::signature_policy()?; Ok(BuildahEngine { binary, policy }) } /// Write out a permissive default signature policy to a temporary /// file, and return the path to that file. /// /// The file will be removed when that `TempPath` is dropped. fn signature_policy() -> Result<TempPath, BuildahError> { let mut policy = tempfile::NamedTempFile::new().map_err(BuildahError::SignaturePolicyError)?; policy.write_all(SIGNATURE_POLICY.as_bytes()) .map_err(BuildahError::SignaturePolicyError)?; Ok(policy.into_temp_path()) } } impl Engine for BuildahEngine { /// `buildah images -q mycompany/coolapp` fn image_id_command(&self, image_reference: &str) -> Command { let mut cmd = Command::new(&self.binary); cmd.args(&["images", "-q", image_reference]); cmd } /// `buildah rmi mycompany/coolapp` fn image_removal_command(&self, image_reference: &str) -> Command { let mut cmd = Command::new(&self.binary); cmd.args(&["rmi", image_reference]); cmd } /// `buildah push --authfile=/path/to/local/config.json push mycompany/mycoolapp` fn image_push_command(&self, image_reference: &str, config_dir: &Path) -> Command { let mut cmd = Command::new(&self.binary); cmd.args(&["push", "--authfile", &config_dir.join("config.json").to_string_lossy(), image_reference]); cmd } fn build_command(&self, build_context: &Path, tags: &[String], memory: Option<&str>) -> Command { let mut cmd = Command::new(&self.binary); cmd.current_dir(build_context); cmd.arg("build-using-dockerfile") .arg("--layers") .arg("--force-rm"); // Need this (Buildah's default format is OCI) because // apparently DockerHub has problems with OCI images. // // https://github.com/docker/hub-feedback/issues/1871 // // (This is only really a problem when *pushing* images, but // since DockerHub is the 800 lb gorilla, we'll defer to it // for now.) cmd.args(&["--format", "docker"]); // Have to override the policy file location because we don't // control /etc/containers/policy.json cmd.arg("--signature-policy"); cmd.arg(&self.policy); if let Some(mem) = memory { cmd.arg("--memory").arg(mem); } for tag in tags { cmd.arg("--tag").arg(&tag); } cmd.arg("."); cmd } }
33.264
90
0.589947
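A small sketch showing how the engine above is driven; `example_build_command` is a hypothetical function, the context directory and tag are placeholders, and the command is only assembled, not executed.

// Sketch: assemble a `buildah build-using-dockerfile` invocation with one tag
// and no memory limit (pass Some("2g") to constrain the build).
fn example_build_command() -> Result<std::process::Command, EngineError> {
    let engine = BuildahEngine::new()?;
    let tags = vec!["mycompany/coolapp:latest".to_string()];
    Ok(engine.build_command(Path::new("/tmp/build-context"), &tags, None))
}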
e2c2918c1fe59917082ad8acb4a6a2db4f9a656f
12,765
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { crate::model::resolver::{Resolver, ResolverError, ResolverFut}, anyhow::format_err, cm_fidl_validator, fidl::endpoints::ClientEnd, fidl_fuchsia_io::{self as fio, DirectoryMarker}, fidl_fuchsia_sys::LoaderProxy, fidl_fuchsia_sys2 as fsys, fuchsia_url::pkg_url::PkgUrl, std::path::Path, }; #[allow(unused)] pub static SCHEME: &str = "fuchsia-pkg"; /// Resolves component URLs with the "fuchsia-pkg" scheme by proxying to an existing /// fuchsia.sys.Loader service (which is the CFv1 equivalent of fuchsia.sys2.ComponentResolver). /// /// This resolver implementation is used to bridge the v1 and v2 component runtime worlds in /// situations where the v2 runtime runs under the v1 runtime. /// /// See the fuchsia_pkg_url crate for URL syntax. pub struct FuchsiaPkgResolver { loader: LoaderProxy, } impl FuchsiaPkgResolver { pub fn new(loader: LoaderProxy) -> FuchsiaPkgResolver { FuchsiaPkgResolver { loader } } async fn resolve_async<'a>( &'a self, component_url: &'a str, ) -> Result<fsys::Component, ResolverError> { // Parse URL. let fuchsia_pkg_url = PkgUrl::parse(component_url) .map_err(|e| ResolverError::url_parse_error(component_url, e))?; let cm_path = Path::new( fuchsia_pkg_url .resource() .ok_or(ResolverError::url_missing_resource_error(component_url))?, ); let package_url = fuchsia_pkg_url.root_url().to_string(); // Resolve package. let package = self .loader .load_url(&package_url) .await .map_err(|e| ResolverError::component_not_available(component_url, e))? .ok_or(ResolverError::component_not_available( component_url, format_err!("package not available"), ))?; let dir = package.directory.ok_or(ResolverError::component_not_available( component_url, format_err!("package is missing directory handle"), ))?; // Read component manifest from package. 
let dir = ClientEnd::<DirectoryMarker>::new(dir) .into_proxy() .expect("failed to create directory proxy"); let file = io_util::open_file(&dir, cm_path, fio::OPEN_RIGHT_READABLE) .map_err(|e| ResolverError::manifest_not_available(component_url, e))?; let component_decl = io_util::read_file_fidl(&file).await.map_err(|e| { match e.downcast_ref::<io_util::file::ReadError>() { Some(_) => ResolverError::manifest_not_available(component_url, e), None => ResolverError::manifest_invalid(component_url, e), } })?; // Validate the component manifest cm_fidl_validator::validate(&component_decl) .map_err(|e| ResolverError::manifest_invalid(component_url, e))?; let package_dir = ClientEnd::new( dir.into_channel().expect("could not convert proxy to channel").into_zx_channel(), ); let package = fsys::Package { package_url: Some(package_url), package_dir: Some(package_dir) }; Ok(fsys::Component { resolved_url: Some(component_url.to_string()), decl: Some(component_decl), package: Some(package), }) } } impl Resolver for FuchsiaPkgResolver { fn resolve<'a>(&'a self, component_url: &'a str) -> ResolverFut { Box::pin(self.resolve_async(component_url)) } } #[cfg(test)] mod tests { use { super::*, fidl::encoding::encode_persistent, fidl::endpoints::{self, ServerEnd}, fidl_fuchsia_data as fdata, fidl_fuchsia_sys::{LoaderMarker, LoaderRequest, Package}, fuchsia_async as fasync, fuchsia_zircon as zx, futures::TryStreamExt, std::path::Path, vfs::{ self, directory::entry::DirectoryEntry, execution_scope::ExecutionScope, file::pcb::asynchronous::read_only_static, pseudo_directory, }, }; struct MockLoader {} impl MockLoader { fn start() -> LoaderProxy { let (proxy, server): (_, ServerEnd<LoaderMarker>) = endpoints::create_proxy().unwrap(); fasync::Task::local(async move { let loader = MockLoader {}; let mut stream = server.into_stream().unwrap(); while let Some(LoaderRequest::LoadUrl { url, responder }) = stream.try_next().await.expect("failed to read request") { let mut package = loader.load_url(&url); let package = package.as_mut(); responder.send(package).expect("responder failed"); } }) .detach(); proxy } // TODO(fxb/37534): This can be simplified to no longer need to use the test's real package // directory once Rust vfs supports OPEN_RIGHT_EXECUTABLE. fn load_url(&self, package_url: &str) -> Option<Package> { let (dir_c, dir_s) = zx::Channel::create().unwrap(); let parsed_url = PkgUrl::parse(&package_url).expect("bad url"); // Simulate a package server that only contains the "hello-world" package. match parsed_url.name() { "hello-world" => { let path = Path::new("/pkg"); io_util::connect_in_namespace( path.to_str().unwrap(), dir_s, fio::OPEN_RIGHT_READABLE | fio::OPEN_RIGHT_EXECUTABLE, ) .expect("could not connect to /pkg"); return Some(Package { data: None, directory: Some(dir_c), resolved_url: package_url.to_string(), }); } "invalid-cm" => { // Provide a cm that will fail due to multiple runners being configured. let sub_dir = pseudo_directory! { "meta" => pseudo_directory! 
{ "invalid.cm" => read_only_static( encode_persistent(&mut fsys::ComponentDecl { program: None, uses: Some(vec![ fsys::UseDecl::Runner( fsys::UseRunnerDecl { source_name: Some("elf".to_string()), } ), fsys::UseDecl::Runner ( fsys::UseRunnerDecl { source_name: Some("web".to_string()) } ) ]), exposes: None, offers: None, capabilities: None, children: None, collections: None, environments: None, facets: None }).unwrap() ), } }; sub_dir.open( ExecutionScope::from_executor(Box::new(fasync::EHandle::local())), fio::OPEN_RIGHT_READABLE, fio::MODE_TYPE_DIRECTORY, vfs::path::Path::empty(), ServerEnd::new(dir_s), ); return Some(Package { data: None, directory: Some(dir_c), resolved_url: package_url.to_string(), }); } _ => return None, } } } #[fuchsia_async::run_singlethreaded(test)] async fn resolve_test() { let loader = MockLoader::start(); let resolver = FuchsiaPkgResolver::new(loader); let url = "fuchsia-pkg://fuchsia.com/hello-world#meta/hello-world.cm"; let component = resolver.resolve_async(url).await.expect("resolve failed"); // Check that both the returned component manifest and the component manifest in // the returned package dir match the expected value. This also tests that // the resolver returned the right package dir. let fsys::Component { resolved_url, decl, package } = component; assert_eq!(resolved_url.unwrap(), url); let program = fdata::Dictionary { entries: Some(vec![fdata::DictionaryEntry { key: "binary".to_string(), value: Some(Box::new(fdata::DictionaryValue::Str("bin/hello_world".to_string()))), }]), }; let expected_decl = fsys::ComponentDecl { program: Some(program), uses: Some(vec![ fsys::UseDecl::Runner(fsys::UseRunnerDecl { source_name: Some("elf".to_string()) }), fsys::UseDecl::Protocol(fsys::UseProtocolDecl { source: Some(fsys::Ref::Parent(fsys::ParentRef {})), source_path: Some("/svc/fuchsia.logger.LogSink".to_string()), target_path: Some("/svc/fuchsia.logger.LogSink".to_string()), }), ]), exposes: None, offers: None, facets: None, capabilities: None, children: None, collections: None, environments: None, }; assert_eq!(decl.unwrap(), expected_decl); let fsys::Package { package_url, package_dir } = package.unwrap(); assert_eq!(package_url.unwrap(), "fuchsia-pkg://fuchsia.com/hello-world"); let dir_proxy = package_dir.unwrap().into_proxy().unwrap(); let path = Path::new("meta/hello-world.cm"); let file_proxy = io_util::open_file(&dir_proxy, path, fio::OPEN_RIGHT_READABLE) .expect("could not open cm"); assert_eq!( io_util::read_file_fidl::<fsys::ComponentDecl>(&file_proxy) .await .expect("could not read cm"), expected_decl ); // Try to load an executable file, like a binary, reusing the library_loader helper that // opens with OPEN_RIGHT_EXECUTABLE and gets a VMO with VMO_FLAG_EXEC. library_loader::load_vmo(&dir_proxy, "bin/hello_world") .await .expect("failed to open executable file"); } macro_rules! test_resolve_error { ($resolver:ident, $url:expr, $resolver_error_expected:ident) => { let url = $url; let res = $resolver.resolve_async(url).await; match res.err().expect("unexpected success") { ResolverError::$resolver_error_expected { url: u, .. 
} => { assert_eq!(u, url); } e => panic!("unexpected error {:?}", e), } }; } #[fuchsia_async::run_singlethreaded(test)] async fn resolve_errors_test() { let loader = MockLoader::start(); let resolver = FuchsiaPkgResolver::new(loader); test_resolve_error!( resolver, "fuchsia-pkg:///hello-world#meta/hello-world.cm", UrlParseError ); test_resolve_error!( resolver, "fuchsia-pkg://fuchsia.com/hello-world", UrlMissingResourceError ); test_resolve_error!( resolver, "fuchsia-pkg://fuchsia.com/goodbye-world#meta/hello-world.cm", ComponentNotAvailable ); test_resolve_error!( resolver, "fuchsia-pkg://fuchsia.com/hello-world#meta/does_not_exist.cm", ManifestNotAvailable ); test_resolve_error!( resolver, "fuchsia-pkg://fuchsia.com/hello-world#meta/component_manager_tests_invalid.cm", ManifestInvalid ); test_resolve_error!( resolver, "fuchsia-pkg://fuchsia.com/invalid-cm#meta/invalid.cm", ManifestInvalid ); } }
40.52381
100
0.528085
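The resolver record above splits a `fuchsia-pkg` component URL into a package URL plus a `#meta/...` manifest resource before calling the v1 loader. A small sketch of just that parsing step, using only the `PkgUrl` calls that appear in the record and assuming the same `fuchsia_url` crate version; the example URL in the trailing comment is the one used by the record's own tests.

use fuchsia_url::pkg_url::PkgUrl;

// Returns (package URL, manifest resource path), or None if the URL is invalid
// or has no resource fragment. Mirrors the first half of `resolve_async` above.
fn split_component_url(component_url: &str) -> Option<(String, String)> {
    let url = PkgUrl::parse(component_url).ok()?;
    let resource = url.resource()?.to_string();
    let package_url = url.root_url().to_string();
    Some((package_url, resource))
}

// split_component_url("fuchsia-pkg://fuchsia.com/hello-world#meta/hello-world.cm")
//   => Some(("fuchsia-pkg://fuchsia.com/hello-world", "meta/hello-world.cm"))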
23720af6d74294ee874030fbc93d1e6a059ab0c5
1,448
/// Not intended for public use. #[macro_export] macro_rules! fold_only_key { () => { fn fold_class_member(&mut self, m: ClassMember) -> ClassMember { match m { ClassMember::Method(m) => ClassMember::Method(ClassMethod { key: m.key.fold_with(self), ..m }), ClassMember::PrivateMethod(m) => ClassMember::PrivateMethod(PrivateMethod { key: m.key.fold_with(self), ..m }), ClassMember::ClassProp(p) => ClassMember::ClassProp(ClassProp { key: p.key.fold_with(self), ..p }), ClassMember::PrivateProp(p) => ClassMember::PrivateProp(PrivateProp { key: p.key.fold_with(self), ..p }), _ => m, } } }; } #[macro_export] macro_rules! visit_mut_only_key { () => { fn visit_mut_class_member(&mut self, m: &mut ClassMember) { match m { ClassMember::Method(m) => m.key.visit_mut_with(self), ClassMember::PrivateMethod(m) => m.key.visit_mut_with(self), ClassMember::ClassProp(m) => m.key.visit_mut_with(self), ClassMember::PrivateProp(m) => m.key.visit_mut_with(self), _ => {} } } }; }
34.47619
91
0.471685
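The two macros above exist so that many different visitors can share one `fold_class_member` / `visit_mut_class_member` body that recurses into member keys only. Wiring them into real swc visitor traits needs crates the record does not show, so the sketch below is a self-contained analogue of the same pattern, with a made-up `Visitor` trait and `Node` type standing in for swc's.

// Illustrative analogue: a macro that injects a shared method body into any
// impl that invokes it, just like `visit_mut_only_key!` does for swc visitors.
#[macro_export]
macro_rules! visit_numbers_only {
    () => {
        fn visit(&mut self, node: &mut Node) {
            if let Node::Number(n) = node {
                self.visit_number(n);
            }
            // Every other variant is deliberately skipped.
        }
    };
}

enum Node {
    Number(i64),
    Text(String),
}

trait Visitor {
    fn visit_number(&mut self, n: &mut i64);
    fn visit(&mut self, node: &mut Node);
}

struct Doubler;

impl Visitor for Doubler {
    fn visit_number(&mut self, n: &mut i64) {
        *n *= 2;
    }
    // Pull in the shared `visit` body generated by the macro.
    visit_numbers_only!();
}

fn main() {
    let mut number = Node::Number(21);
    let mut text = Node::Text("unchanged".to_string());
    let mut pass = Doubler;
    pass.visit(&mut number); // becomes Number(42)
    pass.visit(&mut text);   // not a Number, so visit_number is never called
}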
ac12f60c96deabac66941b5df0da17c0a633bf5b
115
extern crate cc; fn main() { cc::Build::new() .file("src/double.c") .compile("libdouble.a"); }
16.428571
32
0.521739
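The build script above compiles `src/double.c` into `libdouble.a` and lets Cargo link it into the crate. The record does not show the C source, so the matching Rust side below is a guess for illustration: it assumes the C file exports something like `int32_t double_input(int32_t)`, and the real declaration must mirror whatever `double.c` actually defines.

// src/main.rs: hypothetical caller for the C code built by the script above.
extern "C" {
    // Assumed signature; must match the function actually defined in src/double.c.
    fn double_input(x: i32) -> i32;
}

fn main() {
    // FFI calls are unsafe because the compiler cannot verify the C signature.
    let doubled = unsafe { double_input(21) };
    println!("21 doubled by C is {}", doubled);
}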
014bb7bdc2092eb5fa479a7c7cb5ef5aa424ce71
4,558
// Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! Explicit endian types useful for embedding in structs or reinterpreting data. //! //! Each endian type is guaranteed to have the same size and alignment as a regular unsigned primitive //! of equal size. //! //! # Examples //! //! ``` //! # use data_model::*; //! let b: Be32 = From::from(3); //! let l: Le32 = From::from(3); //! //! assert_eq!(b.to_native(), 3); //! assert_eq!(l.to_native(), 3); //! assert!(b == 3); //! assert!(l == 3); //! //! let b_trans: u32 = unsafe { std::mem::transmute(b) }; //! let l_trans: u32 = unsafe { std::mem::transmute(l) }; //! //! #[cfg(target_endian = "little")] //! assert_eq!(l_trans, 3); //! #[cfg(target_endian = "big")] //! assert_eq!(b_trans, 3); //! //! assert_ne!(b_trans, l_trans); //! ``` use assertions::const_assert; use std::mem::{align_of, size_of}; use crate::DataInit; macro_rules! endian_type { ($old_type:ident, $new_type:ident, $to_new:ident, $from_new:ident) => { /// An unsigned integer type with an explicit endianness. /// /// See module level documentation for examples. #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)] pub struct $new_type($old_type); impl $new_type { fn _assert() { const_assert!(align_of::<$new_type>() == align_of::<$old_type>()); const_assert!(size_of::<$new_type>() == size_of::<$old_type>()); } /// Converts `self` to the native endianness. pub fn to_native(self) -> $old_type { $old_type::$from_new(self.0) } } unsafe impl DataInit for $new_type {} impl PartialEq<$old_type> for $new_type { fn eq(&self, other: &$old_type) -> bool { self.0 == $old_type::$to_new(*other) } } impl PartialEq<$new_type> for $old_type { fn eq(&self, other: &$new_type) -> bool { $old_type::$to_new(other.0) == *self } } impl Into<$old_type> for $new_type { fn into(self) -> $old_type { $old_type::$from_new(self.0) } } impl From<$old_type> for $new_type { fn from(v: $old_type) -> $new_type { $new_type($old_type::$to_new(v)) } } }; } endian_type!(u16, Le16, to_le, from_le); endian_type!(u32, Le32, to_le, from_le); endian_type!(u64, Le64, to_le, from_le); endian_type!(usize, LeSize, to_le, from_le); endian_type!(u16, Be16, to_be, from_be); endian_type!(u32, Be32, to_be, from_be); endian_type!(u64, Be64, to_be, from_be); endian_type!(usize, BeSize, to_be, from_be); #[cfg(test)] mod tests { use super::*; use std::convert::From; use std::mem::transmute; #[cfg(target_endian = "little")] const NATIVE_LITTLE: bool = true; #[cfg(target_endian = "big")] const NATIVE_LITTLE: bool = false; const NATIVE_BIG: bool = !NATIVE_LITTLE; macro_rules!
endian_test { ($old_type:ty, $new_type:ty, $test_name:ident, $native:expr) => { mod $test_name { use super::*; #[allow(overflowing_literals)] #[test] fn equality() { let v = 0x0123456789ABCDEF as $old_type; let endian_v: $new_type = From::from(v); let endian_into: $old_type = endian_v.into(); let endian_transmute: $old_type = unsafe { transmute(endian_v) }; if $native { assert_eq!(endian_v, endian_transmute); } else { assert_eq!(endian_v, endian_transmute.swap_bytes()); } assert_eq!(v, endian_into); assert!(v == endian_v); assert!(endian_v == v); } } }; } endian_test!(u16, Le16, test_le16, NATIVE_LITTLE); endian_test!(u32, Le32, test_le32, NATIVE_LITTLE); endian_test!(u64, Le64, test_le64, NATIVE_LITTLE); endian_test!(usize, LeSize, test_le_size, NATIVE_LITTLE); endian_test!(u16, Be16, test_be16, NATIVE_BIG); endian_test!(u32, Be32, test_be32, NATIVE_BIG); endian_test!(u64, Be64, test_be64, NATIVE_BIG); endian_test!(usize, BeSize, test_be_size, NATIVE_BIG); }
31.652778
100
0.555068
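Beyond the doc-test shown in the module above, the usual reason for these wrappers is fixing the byte order of fields in a `#[repr(C)]` struct that mirrors an on-disk or on-wire layout. A small sketch, assuming it sits next to the module above so `Le16`/`Le32` are in scope; the header layout itself is made up for illustration, and only the `From`/`to_native` conversions come from the record.

// A hypothetical wire header whose fields are always little-endian in memory,
// regardless of the host CPU.
#[repr(C)]
#[derive(Copy, Clone)]
struct WireHeader {
    magic: Le32,
    version: Le16,
    flags: Le16,
}

fn make_header() -> WireHeader {
    // Same conversion style as the module's doc-test.
    let magic: Le32 = From::from(0x1234_5678); // stored as 78 56 34 12 on every host
    let version: Le16 = From::from(1);
    let flags: Le16 = From::from(0);
    WireHeader { magic, version, flags }
}

fn is_supported(h: &WireHeader) -> bool {
    // `to_native` swaps bytes only when the host is big-endian.
    h.magic.to_native() == 0x1234_5678 && h.version.to_native() == 1
}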
03ab62295a00538a7942b386114038bf5b271d67
189
#[test] fn test_test_regex() { assert_emscripten_output!( "../../emtests/test_regex.wasm", "test_regex", vec![], "../../emtests/test_regex.out" ); }
18.9
40
0.52381
e2a5cd9670e0c50d889eb56a733c089e092aeb1a
6,359
use crate::infer::canonical::{ Canonical, Canonicalized, CanonicalizedQueryResponse, OriginalQueryValues, QueryRegionConstraints, QueryResponse, }; use crate::infer::{InferCtxt, InferOk}; use std::fmt; use std::rc::Rc; use crate::traits::query::Fallible; use crate::traits::ObligationCause; use crate::ty::fold::TypeFoldable; use crate::ty::{ParamEnvAnd, TyCtxt}; pub mod ascribe_user_type; pub mod custom; pub mod eq; pub mod implied_outlives_bounds; pub mod normalize; pub mod outlives; pub mod prove_predicate; use self::prove_predicate::ProvePredicate; pub mod subtype; /// "Type ops" are used in NLL to perform some particular action and /// extract out the resulting region constraints (or an error if it /// cannot be completed). pub trait TypeOp<'tcx>: Sized + fmt::Debug { type Output; /// Processes the operation and all resulting obligations, /// returning the final result along with any region constraints /// (they will be given over to the NLL region solver). fn fully_perform( self, infcx: &InferCtxt<'_, 'tcx>, ) -> Fallible<(Self::Output, Option<Rc<QueryRegionConstraints<'tcx>>>)>; } /// "Query type ops" are type ops that are implemented using a /// [canonical query][c]. The `Self` type here contains the kernel of /// information needed to do the operation -- `TypeOp` is actually /// implemented for `ParamEnvAnd<Self>`, since we always need to bring /// along a parameter environment as well. For query type-ops, we will /// first canonicalize the key and then invoke the query on the tcx, /// which produces the resulting query region constraints. /// /// [c]: https://rust-lang.github.io/rustc-guide/traits/canonicalization.html pub trait QueryTypeOp<'tcx>: fmt::Debug + Sized + TypeFoldable<'tcx> + 'tcx { type QueryResponse: TypeFoldable<'tcx>; /// Give query the option for a simple fast path that never /// actually hits the tcx cache lookup etc. Return `Some(r)` with /// a final result or `None` to do the full path. fn try_fast_path( tcx: TyCtxt<'tcx>, key: &ParamEnvAnd<'tcx, Self>, ) -> Option<Self::QueryResponse>; /// Performs the actual query with the canonicalized key -- the /// real work happens here. This method is not given an `infcx` /// because it shouldn't need one -- and if it had access to one, /// it might do things like invoke `sub_regions`, which would be /// bad, because it would create subregion relationships that are /// not captured in the return value. fn perform_query( tcx: TyCtxt<'tcx>, canonicalized: Canonicalized<'tcx, ParamEnvAnd<'tcx, Self>>, ) -> Fallible<CanonicalizedQueryResponse<'tcx, Self::QueryResponse>>; /// Casts a lifted query result (which is in the gcx lifetime) /// into the tcx lifetime. This is always just an identity cast, /// but the generic code doesn't realize it -- put another way, in /// the generic code, we have a `Lifted<'tcx, Self::QueryResponse>` /// and we want to convert that to a `Self::QueryResponse`. This is /// not a priori valid, so we can't do it -- but in practice, it /// is always a no-op (e.g., the lifted form of a type, /// `Ty<'tcx>`, is a subtype of `Ty<'tcx>`). So we have to push /// the operation into the impls that know more specifically what /// `QueryResponse` is. This operation would (maybe) be nicer with /// something like HKTs or GATs, since then we could make /// `QueryResponse` parametric and `'tcx` and `'tcx` etc. 
fn shrink_to_tcx_lifetime( lifted_query_result: &'a CanonicalizedQueryResponse<'tcx, Self::QueryResponse>, ) -> &'a Canonical<'tcx, QueryResponse<'tcx, Self::QueryResponse>>; fn fully_perform_into( query_key: ParamEnvAnd<'tcx, Self>, infcx: &InferCtxt<'_, 'tcx>, output_query_region_constraints: &mut QueryRegionConstraints<'tcx>, ) -> Fallible<Self::QueryResponse> { if let Some(result) = QueryTypeOp::try_fast_path(infcx.tcx, &query_key) { return Ok(result); } // FIXME(#33684) -- We need to use // `canonicalize_hr_query_hack` here because of things // like the subtype query, which go awry around // `'static` otherwise. let mut canonical_var_values = OriginalQueryValues::default(); let canonical_self = infcx.canonicalize_hr_query_hack(&query_key, &mut canonical_var_values); let canonical_result = Self::perform_query(infcx.tcx, canonical_self)?; let canonical_result = Self::shrink_to_tcx_lifetime(&canonical_result); let param_env = query_key.param_env; let InferOk { value, obligations } = infcx .instantiate_nll_query_response_and_region_obligations( &ObligationCause::dummy(), param_env, &canonical_var_values, canonical_result, output_query_region_constraints, )?; // Typically, instantiating NLL query results does not // create obligations. However, in some cases there // are unresolved type variables, and unify them *can* // create obligations. In that case, we have to go // fulfill them. We do this via a (recursive) query. for obligation in obligations { let () = ProvePredicate::fully_perform_into( obligation .param_env .and(ProvePredicate::new(obligation.predicate)), infcx, output_query_region_constraints, )?; } Ok(value) } } impl<'tcx, Q> TypeOp<'tcx> for ParamEnvAnd<'tcx, Q> where Q: QueryTypeOp<'tcx>, { type Output = Q::QueryResponse; fn fully_perform( self, infcx: &InferCtxt<'_, 'tcx>, ) -> Fallible<(Self::Output, Option<Rc<QueryRegionConstraints<'tcx>>>)> { let mut region_constraints = QueryRegionConstraints::default(); let r = Q::fully_perform_into(self, infcx, &mut region_constraints)?; // Promote the final query-region-constraints into a // (optional) ref-counted vector: let opt_qrc = if region_constraints.is_empty() { None } else { Some(Rc::new(region_constraints)) }; Ok((r, opt_qrc)) } }
40.246835
87
0.655292
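The `TypeOp`/`QueryTypeOp` traits above are inseparable from rustc's `InferCtxt` and canonicalization machinery, so they cannot be exercised outside the compiler. The sketch below is only a simplified, self-contained analogue of the pattern the comments describe (a cheap fast path, an expensive query that records side constraints, and a blanket "fully perform" driver); it is not rustc's actual API.

// Toy analogue of QueryTypeOp; `constraints` stands in for QueryRegionConstraints.
trait QueryOp: Sized {
    type Output;

    /// May answer without doing the expensive work (cf. `try_fast_path`).
    fn try_fast_path(&self) -> Option<Self::Output>;

    /// The real work; may push extra constraints for the caller to solve later.
    fn perform_query(self, constraints: &mut Vec<String>) -> Result<Self::Output, String>;

    /// Blanket driver, analogous to `fully_perform_into` above.
    fn fully_perform(self, constraints: &mut Vec<String>) -> Result<Self::Output, String> {
        if let Some(fast) = self.try_fast_path() {
            return Ok(fast);
        }
        self.perform_query(constraints)
    }
}

struct Subtype { sub: &'static str, sup: &'static str }

impl QueryOp for Subtype {
    type Output = bool;

    fn try_fast_path(&self) -> Option<bool> {
        // Trivially true when both sides are literally the same type.
        if self.sub == self.sup { Some(true) } else { None }
    }

    fn perform_query(self, constraints: &mut Vec<String>) -> Result<bool, String> {
        constraints.push(format!("{} <: {}", self.sub, self.sup));
        Ok(true)
    }
}

fn main() {
    let mut constraints = Vec::new();
    // Fast path: no constraints recorded.
    assert!(Subtype { sub: "T", sup: "T" }.fully_perform(&mut constraints).unwrap());
    assert!(constraints.is_empty());
    // Slow path: one recorded constraint for the region-solver stand-in.
    Subtype { sub: "&'a u8", sup: "&'b u8" }.fully_perform(&mut constraints).unwrap();
    assert_eq!(constraints.len(), 1);
}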
feb38e202cf07d28655db6ff5b9fac35d4943610
3,288
#[doc = "Register `APP_LEDC_INT_MAP` reader"] pub struct R(crate::R<APP_LEDC_INT_MAP_SPEC>); impl core::ops::Deref for R { type Target = crate::R<APP_LEDC_INT_MAP_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<APP_LEDC_INT_MAP_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<APP_LEDC_INT_MAP_SPEC>) -> Self { R(reader) } } #[doc = "Register `APP_LEDC_INT_MAP` writer"] pub struct W(crate::W<APP_LEDC_INT_MAP_SPEC>); impl core::ops::Deref for W { type Target = crate::W<APP_LEDC_INT_MAP_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<APP_LEDC_INT_MAP_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<APP_LEDC_INT_MAP_SPEC>) -> Self { W(writer) } } #[doc = "Field `APP_LEDC_INT_MAP` reader - "] pub struct APP_LEDC_INT_MAP_R(crate::FieldReader<u8, u8>); impl APP_LEDC_INT_MAP_R { #[inline(always)] pub(crate) fn new(bits: u8) -> Self { APP_LEDC_INT_MAP_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for APP_LEDC_INT_MAP_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `APP_LEDC_INT_MAP` writer - "] pub struct APP_LEDC_INT_MAP_W<'a> { w: &'a mut W, } impl<'a> APP_LEDC_INT_MAP_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x1f) | (value as u32 & 0x1f); self.w } } impl R { #[doc = "Bits 0:4"] #[inline(always)] pub fn app_ledc_int_map(&self) -> APP_LEDC_INT_MAP_R { APP_LEDC_INT_MAP_R::new((self.bits & 0x1f) as u8) } } impl W { #[doc = "Bits 0:4"] #[inline(always)] pub fn app_ledc_int_map(&mut self) -> APP_LEDC_INT_MAP_W { APP_LEDC_INT_MAP_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [app_ledc_int_map](index.html) module"] pub struct APP_LEDC_INT_MAP_SPEC; impl crate::RegisterSpec for APP_LEDC_INT_MAP_SPEC { type Ux = u32; } #[doc = "`read()` method returns [app_ledc_int_map::R](R) reader structure"] impl crate::Readable for APP_LEDC_INT_MAP_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [app_ledc_int_map::W](W) writer structure"] impl crate::Writable for APP_LEDC_INT_MAP_SPEC { type Writer = W; } #[doc = "`reset()` method sets APP_LEDC_INT_MAP to value 0x10"] impl crate::Resettable for APP_LEDC_INT_MAP_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0x10 } }
31.615385
397
0.631995
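The register file above is svd2rust output and is meant to be driven through the generated `read`/`write`/`modify` API. A hedged sketch of a call site follows; the `dport` handle and the `pac::DPORT` path are assumptions about the surrounding PAC, and only the `app_ledc_int_map()` field accessor with its 5-bit `bits` setter comes from the record.

// Hypothetical call site; `pac::DPORT` stands in for whichever generated
// peripheral struct actually owns APP_LEDC_INT_MAP.
fn route_ledc_interrupt(dport: &pac::DPORT) {
    // Read the current 5-bit CPU interrupt mapping.
    let current = dport.app_ledc_int_map.read().app_ledc_int_map().bits();

    // Re-route the LEDC interrupt to CPU interrupt line 13. `bits` is unsafe
    // because svd2rust cannot prove the value fits, though the writer masks
    // it to 5 bits anyway.
    dport.app_ledc_int_map.write(|w| unsafe { w.app_ledc_int_map().bits(13) });

    let _ = current;
}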
50cd3188510777fe48f442a4812352438d7aacb8
21,991
use std::borrow::Cow; use rustc::{mir, ty}; use rustc::ty::Instance; use rustc::ty::layout::{self, TyLayout, LayoutOf}; use syntax::source_map::Span; use rustc_target::spec::abi::Abi; use super::{ GlobalId, InterpResult, PointerArithmetic, InterpCx, Machine, OpTy, ImmTy, PlaceTy, MPlaceTy, StackPopCleanup, FnVal, }; impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub(super) fn eval_terminator( &mut self, terminator: &mir::Terminator<'tcx>, ) -> InterpResult<'tcx> { use rustc::mir::TerminatorKind::*; match terminator.kind { Return => { self.frame().return_place.map(|r| self.dump_place(*r)); self.pop_stack_frame(/* unwinding */ false)? } Goto { target } => self.go_to_block(target), SwitchInt { ref discr, ref values, ref targets, .. } => { let discr = self.read_immediate(self.eval_operand(discr, None)?)?; trace!("SwitchInt({:?})", *discr); // Branch to the `otherwise` case by default, if no match is found. let mut target_block = targets[targets.len() - 1]; for (index, &const_int) in values.iter().enumerate() { // Compare using binary_op, to also support pointer values let res = self.overflowing_binary_op(mir::BinOp::Eq, discr, ImmTy::from_uint(const_int, discr.layout), )?.0; if res.to_bool()? { target_block = targets[index]; break; } } self.go_to_block(target_block); } Call { ref func, ref args, ref destination, ref cleanup, .. } => { let func = self.eval_operand(func, None)?; let (fn_val, abi) = match func.layout.ty.kind { ty::FnPtr(sig) => { let caller_abi = sig.abi(); let fn_ptr = self.read_scalar(func)?.not_undef()?; let fn_val = self.memory.get_fn(fn_ptr)?; (fn_val, caller_abi) } ty::FnDef(def_id, substs) => { let sig = func.layout.ty.fn_sig(*self.tcx); (FnVal::Instance(self.resolve(def_id, substs)?), sig.abi()) }, _ => { bug!("invalid callee of type {:?}", func.layout.ty) } }; let args = self.eval_operands(args)?; let ret = match destination { Some((dest, ret)) => Some((self.eval_place(dest)?, *ret)), None => None, }; self.eval_fn_call( fn_val, terminator.source_info.span, abi, &args[..], ret, *cleanup )?; } Drop { ref location, target, unwind, } => { // FIXME(CTFE): forbid drop in const eval let place = self.eval_place(location)?; let ty = place.layout.ty; trace!("TerminatorKind::drop: {:?}, type {}", location, ty); let instance = Instance::resolve_drop_in_place(*self.tcx, ty); self.drop_in_place( place, instance, terminator.source_info.span, target, unwind )?; } Assert { ref cond, expected, ref msg, target, .. } => { let cond_val = self.read_immediate(self.eval_operand(cond, None)?)? .to_scalar()?.to_bool()?; if expected == cond_val { self.go_to_block(target); } else { // Compute error message use rustc::mir::interpret::PanicInfo::*; return Err(match msg { BoundsCheck { ref len, ref index } => { let len = self .read_immediate(self.eval_operand(len, None)?) .expect("can't eval len") .to_scalar()? .to_bits(self.memory.pointer_size())? as u64; let index = self .read_immediate(self.eval_operand(index, None)?) .expect("can't eval index") .to_scalar()? .to_bits(self.memory.pointer_size())? as u64; err_panic!(BoundsCheck { len, index }) } Overflow(op) => err_panic!(Overflow(*op)), OverflowNeg => err_panic!(OverflowNeg), DivisionByZero => err_panic!(DivisionByZero), RemainderByZero => err_panic!(RemainderByZero), ResumedAfterReturn(generator_kind) => err_panic!(ResumedAfterReturn(*generator_kind)), ResumedAfterPanic(generator_kind) => err_panic!(ResumedAfterPanic(*generator_kind)), Panic { .. 
} => bug!("`Panic` variant cannot occur in MIR"), } .into()); } } // When we encounter Resume, we've finished unwinding // cleanup for the current stack frame. We pop it in order // to continue unwinding the next frame Resume => { trace!("unwinding: resuming from cleanup"); // By definition, a Resume terminator means // that we're unwinding self.pop_stack_frame(/* unwinding */ true)?; return Ok(()) }, Yield { .. } | GeneratorDrop | DropAndReplace { .. } | Abort => unimplemented!("{:#?}", terminator.kind), FalseEdges { .. } => bug!("should have been eliminated by\ `simplify_branches` mir pass"), FalseUnwind { .. } => bug!("should have been eliminated by\ `simplify_branches` mir pass"), Unreachable => throw_ub!(Unreachable), } Ok(()) } fn check_argument_compat( rust_abi: bool, caller: TyLayout<'tcx>, callee: TyLayout<'tcx>, ) -> bool { if caller.ty == callee.ty { // No question return true; } if !rust_abi { // Don't risk anything return false; } // Compare layout match (&caller.abi, &callee.abi) { // Different valid ranges are okay (once we enforce validity, // that will take care to make it UB to leave the range, just // like for transmute). (layout::Abi::Scalar(ref caller), layout::Abi::Scalar(ref callee)) => caller.value == callee.value, (layout::Abi::ScalarPair(ref caller1, ref caller2), layout::Abi::ScalarPair(ref callee1, ref callee2)) => caller1.value == callee1.value && caller2.value == callee2.value, // Be conservative _ => false } } /// Pass a single argument, checking the types for compatibility. fn pass_argument( &mut self, rust_abi: bool, caller_arg: &mut impl Iterator<Item=OpTy<'tcx, M::PointerTag>>, callee_arg: PlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx> { if rust_abi && callee_arg.layout.is_zst() { // Nothing to do. trace!("Skipping callee ZST"); return Ok(()); } let caller_arg = caller_arg.next() .ok_or_else(|| err_unsup!(FunctionArgCountMismatch)) ?; if rust_abi { debug_assert!(!caller_arg.layout.is_zst(), "ZSTs must have been already filtered out"); } // Now, check if !Self::check_argument_compat(rust_abi, caller_arg.layout, callee_arg.layout) { throw_unsup!(FunctionArgMismatch(caller_arg.layout.ty, callee_arg.layout.ty)) } // We allow some transmutes here self.copy_op_transmute(caller_arg, callee_arg) } /// Call this function -- pushing the stack frame and initializing the arguments. fn eval_fn_call( &mut self, fn_val: FnVal<'tcx, M::ExtraFnVal>, span: Span, caller_abi: Abi, args: &[OpTy<'tcx, M::PointerTag>], ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>, unwind: Option<mir::BasicBlock> ) -> InterpResult<'tcx> { trace!("eval_fn_call: {:#?}", fn_val); let instance = match fn_val { FnVal::Instance(instance) => instance, FnVal::Other(extra) => { return M::call_extra_fn(self, extra, args, ret, unwind); } }; // ABI check { let callee_abi = { let instance_ty = instance.ty(*self.tcx); match instance_ty.kind { ty::FnDef(..) => instance_ty.fn_sig(*self.tcx).abi(), ty::Closure(..) => Abi::RustCall, ty::Generator(..) => Abi::Rust, _ => bug!("unexpected callee ty: {:?}", instance_ty), } }; let normalize_abi = |abi| match abi { Abi::Rust | Abi::RustCall | Abi::RustIntrinsic | Abi::PlatformIntrinsic => // These are all the same ABI, really. Abi::Rust, abi => abi, }; if normalize_abi(caller_abi) != normalize_abi(callee_abi) { throw_unsup!(FunctionAbiMismatch(caller_abi, callee_abi)) } } match instance.def { ty::InstanceDef::Intrinsic(..) 
=> { assert!(caller_abi == Abi::RustIntrinsic || caller_abi == Abi::PlatformIntrinsic); return M::call_intrinsic(self, span, instance, args, ret, unwind); } ty::InstanceDef::VtableShim(..) | ty::InstanceDef::ReifyShim(..) | ty::InstanceDef::ClosureOnceShim { .. } | ty::InstanceDef::FnPtrShim(..) | ty::InstanceDef::DropGlue(..) | ty::InstanceDef::CloneShim(..) | ty::InstanceDef::Item(_) => { // If this function is a `const fn` then as an optimization we can query this // evaluation immediately. // // For the moment we only do this for functions which take no arguments // (or all arguments are ZSTs) so that we don't memoize too much. if self.tcx.is_const_fn_raw(instance.def.def_id()) && args.iter().all(|a| a.layout.is_zst()) { let gid = GlobalId { instance, promoted: None }; return self.eval_const_fn_call(gid, ret); } // We need MIR for this fn let body = match M::find_fn(self, instance, args, ret, unwind)? { Some(body) => body, None => return Ok(()), }; self.push_stack_frame( instance, span, body, ret.map(|p| p.0), StackPopCleanup::Goto { ret: ret.map(|p| p.1), unwind } )?; // We want to pop this frame again in case there was an error, to put // the blame in the right location. Until the 2018 edition is used in // the compiler, we have to do this with an immediately invoked function. let res = (||{ trace!( "caller ABI: {:?}, args: {:#?}", caller_abi, args.iter() .map(|arg| (arg.layout.ty, format!("{:?}", **arg))) .collect::<Vec<_>>() ); trace!( "spread_arg: {:?}, locals: {:#?}", body.spread_arg, body.args_iter() .map(|local| (local, self.layout_of_local(self.frame(), local, None).unwrap().ty) ) .collect::<Vec<_>>() ); // Figure out how to pass which arguments. // The Rust ABI is special: ZST get skipped. let rust_abi = match caller_abi { Abi::Rust | Abi::RustCall => true, _ => false }; // We have two iterators: Where the arguments come from, // and where they go to. // For where they come from: If the ABI is RustCall, we untuple the // last incoming argument. These two iterators do not have the same type, // so to keep the code paths uniform we accept an allocation // (for RustCall ABI only). let caller_args : Cow<'_, [OpTy<'tcx, M::PointerTag>]> = if caller_abi == Abi::RustCall && !args.is_empty() { // Untuple let (&untuple_arg, args) = args.split_last().unwrap(); trace!("eval_fn_call: Will pass last argument by untupling"); Cow::from(args.iter().map(|&a| Ok(a)) .chain((0..untuple_arg.layout.fields.count()).into_iter() .map(|i| self.operand_field(untuple_arg, i as u64)) ) .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>()?) } else { // Plain arg passing Cow::from(args) }; // Skip ZSTs let mut caller_iter = caller_args.iter() .filter(|op| !rust_abi || !op.layout.is_zst()) .map(|op| *op); // Now we have to spread them out across the callee's locals, // taking into account the `spread_arg`. If we could write // this is a single iterator (that handles `spread_arg`), then // `pass_argument` would be the loop body. It takes care to // not advance `caller_iter` for ZSTs. 
let mut locals_iter = body.args_iter(); while let Some(local) = locals_iter.next() { let dest = self.eval_place( &mir::Place::from(local) )?; if Some(local) == body.spread_arg { // Must be a tuple for i in 0..dest.layout.fields.count() { let dest = self.place_field(dest, i as u64)?; self.pass_argument(rust_abi, &mut caller_iter, dest)?; } } else { // Normal argument self.pass_argument(rust_abi, &mut caller_iter, dest)?; } } // Now we should have no more caller args if caller_iter.next().is_some() { trace!("Caller has passed too many args"); throw_unsup!(FunctionArgCountMismatch) } // Don't forget to check the return type! if let Some((caller_ret, _)) = ret { let callee_ret = self.eval_place( &mir::Place::return_place() )?; if !Self::check_argument_compat( rust_abi, caller_ret.layout, callee_ret.layout, ) { throw_unsup!( FunctionRetMismatch(caller_ret.layout.ty, callee_ret.layout.ty) ) } } else { let local = mir::RETURN_PLACE; let callee_layout = self.layout_of_local(self.frame(), local, None)?; if !callee_layout.abi.is_uninhabited() { throw_unsup!(FunctionRetMismatch( self.tcx.types.never, callee_layout.ty )) } } Ok(()) })(); match res { Err(err) => { self.stack.pop(); Err(err) } Ok(v) => Ok(v) } } // cannot use the shim here, because that will only result in infinite recursion ty::InstanceDef::Virtual(_, idx) => { let mut args = args.to_vec(); // We have to implement all "object safe receivers". Currently we // support built-in pointers (&, &mut, Box) as well as unsized-self. We do // not yet support custom self types. // Also see librustc_codegen_llvm/abi.rs and librustc_codegen_llvm/mir/block.rs. let receiver_place = match args[0].layout.ty.builtin_deref(true) { Some(_) => { // Built-in pointer. self.deref_operand(args[0])? } None => { // Unsized self. args[0].assert_mem_place() } }; // Find and consult vtable let vtable = receiver_place.vtable(); let drop_fn = self.get_vtable_slot(vtable, idx)?; // `*mut receiver_place.layout.ty` is almost the layout that we // want for args[0]: We have to project to field 0 because we want // a thin pointer. assert!(receiver_place.layout.is_unsized()); let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty); let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0)?; // Adjust receiver argument. args[0] = OpTy::from(ImmTy { layout: this_receiver_ptr, imm: receiver_place.ptr.into() }); trace!("Patched self operand to {:#?}", args[0]); // recurse with concrete function self.eval_fn_call(drop_fn, span, caller_abi, &args, ret, unwind) } } } /// Evaluate a const function where all arguments (if any) are zero-sized types. /// The evaluation is memoized thanks to the query system. fn eval_const_fn_call( &mut self, gid: GlobalId<'tcx>, ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>, ) -> InterpResult<'tcx> { trace!("eval_const_fn_call: {:?}", gid); let place = self.const_eval_raw(gid)?; let dest = ret.ok_or_else(|| err_ub!(Unreachable))?.0; self.copy_op(place.into(), dest)?; self.return_to_block(ret.map(|r| r.1))?; self.dump_place(*dest); return Ok(()) } fn drop_in_place( &mut self, place: PlaceTy<'tcx, M::PointerTag>, instance: ty::Instance<'tcx>, span: Span, target: mir::BasicBlock, unwind: Option<mir::BasicBlock> ) -> InterpResult<'tcx> { trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance); // We take the address of the object. This may well be unaligned, which is fine // for us here. 
However, unaligned accesses will probably make the actual drop // implementation fail -- a problem shared by rustc. let place = self.force_allocation(place)?; let (instance, place) = match place.layout.ty.kind { ty::Dynamic(..) => { // Dropping a trait object. self.unpack_dyn_trait(place)? } _ => (instance, place), }; let arg = ImmTy { imm: place.to_ref(), layout: self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?, }; let ty = self.tcx.mk_unit(); // return type is () let dest = MPlaceTy::dangling(self.layout_of(ty)?, self); self.eval_fn_call( FnVal::Instance(instance), span, Abi::Rust, &[arg.into()], Some((dest.into(), target)), unwind ) } }
41.887619
100
0.449366
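`check_argument_compat` in the record above accepts differing caller/callee types only when the Rust ABI is in play and their scalar layouts line up. Because the real `TyLayout`/`Abi` types live deep inside rustc, the snippet below is a self-contained analogue of that rule, written only to make the matching logic concrete; it is not the compiler's implementation.

// Simplified stand-ins for rustc's layout ABI descriptions.
#[derive(PartialEq, Clone, Copy)]
enum ScalarKind { Int, Float, Pointer }

#[derive(PartialEq, Clone, Copy)]
enum Abi {
    Scalar(ScalarKind),
    ScalarPair(ScalarKind, ScalarKind),
    Aggregate,
}

/// Analogue of `check_argument_compat`: identical types always pass; under the
/// Rust ABI, scalars only need matching kinds (their valid ranges may differ);
/// everything else is rejected conservatively.
fn compat(rust_abi: bool, same_ty: bool, caller: Abi, callee: Abi) -> bool {
    if same_ty {
        return true;
    }
    if !rust_abi {
        return false;
    }
    match (caller, callee) {
        (Abi::Scalar(a), Abi::Scalar(b)) => a == b,
        (Abi::ScalarPair(a1, a2), Abi::ScalarPair(b1, b2)) => a1 == b1 && a2 == b2,
        _ => false,
    }
}

fn main() {
    // Two different pointer-shaped types under the Rust ABI: compatible.
    assert!(compat(true, false, Abi::Scalar(ScalarKind::Pointer), Abi::Scalar(ScalarKind::Pointer)));
    // Aggregates of different types: conservatively rejected.
    assert!(!compat(true, false, Abi::Aggregate, Abi::Aggregate));
    // Construct the remaining variants so the sketch stays lint-quiet.
    let _ = (ScalarKind::Int, ScalarKind::Float);
}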
18e9f3a80609a5e3440ea8cd009d17ae2802cdf0
5,987
use neon::types::{JsString, JsArray, JsBoolean, JsUndefined, JsNull, JsNumber, JsValue, JsObject, Value, JsFunction}; use neon::handle::{Handle, Root}; use neon::context::{Context, FunctionContext}; use neon::object::{Object}; use std::rc::Rc; use std::cmp::Ordering; use crate::parser::ExpressionKind; use crate::compiler::CompilerContext; use crate::error::{FinchError, FinchResult}; pub struct RawObject<T: Value>(Root<T>); impl<T: Value> PartialEq<RawObject<T>> for RawObject<T> { fn eq(&self, _other: &RawObject<T>) -> bool { false } } #[derive(std::cmp::PartialEq)] pub enum RawValue { String(String), Number(f64), Boolean(bool), Vec(Rc<Vec<RawValue>>), Object(RawObject<JsObject>), Function(RawObject<JsFunction>), Undefined, Null } impl RawValue { pub fn clone(&self, cx: &mut FunctionContext) -> Self { match self { Self::String(string) => Self::String(string.clone()), Self::Number(num) => Self::Number(*num), Self::Vec(v) => Self::Vec(v.clone()), Self::Boolean(bol) => Self::Boolean(*bol), Self::Object(obj) => Self::Object(RawObject(obj.0.clone(cx))), Self::Function(func) => Self::Function(RawObject(func.0.clone(cx))), Self::Undefined => Self::Undefined, Self::Null => Self::Null } } } pub trait IntoRawValue<'a> { fn raw(&self, cx: &mut FunctionContext<'a>) -> RawValue; } impl<'a> IntoRawValue<'a> for Handle<'a, JsValue> { fn raw(&self, cx: &mut FunctionContext<'a>) -> RawValue { if let Ok(str_handle) = self.downcast::<JsString, _>(cx) { RawValue::String(str_handle.value(cx)) } else if let Ok(num_handle) = self.downcast::<JsNumber, _>(cx) { RawValue::Number(num_handle.value(cx)) } else if let Ok(bool_handle) = self.downcast::<JsBoolean, _>(cx) { RawValue::Boolean(bool_handle.value(cx)) } else if self.is_a::<JsNull, _>(cx) { RawValue::Null } else if self.is_a::<JsUndefined, _>(cx) { RawValue::Undefined } else if let Ok(arr_handle) = self.downcast::<JsArray, _>(cx) { if let Ok(vec) = arr_handle.to_vec(cx) { RawValue::Vec(Rc::new(vec.into_iter().map(|i| i.raw(cx)).collect())) } else { RawValue::Undefined } } else if let Ok(obj_handle) = self.downcast::<JsObject, _>(cx) { RawValue::Object(RawObject(obj_handle.root(cx))) } else if let Ok(fn_handle) = self.downcast::<JsFunction, _>(cx) { RawValue::Function(RawObject(fn_handle.root(cx))) } else { RawValue::Undefined } } } impl RawValue { pub fn js<'a>(&self, cx: &mut FunctionContext<'a>) -> Handle<'a, JsValue> { match self { RawValue::String(val) => cx.string(val).upcast::<JsValue>(), RawValue::Number(num) => cx.number(*num).upcast::<JsValue>(), RawValue::Boolean(b) => cx.boolean(*b).upcast::<JsValue>(), RawValue::Undefined => cx.undefined().upcast::<JsValue>(), RawValue::Null => cx.null().upcast::<JsValue>(), RawValue::Vec(v) => { let arr = JsArray::new(cx, v.len() as u32); for (ind, val) in v.iter().enumerate() { let js_val = val.clone(cx).js(cx); arr.set(cx, ind as u32, js_val).unwrap(); }; arr.upcast::<JsValue>() }, RawValue::Object(obj) => obj.0.to_inner(cx).upcast::<JsValue>(), RawValue::Function(func) => func.0.to_inner(cx).upcast::<JsValue>() } } pub fn is_falsey(&self) -> bool { match self { Self::String(str) => str.is_empty(), Self::Number(num) => *num == 0.0, Self::Boolean(bol) => !(*bol), Self::Null | Self::Undefined => true, Self::Vec(_) | Self::Object(_) | Self::Function(_) => false } } pub fn into_string(self) -> String { match self { Self::String(st) => st, Self::Number(num) => num.to_string(), Self::Boolean(bol) => bol.to_string(), Self::Undefined => String::from("undefined"), Self::Null => String::from("null"), Self::Vec(v) => 
v.iter().map(|val| val.to_string()).collect::<Vec<String>>().join(", "), Self::Object(_) => String::from("[object Object]"), Self::Function(_) => String::from("[function]") } } } impl std::string::ToString for RawValue { fn to_string(&self) -> String { match self { Self::String(st) => st.clone(), Self::Number(num) => num.to_string(), Self::Boolean(bol) => bol.to_string(), Self::Undefined => String::from("undefined"), Self::Null => String::from("null"), Self::Vec(v) => v.iter().map(|val| val.to_string()).collect::<Vec<String>>().join(", "), Self::Object(_) => String::from("[object Object]"), Self::Function(_) => String::from("[function]") } } } pub fn compare_vals(left: &ExpressionKind, right: &ExpressionKind, ctx: &mut CompilerContext) -> FinchResult<Ordering> { let num_left = if let ExpressionKind::Number(num) = left { num.clone() } else { if let RawValue::Number(num) = left.compile(ctx)? { num } else { return Err(FinchError::NotNumbers); } }; let num_right = if let ExpressionKind::Number(num) = right { num.clone() } else { if let RawValue::Number(num) = right.compile(ctx)? { num } else { return Err(FinchError::NotNumbers); } }; Ok(if num_left > num_right { Ordering::Greater } else if num_left == num_right { Ordering::Equal } else { Ordering::Less }) }
35.426036
120
0.543344
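The `IntoRawValue`/`RawValue::js` pair above is what moves values across the JS boundary. A hedged sketch of a neon-exported function using it: the function and export names are invented, it assumes it lives in the same crate (so `IntoRawValue` and `RawValue` are in scope), and the `neon::prelude` / `#[neon::main]` / `export_function` calls assume the same neon version the record already depends on.

use neon::prelude::*;

// Hypothetical export: stringifies its first argument by round-tripping it
// through the crate's RawValue representation defined above.
fn describe(mut cx: FunctionContext) -> JsResult<JsString> {
    let arg = cx.argument::<JsValue>(0)?;
    let raw = arg.raw(&mut cx);  // JS value -> RawValue (IntoRawValue above)
    let text = raw.to_string();  // ToString impl above
    Ok(cx.string(text))
}

#[neon::main]
fn main(mut cx: ModuleContext) -> NeonResult<()> {
    cx.export_function("describe", describe)?;
    Ok(())
}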
239c3782084feb8b816ffde96cd7a5a76f664ca3
13,518
//! Provides Rust Errors for OpenCL's status. use std::{fmt, error}; #[derive(Debug, Copy, Clone)] /// Defines OpenCL errors. pub enum Error { /// Failure with provided value. InvalidValue(&'static str), /// Failure with memory allocation. OutOfMemory(&'static str), /// Failure with Cuda initialization. NotInitialized(&'static str), /// Failure with Cuda initialization. Deinitialized(&'static str), /// Failure with Profiler. ProfilerDisabled(&'static str), /// Failure with Profiler. ProfilerNotInitialized(&'static str), /// Failure with Profiler. ProfilerAlreadyStarted(&'static str), /// Failure with Profiler. ProfilerAlreadyStopped(&'static str), /// Failure with Cuda devices. NoDevice(&'static str), /// Failure with provided Cuda device. InvalidDevice(&'static str), /// Failure with provided Cuda image. InvalidImage(&'static str), /// Failure with provided Cuda context. InvalidContext(&'static str), /// Failure with provided Cuda context. ContextAlreadyCurrent(&'static str), /// Failure MapFailed(&'static str), /// Failure UnmapFailed(&'static str), /// Failure ArrayIsMapped(&'static str), /// Failure AlreadyMapped(&'static str), /// Failure with binary. NoBinaryForGpu(&'static str), /// Failure AlreadyAquired(&'static str), /// Failure NotMapped(&'static str), /// Failure NotMappedAsArray(&'static str), /// Failure NotMappedAsPointer(&'static str), /// Failure EccUncorrectable(&'static str), /// Failure UnsupportedLimit(&'static str), /// Failure with context. ContextAlreadyInUse(&'static str), /// Failure PeerAccessUnsupported(&'static str), /// Failure with provided PTX. InvalidPtx(&'static str), /// Failure InvalidGraphicsContent(&'static str), /// Failure InvalidSource(&'static str), /// Failure FileNotFound(&'static str), /// Failure SharedObjectSymbolNotFound(&'static str), /// Failure SharedObjectInitFailed(&'static str), /// Failure OperatingSystem(&'static str), /// Failure InvalidHandle(&'static str), /// Failure NotFound(&'static str), /// Failure NotReady(&'static str), /// Failure IllegalAddress(&'static str), /// Failure LaunchOutOfResources(&'static str), /// Failure LaunchTimeout(&'static str), /// Failure LauncIncompatibleTexturing(&'static str), /// Failure PeerAccessAlreadyEnabled(&'static str), /// Failure PeerAccessNotEnabled(&'static str), /// Failure PrimaryContextActive(&'static str), /// Failure ContextIsDestroyed(&'static str), /// Failure Assert(&'static str), /// Failure TooManyPeers(&'static str), /// Failure HostMemoryAlreadyRegistered(&'static str), /// Failure HostMemoryNotRegistered(&'static str), /// Failure HardwareStackError(&'static str), /// Failure IllegalInstruction(&'static str), /// Failure MisalignedAddress(&'static str), /// Failure InvalidAddressSpace(&'static str), /// Failure InvalidPc(&'static str), /// Failure LaunchFailed(&'static str), /// Failure NotPermitted(&'static str), /// Failure NotSupported(&'static str), /// Failure Unknown(&'static str), } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Error::ProfilerNotInitialized(ref err) => write!(f, "{:?}", err), Error::ProfilerDisabled(ref err) => write!(f, "{:?}", err), Error::Deinitialized(ref err) => write!(f, "{:?}", err), Error::NotInitialized(ref err) => write!(f, "{:?}", err), Error::OutOfMemory(ref err) => write!(f, "{:?}", err), Error::InvalidValue(ref err) => write!(f, "{:?}", err), Error::NoBinaryForGpu(ref err) => write!(f, "{:?}", err), Error::AlreadyMapped(ref err) => write!(f, "{:?}", err), Error::ArrayIsMapped(ref err) 
=> write!(f, "{:?}", err), Error::UnmapFailed(ref err) => write!(f, "{:?}", err), Error::MapFailed(ref err) => write!(f, "{:?}", err), Error::ContextAlreadyCurrent(ref err) => write!(f, "{:?}", err), Error::InvalidContext(ref err) => write!(f, "{:?}", err), Error::InvalidImage(ref err) => write!(f, "{:?}", err), Error::InvalidDevice(ref err) => write!(f, "{:?}", err), Error::NoDevice(ref err) => write!(f, "{:?}", err), Error::ProfilerAlreadyStopped(ref err) => write!(f, "{:?}", err), Error::ProfilerAlreadyStarted(ref err) => write!(f, "{:?}", err), Error::IllegalAddress(ref err) => write!(f, "{:?}", err), Error::NotReady(ref err) => write!(f, "{:?}", err), Error::NotFound(ref err) => write!(f, "{:?}", err), Error::InvalidHandle(ref err) => write!(f, "{:?}", err), Error::OperatingSystem(ref err) => write!(f, "{:?}", err), Error::SharedObjectInitFailed(ref err) => write!(f, "{:?}", err), Error::SharedObjectSymbolNotFound(ref err) => write!(f, "{:?}", err), Error::FileNotFound(ref err) => write!(f, "{:?}", err), Error::InvalidSource(ref err) => write!(f, "{:?}", err), Error::InvalidGraphicsContent(ref err) => write!(f, "{:?}", err), Error::InvalidPtx(ref err) => write!(f, "{:?}", err), Error::PeerAccessUnsupported(ref err) => write!(f, "{:?}", err), Error::ContextAlreadyInUse(ref err) => write!(f, "{:?}", err), Error::UnsupportedLimit(ref err) => write!(f, "{:?}", err), Error::EccUncorrectable(ref err) => write!(f, "{:?}", err), Error::NotMappedAsPointer(ref err) => write!(f, "{:?}", err), Error::NotMappedAsArray(ref err) => write!(f, "{:?}", err), Error::NotMapped(ref err) => write!(f, "{:?}", err), Error::AlreadyAquired(ref err) => write!(f, "{:?}", err), Error::Unknown(ref err) => write!(f, "{:?}", err), Error::NotSupported(ref err) => write!(f, "{:?}", err), Error::NotPermitted(ref err) => write!(f, "{:?}", err), Error::LaunchFailed(ref err) => write!(f, "{:?}", err), Error::InvalidPc(ref err) => write!(f, "{:?}", err), Error::InvalidAddressSpace(ref err) => write!(f, "{:?}", err), Error::MisalignedAddress(ref err) => write!(f, "{:?}", err), Error::IllegalInstruction(ref err) => write!(f, "{:?}", err), Error::HardwareStackError(ref err) => write!(f, "{:?}", err), Error::HostMemoryNotRegistered(ref err) => write!(f, "{:?}", err), Error::HostMemoryAlreadyRegistered(ref err) => write!(f, "{:?}", err), Error::TooManyPeers(ref err) => write!(f, "{:?}", err), Error::Assert(ref err) => write!(f, "{:?}", err), Error::ContextIsDestroyed(ref err) => write!(f, "{:?}", err), Error::PrimaryContextActive(ref err) => write!(f, "{:?}", err), Error::PeerAccessNotEnabled(ref err) => write!(f, "{:?}", err), Error::PeerAccessAlreadyEnabled(ref err) => write!(f, "{:?}", err), Error::LauncIncompatibleTexturing(ref err) => write!(f, "{:?}", err), Error::LaunchTimeout(ref err) => write!(f, "{:?}", err), Error::LaunchOutOfResources(ref err) => write!(f, "{:?}", err), } } } impl error::Error for Error { fn description(&self) -> &str { match *self { Error::ProfilerNotInitialized(ref err) => err, Error::ProfilerDisabled(ref err) => err, Error::Deinitialized(ref err) => err, Error::NotInitialized(ref err) => err, Error::OutOfMemory(ref err) => err, Error::InvalidValue(ref err) => err, Error::NoBinaryForGpu(ref err) => err, Error::AlreadyMapped(ref err) => err, Error::ArrayIsMapped(ref err) => err, Error::UnmapFailed(ref err) => err, Error::MapFailed(ref err) => err, Error::ContextAlreadyCurrent(ref err) => err, Error::InvalidContext(ref err) => err, Error::InvalidImage(ref err) => err, Error::InvalidDevice(ref err) 
=> err, Error::NoDevice(ref err) => err, Error::ProfilerAlreadyStopped(ref err) => err, Error::ProfilerAlreadyStarted(ref err) => err, Error::IllegalAddress(ref err) => err, Error::NotReady(ref err) => err, Error::NotFound(ref err) => err, Error::InvalidHandle(ref err) => err, Error::OperatingSystem(ref err) => err, Error::SharedObjectInitFailed(ref err) => err, Error::SharedObjectSymbolNotFound(ref err) => err, Error::FileNotFound(ref err) => err, Error::InvalidSource(ref err) => err, Error::InvalidGraphicsContent(ref err) => err, Error::InvalidPtx(ref err) => err, Error::PeerAccessUnsupported(ref err) => err, Error::ContextAlreadyInUse(ref err) => err, Error::UnsupportedLimit(ref err) => err, Error::EccUncorrectable(ref err) => err, Error::NotMappedAsPointer(ref err) => err, Error::NotMappedAsArray(ref err) => err, Error::NotMapped(ref err) => err, Error::AlreadyAquired(ref err) => err, Error::Unknown(ref err) => err, Error::NotSupported(ref err) => err, Error::NotPermitted(ref err) => err, Error::LaunchFailed(ref err) => err, Error::InvalidPc(ref err) => err, Error::InvalidAddressSpace(ref err) => err, Error::MisalignedAddress(ref err) => err, Error::IllegalInstruction(ref err) => err, Error::HardwareStackError(ref err) => err, Error::HostMemoryNotRegistered(ref err) => err, Error::HostMemoryAlreadyRegistered(ref err) => err, Error::TooManyPeers(ref err) => err, Error::Assert(ref err) => err, Error::ContextIsDestroyed(ref err) => err, Error::PrimaryContextActive(ref err) => err, Error::PeerAccessNotEnabled(ref err) => err, Error::PeerAccessAlreadyEnabled(ref err) => err, Error::LauncIncompatibleTexturing(ref err) => err, Error::LaunchTimeout(ref err) => err, Error::LaunchOutOfResources(ref err) => err, } } fn cause(&self) -> Option<&error::Error> { match *self { Error::ProfilerNotInitialized(_) => None, Error::ProfilerDisabled(_) => None, Error::Deinitialized(_) => None, Error::NotInitialized(_) => None, Error::OutOfMemory(_) => None, Error::InvalidValue(_) => None, Error::NoBinaryForGpu(_) => None, Error::AlreadyMapped(_) => None, Error::ArrayIsMapped(_) => None, Error::UnmapFailed(_) => None, Error::MapFailed(_) => None, Error::ContextAlreadyCurrent(_) => None, Error::InvalidContext(_) => None, Error::InvalidImage(_) => None, Error::InvalidDevice(_) => None, Error::NoDevice(_) => None, Error::ProfilerAlreadyStopped(_) => None, Error::ProfilerAlreadyStarted(_) => None, Error::IllegalAddress(_) => None, Error::NotReady(_) => None, Error::NotFound(_) => None, Error::InvalidHandle(_) => None, Error::OperatingSystem(_) => None, Error::SharedObjectInitFailed(_) => None, Error::SharedObjectSymbolNotFound(_) => None, Error::FileNotFound(_) => None, Error::InvalidSource(_) => None, Error::InvalidGraphicsContent(_) => None, Error::InvalidPtx(_) => None, Error::PeerAccessUnsupported(_) => None, Error::ContextAlreadyInUse(_) => None, Error::UnsupportedLimit(_) => None, Error::EccUncorrectable(_) => None, Error::NotMappedAsPointer(_) => None, Error::NotMappedAsArray(_) => None, Error::NotMapped(_) => None, Error::AlreadyAquired(_) => None, Error::Unknown(_) => None, Error::NotSupported(_) => None, Error::NotPermitted(_) => None, Error::LaunchFailed(_) => None, Error::InvalidPc(_) => None, Error::InvalidAddressSpace(_) => None, Error::MisalignedAddress(_) => None, Error::IllegalInstruction(_) => None, Error::HardwareStackError(_) => None, Error::HostMemoryNotRegistered(_) => None, Error::HostMemoryAlreadyRegistered(_) => None, Error::TooManyPeers(_) => None, Error::Assert(_) => None, 
Error::ContextIsDestroyed(_) => None, Error::PrimaryContextActive(_) => None, Error::PeerAccessNotEnabled(_) => None, Error::PeerAccessAlreadyEnabled(_) => None, Error::LauncIncompatibleTexturing(_) => None, Error::LaunchTimeout(_) => None, Error::LaunchOutOfResources(_) => None, } } }
43.188498
82
0.553336
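The enum above is just a typed mirror of driver status strings; nothing in the record shows how a raw status code becomes one of its variants, so the translation below is an assumption for illustration. The numeric codes and constant names are illustrative, and only a few variants are handled.

// Hypothetical status-to-error translation using the Error enum above.
fn check(status: i32) -> Result<(), Error> {
    match status {
        0 => Ok(()),
        1 => Err(Error::InvalidValue("CUDA_ERROR_INVALID_VALUE")),
        2 => Err(Error::OutOfMemory("CUDA_ERROR_OUT_OF_MEMORY")),
        3 => Err(Error::NotInitialized("CUDA_ERROR_NOT_INITIALIZED")),
        _ => Err(Error::Unknown("unrecognized driver status code")),
    }
}

// The `fmt::Display` impl above prints the wrapped &str with `{:?}`, so:
// check(2).unwrap_err().to_string() == "\"CUDA_ERROR_OUT_OF_MEMORY\""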
2847c762b3e1c92807b75debe193976e95ee8b97
53,674
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Overloadable operators. //! //! Implementing these traits allows you to get an effect similar to //! overloading operators. //! //! Some of these traits are imported by the prelude, so they are available in //! every Rust program. //! //! Many of the operators take their operands by value. In non-generic //! contexts involving built-in types, this is usually not a problem. //! However, using these operators in generic code, requires some //! attention if values have to be reused as opposed to letting the operators //! consume them. One option is to occasionally use `clone()`. //! Another option is to rely on the types involved providing additional //! operator implementations for references. For example, for a user-defined //! type `T` which is supposed to support addition, it is probably a good //! idea to have both `T` and `&T` implement the traits `Add<T>` and `Add<&T>` //! so that generic code can be written without unnecessary cloning. //! //! # Examples //! //! This example creates a `Point` struct that implements `Add` and `Sub`, and //! then demonstrates adding and subtracting two `Point`s. //! //! ```rust //! use std::ops::{Add, Sub}; //! //! #[derive(Debug)] //! struct Point { //! x: i32, //! y: i32, //! } //! //! impl Add for Point { //! type Output = Point; //! //! fn add(self, other: Point) -> Point { //! Point {x: self.x + other.x, y: self.y + other.y} //! } //! } //! //! impl Sub for Point { //! type Output = Point; //! //! fn sub(self, other: Point) -> Point { //! Point {x: self.x - other.x, y: self.y - other.y} //! } //! } //! fn main() { //! println!("{:?}", Point {x: 1, y: 0} + Point {x: 2, y: 3}); //! println!("{:?}", Point {x: 1, y: 0} - Point {x: 2, y: 3}); //! } //! ``` //! //! See the documentation for each trait for a minimum implementation that //! prints something to the screen. #![stable(feature = "rust1", since = "1.0.0")] use cmp::PartialOrd; use fmt; use convert::From; use marker::{Sized, Unsize}; use num::One; /// The `Drop` trait is used to run some code when a value goes out of scope. /// This is sometimes called a 'destructor'. /// /// # Examples /// /// A trivial implementation of `Drop`. The `drop` method is called when `_x` /// goes out of scope, and therefore `main` prints `Dropping!`. /// /// ``` /// struct HasDrop; /// /// impl Drop for HasDrop { /// fn drop(&mut self) { /// println!("Dropping!"); /// } /// } /// /// fn main() { /// let _x = HasDrop; /// } /// ``` #[lang = "drop"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Drop { /// A method called when the value goes out of scope. /// /// When this method has been called, `self` has not yet been deallocated. /// If it were, `self` would be a dangling reference. /// /// After this function is over, the memory of `self` will be deallocated. /// /// # Panics /// /// Given that a `panic!` will call `drop()` as it unwinds, any `panic!` in /// a `drop()` implementation will likely abort. 
#[stable(feature = "rust1", since = "1.0.0")] fn drop(&mut self); } // implements the unary operator "op &T" // based on "op T" where T is expected to be `Copy`able macro_rules! forward_ref_unop { (impl $imp:ident, $method:ident for $t:ty) => { #[stable(feature = "rust1", since = "1.0.0")] impl<'a> $imp for &'a $t { type Output = <$t as $imp>::Output; #[inline] fn $method(self) -> <$t as $imp>::Output { $imp::$method(*self) } } } } // implements binary operators "&T op U", "T op &U", "&T op &U" // based on "T op U" where T and U are expected to be `Copy`able macro_rules! forward_ref_binop { (impl $imp:ident, $method:ident for $t:ty, $u:ty) => { #[stable(feature = "rust1", since = "1.0.0")] impl<'a> $imp<$u> for &'a $t { type Output = <$t as $imp<$u>>::Output; #[inline] fn $method(self, other: $u) -> <$t as $imp<$u>>::Output { $imp::$method(*self, other) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a> $imp<&'a $u> for $t { type Output = <$t as $imp<$u>>::Output; #[inline] fn $method(self, other: &'a $u) -> <$t as $imp<$u>>::Output { $imp::$method(self, *other) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, 'b> $imp<&'a $u> for &'b $t { type Output = <$t as $imp<$u>>::Output; #[inline] fn $method(self, other: &'a $u) -> <$t as $imp<$u>>::Output { $imp::$method(*self, *other) } } } } /// The `Add` trait is used to specify the functionality of `+`. /// /// # Examples /// /// A trivial implementation of `Add`. When `Foo + Foo` happens, it ends up /// calling `add`, and therefore, `main` prints `Adding!`. /// /// ``` /// use std::ops::Add; /// /// struct Foo; /// /// impl Add for Foo { /// type Output = Foo; /// /// fn add(self, _rhs: Foo) -> Foo { /// println!("Adding!"); /// self /// } /// } /// /// fn main() { /// Foo + Foo; /// } /// ``` #[lang = "add"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Add<RHS=Self> { /// The resulting type after applying the `+` operator #[stable(feature = "rust1", since = "1.0.0")] type Output; /// The method for the `+` operator #[stable(feature = "rust1", since = "1.0.0")] fn add(self, rhs: RHS) -> Self::Output; } macro_rules! add_impl { ($($t:ty)*) => ($( #[stable(feature = "rust1", since = "1.0.0")] impl Add for $t { type Output = $t; #[inline] fn add(self, other: $t) -> $t { self + other } } forward_ref_binop! { impl Add, add for $t, $t } )*) } add_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// The `Sub` trait is used to specify the functionality of `-`. /// /// # Examples /// /// A trivial implementation of `Sub`. When `Foo - Foo` happens, it ends up /// calling `sub`, and therefore, `main` prints `Subtracting!`. /// /// ``` /// use std::ops::Sub; /// /// struct Foo; /// /// impl Sub for Foo { /// type Output = Foo; /// /// fn sub(self, _rhs: Foo) -> Foo { /// println!("Subtracting!"); /// self /// } /// } /// /// fn main() { /// Foo - Foo; /// } /// ``` #[lang = "sub"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Sub<RHS=Self> { /// The resulting type after applying the `-` operator #[stable(feature = "rust1", since = "1.0.0")] type Output; /// The method for the `-` operator #[stable(feature = "rust1", since = "1.0.0")] fn sub(self, rhs: RHS) -> Self::Output; } macro_rules! sub_impl { ($($t:ty)*) => ($( #[stable(feature = "rust1", since = "1.0.0")] impl Sub for $t { type Output = $t; #[inline] fn sub(self, other: $t) -> $t { self - other } } forward_ref_binop! { impl Sub, sub for $t, $t } )*) } sub_impl! 
{ usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// The `Mul` trait is used to specify the functionality of `*`. /// /// # Examples /// /// A trivial implementation of `Mul`. When `Foo * Foo` happens, it ends up /// calling `mul`, and therefore, `main` prints `Multiplying!`. /// /// ``` /// use std::ops::Mul; /// /// struct Foo; /// /// impl Mul for Foo { /// type Output = Foo; /// /// fn mul(self, _rhs: Foo) -> Foo { /// println!("Multiplying!"); /// self /// } /// } /// /// fn main() { /// Foo * Foo; /// } /// ``` #[lang = "mul"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Mul<RHS=Self> { /// The resulting type after applying the `*` operator #[stable(feature = "rust1", since = "1.0.0")] type Output; /// The method for the `*` operator #[stable(feature = "rust1", since = "1.0.0")] fn mul(self, rhs: RHS) -> Self::Output; } macro_rules! mul_impl { ($($t:ty)*) => ($( #[stable(feature = "rust1", since = "1.0.0")] impl Mul for $t { type Output = $t; #[inline] fn mul(self, other: $t) -> $t { self * other } } forward_ref_binop! { impl Mul, mul for $t, $t } )*) } mul_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// The `Div` trait is used to specify the functionality of `/`. /// /// # Examples /// /// A trivial implementation of `Div`. When `Foo / Foo` happens, it ends up /// calling `div`, and therefore, `main` prints `Dividing!`. /// /// ``` /// use std::ops::Div; /// /// struct Foo; /// /// impl Div for Foo { /// type Output = Foo; /// /// fn div(self, _rhs: Foo) -> Foo { /// println!("Dividing!"); /// self /// } /// } /// /// fn main() { /// Foo / Foo; /// } /// ``` #[lang = "div"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Div<RHS=Self> { /// The resulting type after applying the `/` operator #[stable(feature = "rust1", since = "1.0.0")] type Output; /// The method for the `/` operator #[stable(feature = "rust1", since = "1.0.0")] fn div(self, rhs: RHS) -> Self::Output; } macro_rules! div_impl_integer { ($($t:ty)*) => ($( /// This operation rounds towards zero, truncating any /// fractional part of the exact result. #[stable(feature = "rust1", since = "1.0.0")] impl Div for $t { type Output = $t; #[inline] fn div(self, other: $t) -> $t { self / other } } forward_ref_binop! { impl Div, div for $t, $t } )*) } div_impl_integer! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 } macro_rules! div_impl_float { ($($t:ty)*) => ($( #[stable(feature = "rust1", since = "1.0.0")] impl Div for $t { type Output = $t; #[inline] fn div(self, other: $t) -> $t { self / other } } forward_ref_binop! { impl Div, div for $t, $t } )*) } div_impl_float! { f32 f64 } /// The `Rem` trait is used to specify the functionality of `%`. /// /// # Examples /// /// A trivial implementation of `Rem`. When `Foo % Foo` happens, it ends up /// calling `rem`, and therefore, `main` prints `Remainder-ing!`. /// /// ``` /// use std::ops::Rem; /// /// struct Foo; /// /// impl Rem for Foo { /// type Output = Foo; /// /// fn rem(self, _rhs: Foo) -> Foo { /// println!("Remainder-ing!"); /// self /// } /// } /// /// fn main() { /// Foo % Foo; /// } /// ``` #[lang = "rem"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Rem<RHS=Self> { /// The resulting type after applying the `%` operator #[stable(feature = "rust1", since = "1.0.0")] type Output = Self; /// The method for the `%` operator #[stable(feature = "rust1", since = "1.0.0")] fn rem(self, rhs: RHS) -> Self::Output; } macro_rules! rem_impl_integer { ($($t:ty)*) => ($( /// This operation satisfies `n % d == n - (n / d) * d`. 
The /// result has the same sign as the left operand. #[stable(feature = "rust1", since = "1.0.0")] impl Rem for $t { type Output = $t; #[inline] fn rem(self, other: $t) -> $t { self % other } } forward_ref_binop! { impl Rem, rem for $t, $t } )*) } rem_impl_integer! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 } macro_rules! rem_impl_float { ($($t:ty)*) => ($( #[stable(feature = "rust1", since = "1.0.0")] impl Rem for $t { type Output = $t; #[inline] fn rem(self, other: $t) -> $t { self % other } } forward_ref_binop! { impl Rem, rem for $t, $t } )*) } rem_impl_float! { f32 f64 } /// The `Neg` trait is used to specify the functionality of unary `-`. /// /// # Examples /// /// A trivial implementation of `Neg`. When `-Foo` happens, it ends up calling /// `neg`, and therefore, `main` prints `Negating!`. /// /// ``` /// use std::ops::Neg; /// /// struct Foo; /// /// impl Neg for Foo { /// type Output = Foo; /// /// fn neg(self) -> Foo { /// println!("Negating!"); /// self /// } /// } /// /// fn main() { /// -Foo; /// } /// ``` #[lang = "neg"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Neg { /// The resulting type after applying the `-` operator #[stable(feature = "rust1", since = "1.0.0")] type Output; /// The method for the unary `-` operator #[stable(feature = "rust1", since = "1.0.0")] fn neg(self) -> Self::Output; } macro_rules! neg_impl_core { ($id:ident => $body:expr, $($t:ty)*) => ($( #[stable(feature = "rust1", since = "1.0.0")] impl Neg for $t { type Output = $t; #[inline] fn neg(self) -> $t { let $id = self; $body } } forward_ref_unop! { impl Neg, neg for $t } )*) } macro_rules! neg_impl_numeric { ($($t:ty)*) => { neg_impl_core!{ x => -x, $($t)*} } } macro_rules! neg_impl_unsigned { ($($t:ty)*) => { neg_impl_core!{ x => { !x.wrapping_add(1) }, $($t)*} } } // neg_impl_unsigned! { usize u8 u16 u32 u64 } neg_impl_numeric! { isize i8 i16 i32 i64 f32 f64 } /// The `Not` trait is used to specify the functionality of unary `!`. /// /// # Examples /// /// A trivial implementation of `Not`. When `!Foo` happens, it ends up calling /// `not`, and therefore, `main` prints `Not-ing!`. /// /// ``` /// use std::ops::Not; /// /// struct Foo; /// /// impl Not for Foo { /// type Output = Foo; /// /// fn not(self) -> Foo { /// println!("Not-ing!"); /// self /// } /// } /// /// fn main() { /// !Foo; /// } /// ``` #[lang = "not"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Not { /// The resulting type after applying the `!` operator #[stable(feature = "rust1", since = "1.0.0")] type Output; /// The method for the unary `!` operator #[stable(feature = "rust1", since = "1.0.0")] fn not(self) -> Self::Output; } macro_rules! not_impl { ($($t:ty)*) => ($( #[stable(feature = "rust1", since = "1.0.0")] impl Not for $t { type Output = $t; #[inline] fn not(self) -> $t { !self } } forward_ref_unop! { impl Not, not for $t } )*) } not_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// The `BitAnd` trait is used to specify the functionality of `&`. /// /// # Examples /// /// A trivial implementation of `BitAnd`. When `Foo & Foo` happens, it ends up /// calling `bitand`, and therefore, `main` prints `Bitwise And-ing!`. 
/// /// ``` /// use std::ops::BitAnd; /// /// struct Foo; /// /// impl BitAnd for Foo { /// type Output = Foo; /// /// fn bitand(self, _rhs: Foo) -> Foo { /// println!("Bitwise And-ing!"); /// self /// } /// } /// /// fn main() { /// Foo & Foo; /// } /// ``` #[lang = "bitand"] #[stable(feature = "rust1", since = "1.0.0")] pub trait BitAnd<RHS=Self> { /// The resulting type after applying the `&` operator #[stable(feature = "rust1", since = "1.0.0")] type Output; /// The method for the `&` operator #[stable(feature = "rust1", since = "1.0.0")] fn bitand(self, rhs: RHS) -> Self::Output; } macro_rules! bitand_impl { ($($t:ty)*) => ($( #[stable(feature = "rust1", since = "1.0.0")] impl BitAnd for $t { type Output = $t; #[inline] fn bitand(self, rhs: $t) -> $t { self & rhs } } forward_ref_binop! { impl BitAnd, bitand for $t, $t } )*) } bitand_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// The `BitOr` trait is used to specify the functionality of `|`. /// /// # Examples /// /// A trivial implementation of `BitOr`. When `Foo | Foo` happens, it ends up /// calling `bitor`, and therefore, `main` prints `Bitwise Or-ing!`. /// /// ``` /// use std::ops::BitOr; /// /// struct Foo; /// /// impl BitOr for Foo { /// type Output = Foo; /// /// fn bitor(self, _rhs: Foo) -> Foo { /// println!("Bitwise Or-ing!"); /// self /// } /// } /// /// fn main() { /// Foo | Foo; /// } /// ``` #[lang = "bitor"] #[stable(feature = "rust1", since = "1.0.0")] pub trait BitOr<RHS=Self> { /// The resulting type after applying the `|` operator #[stable(feature = "rust1", since = "1.0.0")] type Output; /// The method for the `|` operator #[stable(feature = "rust1", since = "1.0.0")] fn bitor(self, rhs: RHS) -> Self::Output; } macro_rules! bitor_impl { ($($t:ty)*) => ($( #[stable(feature = "rust1", since = "1.0.0")] impl BitOr for $t { type Output = $t; #[inline] fn bitor(self, rhs: $t) -> $t { self | rhs } } forward_ref_binop! { impl BitOr, bitor for $t, $t } )*) } bitor_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// The `BitXor` trait is used to specify the functionality of `^`. /// /// # Examples /// /// A trivial implementation of `BitXor`. When `Foo ^ Foo` happens, it ends up /// calling `bitxor`, and therefore, `main` prints `Bitwise Xor-ing!`. /// /// ``` /// use std::ops::BitXor; /// /// struct Foo; /// /// impl BitXor for Foo { /// type Output = Foo; /// /// fn bitxor(self, _rhs: Foo) -> Foo { /// println!("Bitwise Xor-ing!"); /// self /// } /// } /// /// fn main() { /// Foo ^ Foo; /// } /// ``` #[lang = "bitxor"] #[stable(feature = "rust1", since = "1.0.0")] pub trait BitXor<RHS=Self> { /// The resulting type after applying the `^` operator #[stable(feature = "rust1", since = "1.0.0")] type Output; /// The method for the `^` operator #[stable(feature = "rust1", since = "1.0.0")] fn bitxor(self, rhs: RHS) -> Self::Output; } macro_rules! bitxor_impl { ($($t:ty)*) => ($( #[stable(feature = "rust1", since = "1.0.0")] impl BitXor for $t { type Output = $t; #[inline] fn bitxor(self, other: $t) -> $t { self ^ other } } forward_ref_binop! { impl BitXor, bitxor for $t, $t } )*) } bitxor_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// The `Shl` trait is used to specify the functionality of `<<`. /// /// # Examples /// /// A trivial implementation of `Shl`. When `Foo << Foo` happens, it ends up /// calling `shl`, and therefore, `main` prints `Shifting left!`. 
/// /// ``` /// use std::ops::Shl; /// /// struct Foo; /// /// impl Shl<Foo> for Foo { /// type Output = Foo; /// /// fn shl(self, _rhs: Foo) -> Foo { /// println!("Shifting left!"); /// self /// } /// } /// /// fn main() { /// Foo << Foo; /// } /// ``` #[lang = "shl"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Shl<RHS> { /// The resulting type after applying the `<<` operator #[stable(feature = "rust1", since = "1.0.0")] type Output; /// The method for the `<<` operator #[stable(feature = "rust1", since = "1.0.0")] fn shl(self, rhs: RHS) -> Self::Output; } macro_rules! shl_impl { ($t:ty, $f:ty) => ( #[stable(feature = "rust1", since = "1.0.0")] impl Shl<$f> for $t { type Output = $t; #[inline] fn shl(self, other: $f) -> $t { self << other } } forward_ref_binop! { impl Shl, shl for $t, $f } ) } macro_rules! shl_impl_all { ($($t:ty)*) => ($( shl_impl! { $t, u8 } shl_impl! { $t, u16 } shl_impl! { $t, u32 } shl_impl! { $t, u64 } shl_impl! { $t, usize } shl_impl! { $t, i8 } shl_impl! { $t, i16 } shl_impl! { $t, i32 } shl_impl! { $t, i64 } shl_impl! { $t, isize } )*) } shl_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } /// The `Shr` trait is used to specify the functionality of `>>`. /// /// # Examples /// /// A trivial implementation of `Shr`. When `Foo >> Foo` happens, it ends up /// calling `shr`, and therefore, `main` prints `Shifting right!`. /// /// ``` /// use std::ops::Shr; /// /// struct Foo; /// /// impl Shr<Foo> for Foo { /// type Output = Foo; /// /// fn shr(self, _rhs: Foo) -> Foo { /// println!("Shifting right!"); /// self /// } /// } /// /// fn main() { /// Foo >> Foo; /// } /// ``` #[lang = "shr"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Shr<RHS> { /// The resulting type after applying the `>>` operator #[stable(feature = "rust1", since = "1.0.0")] type Output; /// The method for the `>>` operator #[stable(feature = "rust1", since = "1.0.0")] fn shr(self, rhs: RHS) -> Self::Output; } macro_rules! shr_impl { ($t:ty, $f:ty) => ( #[stable(feature = "rust1", since = "1.0.0")] impl Shr<$f> for $t { type Output = $t; #[inline] fn shr(self, other: $f) -> $t { self >> other } } forward_ref_binop! { impl Shr, shr for $t, $f } ) } macro_rules! shr_impl_all { ($($t:ty)*) => ($( shr_impl! { $t, u8 } shr_impl! { $t, u16 } shr_impl! { $t, u32 } shr_impl! { $t, u64 } shr_impl! { $t, usize } shr_impl! { $t, i8 } shr_impl! { $t, i16 } shr_impl! { $t, i32 } shr_impl! { $t, i64 } shr_impl! { $t, isize } )*) } shr_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } /// The `AddAssign` trait is used to specify the functionality of `+=`. /// /// # Examples /// /// A trivial implementation of `AddAssign`. When `Foo += Foo` happens, it ends up /// calling `add_assign`, and therefore, `main` prints `Adding!`. /// /// ``` /// use std::ops::AddAssign; /// /// struct Foo; /// /// impl AddAssign for Foo { /// fn add_assign(&mut self, _rhs: Foo) { /// println!("Adding!"); /// } /// } /// /// # #[allow(unused_assignments)] /// fn main() { /// let mut foo = Foo; /// foo += Foo; /// } /// ``` #[lang = "add_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait AddAssign<Rhs=Self> { /// The method for the `+=` operator #[stable(feature = "op_assign_traits", since = "1.8.0")] fn add_assign(&mut self, Rhs); } macro_rules! add_assign_impl { ($($t:ty)+) => ($( #[stable(feature = "op_assign_traits", since = "1.8.0")] impl AddAssign for $t { #[inline] fn add_assign(&mut self, other: $t) { *self += other } } )+) } add_assign_impl! 
{ usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// The `SubAssign` trait is used to specify the functionality of `-=`. /// /// # Examples /// /// A trivial implementation of `SubAssign`. When `Foo -= Foo` happens, it ends up /// calling `sub_assign`, and therefore, `main` prints `Subtracting!`. /// /// ``` /// use std::ops::SubAssign; /// /// struct Foo; /// /// impl SubAssign for Foo { /// fn sub_assign(&mut self, _rhs: Foo) { /// println!("Subtracting!"); /// } /// } /// /// # #[allow(unused_assignments)] /// fn main() { /// let mut foo = Foo; /// foo -= Foo; /// } /// ``` #[lang = "sub_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait SubAssign<Rhs=Self> { /// The method for the `-=` operator #[stable(feature = "op_assign_traits", since = "1.8.0")] fn sub_assign(&mut self, Rhs); } macro_rules! sub_assign_impl { ($($t:ty)+) => ($( #[stable(feature = "op_assign_traits", since = "1.8.0")] impl SubAssign for $t { #[inline] fn sub_assign(&mut self, other: $t) { *self -= other } } )+) } sub_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// The `MulAssign` trait is used to specify the functionality of `*=`. /// /// # Examples /// /// A trivial implementation of `MulAssign`. When `Foo *= Foo` happens, it ends up /// calling `mul_assign`, and therefore, `main` prints `Multiplying!`. /// /// ``` /// use std::ops::MulAssign; /// /// struct Foo; /// /// impl MulAssign for Foo { /// fn mul_assign(&mut self, _rhs: Foo) { /// println!("Multiplying!"); /// } /// } /// /// # #[allow(unused_assignments)] /// fn main() { /// let mut foo = Foo; /// foo *= Foo; /// } /// ``` #[lang = "mul_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait MulAssign<Rhs=Self> { /// The method for the `*=` operator #[stable(feature = "op_assign_traits", since = "1.8.0")] fn mul_assign(&mut self, Rhs); } macro_rules! mul_assign_impl { ($($t:ty)+) => ($( #[stable(feature = "op_assign_traits", since = "1.8.0")] impl MulAssign for $t { #[inline] fn mul_assign(&mut self, other: $t) { *self *= other } } )+) } mul_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// The `DivAssign` trait is used to specify the functionality of `/=`. /// /// # Examples /// /// A trivial implementation of `DivAssign`. When `Foo /= Foo` happens, it ends up /// calling `div_assign`, and therefore, `main` prints `Dividing!`. /// /// ``` /// use std::ops::DivAssign; /// /// struct Foo; /// /// impl DivAssign for Foo { /// fn div_assign(&mut self, _rhs: Foo) { /// println!("Dividing!"); /// } /// } /// /// # #[allow(unused_assignments)] /// fn main() { /// let mut foo = Foo; /// foo /= Foo; /// } /// ``` #[lang = "div_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait DivAssign<Rhs=Self> { /// The method for the `/=` operator #[stable(feature = "op_assign_traits", since = "1.8.0")] fn div_assign(&mut self, Rhs); } macro_rules! div_assign_impl { ($($t:ty)+) => ($( #[stable(feature = "op_assign_traits", since = "1.8.0")] impl DivAssign for $t { #[inline] fn div_assign(&mut self, other: $t) { *self /= other } } )+) } div_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// The `RemAssign` trait is used to specify the functionality of `%=`. /// /// # Examples /// /// A trivial implementation of `RemAssign`. When `Foo %= Foo` happens, it ends up /// calling `rem_assign`, and therefore, `main` prints `Remainder-ing!`. 
/// /// ``` /// use std::ops::RemAssign; /// /// struct Foo; /// /// impl RemAssign for Foo { /// fn rem_assign(&mut self, _rhs: Foo) { /// println!("Remainder-ing!"); /// } /// } /// /// # #[allow(unused_assignments)] /// fn main() { /// let mut foo = Foo; /// foo %= Foo; /// } /// ``` #[lang = "rem_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait RemAssign<Rhs=Self> { /// The method for the `%=` operator #[stable(feature = "op_assign_traits", since = "1.8.0")] fn rem_assign(&mut self, Rhs); } macro_rules! rem_assign_impl { ($($t:ty)+) => ($( #[stable(feature = "op_assign_traits", since = "1.8.0")] impl RemAssign for $t { #[inline] fn rem_assign(&mut self, other: $t) { *self %= other } } )+) } rem_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// The `BitAndAssign` trait is used to specify the functionality of `&=`. /// /// # Examples /// /// A trivial implementation of `BitAndAssign`. When `Foo &= Foo` happens, it ends up /// calling `bitand_assign`, and therefore, `main` prints `Bitwise And-ing!`. /// /// ``` /// use std::ops::BitAndAssign; /// /// struct Foo; /// /// impl BitAndAssign for Foo { /// fn bitand_assign(&mut self, _rhs: Foo) { /// println!("Bitwise And-ing!"); /// } /// } /// /// # #[allow(unused_assignments)] /// fn main() { /// let mut foo = Foo; /// foo &= Foo; /// } /// ``` #[lang = "bitand_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait BitAndAssign<Rhs=Self> { /// The method for the `&` operator #[stable(feature = "op_assign_traits", since = "1.8.0")] fn bitand_assign(&mut self, Rhs); } macro_rules! bitand_assign_impl { ($($t:ty)+) => ($( #[stable(feature = "op_assign_traits", since = "1.8.0")] impl BitAndAssign for $t { #[inline] fn bitand_assign(&mut self, other: $t) { *self &= other } } )+) } bitand_assign_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// The `BitOrAssign` trait is used to specify the functionality of `|=`. /// /// # Examples /// /// A trivial implementation of `BitOrAssign`. When `Foo |= Foo` happens, it ends up /// calling `bitor_assign`, and therefore, `main` prints `Bitwise Or-ing!`. /// /// ``` /// use std::ops::BitOrAssign; /// /// struct Foo; /// /// impl BitOrAssign for Foo { /// fn bitor_assign(&mut self, _rhs: Foo) { /// println!("Bitwise Or-ing!"); /// } /// } /// /// # #[allow(unused_assignments)] /// fn main() { /// let mut foo = Foo; /// foo |= Foo; /// } /// ``` #[lang = "bitor_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait BitOrAssign<Rhs=Self> { /// The method for the `|=` operator #[stable(feature = "op_assign_traits", since = "1.8.0")] fn bitor_assign(&mut self, Rhs); } macro_rules! bitor_assign_impl { ($($t:ty)+) => ($( #[stable(feature = "op_assign_traits", since = "1.8.0")] impl BitOrAssign for $t { #[inline] fn bitor_assign(&mut self, other: $t) { *self |= other } } )+) } bitor_assign_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// The `BitXorAssign` trait is used to specify the functionality of `^=`. /// /// # Examples /// /// A trivial implementation of `BitXorAssign`. When `Foo ^= Foo` happens, it ends up /// calling `bitxor_assign`, and therefore, `main` prints `Bitwise Xor-ing!`. 
/// /// ``` /// use std::ops::BitXorAssign; /// /// struct Foo; /// /// impl BitXorAssign for Foo { /// fn bitxor_assign(&mut self, _rhs: Foo) { /// println!("Bitwise Xor-ing!"); /// } /// } /// /// # #[allow(unused_assignments)] /// fn main() { /// let mut foo = Foo; /// foo ^= Foo; /// } /// ``` #[lang = "bitxor_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait BitXorAssign<Rhs=Self> { /// The method for the `^=` operator #[stable(feature = "op_assign_traits", since = "1.8.0")] fn bitxor_assign(&mut self, Rhs); } macro_rules! bitxor_assign_impl { ($($t:ty)+) => ($( #[stable(feature = "op_assign_traits", since = "1.8.0")] impl BitXorAssign for $t { #[inline] fn bitxor_assign(&mut self, other: $t) { *self ^= other } } )+) } bitxor_assign_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// The `ShlAssign` trait is used to specify the functionality of `<<=`. /// /// # Examples /// /// A trivial implementation of `ShlAssign`. When `Foo <<= Foo` happens, it ends up /// calling `shl_assign`, and therefore, `main` prints `Shifting left!`. /// /// ``` /// use std::ops::ShlAssign; /// /// struct Foo; /// /// impl ShlAssign<Foo> for Foo { /// fn shl_assign(&mut self, _rhs: Foo) { /// println!("Shifting left!"); /// } /// } /// /// # #[allow(unused_assignments)] /// fn main() { /// let mut foo = Foo; /// foo <<= Foo; /// } /// ``` #[lang = "shl_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait ShlAssign<Rhs> { /// The method for the `<<=` operator #[stable(feature = "op_assign_traits", since = "1.8.0")] fn shl_assign(&mut self, Rhs); } macro_rules! shl_assign_impl { ($t:ty, $f:ty) => ( #[stable(feature = "op_assign_traits", since = "1.8.0")] impl ShlAssign<$f> for $t { #[inline] fn shl_assign(&mut self, other: $f) { *self <<= other } } ) } macro_rules! shl_assign_impl_all { ($($t:ty)*) => ($( shl_assign_impl! { $t, u8 } shl_assign_impl! { $t, u16 } shl_assign_impl! { $t, u32 } shl_assign_impl! { $t, u64 } shl_assign_impl! { $t, usize } shl_assign_impl! { $t, i8 } shl_assign_impl! { $t, i16 } shl_assign_impl! { $t, i32 } shl_assign_impl! { $t, i64 } shl_assign_impl! { $t, isize } )*) } shl_assign_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } /// The `ShrAssign` trait is used to specify the functionality of `>>=`. /// /// # Examples /// /// A trivial implementation of `ShrAssign`. When `Foo >>= Foo` happens, it ends up /// calling `shr_assign`, and therefore, `main` prints `Shifting right!`. /// /// ``` /// use std::ops::ShrAssign; /// /// struct Foo; /// /// impl ShrAssign<Foo> for Foo { /// fn shr_assign(&mut self, _rhs: Foo) { /// println!("Shifting right!"); /// } /// } /// /// # #[allow(unused_assignments)] /// fn main() { /// let mut foo = Foo; /// foo >>= Foo; /// } /// ``` #[lang = "shr_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait ShrAssign<Rhs=Self> { /// The method for the `>>=` operator #[stable(feature = "op_assign_traits", since = "1.8.0")] fn shr_assign(&mut self, Rhs); } macro_rules! shr_assign_impl { ($t:ty, $f:ty) => ( #[stable(feature = "op_assign_traits", since = "1.8.0")] impl ShrAssign<$f> for $t { #[inline] fn shr_assign(&mut self, other: $f) { *self >>= other } } ) } macro_rules! shr_assign_impl_all { ($($t:ty)*) => ($( shr_assign_impl! { $t, u8 } shr_assign_impl! { $t, u16 } shr_assign_impl! { $t, u32 } shr_assign_impl! { $t, u64 } shr_assign_impl! { $t, usize } shr_assign_impl! { $t, i8 } shr_assign_impl! { $t, i16 } shr_assign_impl! { $t, i32 } shr_assign_impl! 
{ $t, i64 } shr_assign_impl! { $t, isize } )*) } shr_assign_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } /// The `Index` trait is used to specify the functionality of indexing operations /// like `arr[idx]` when used in an immutable context. /// /// # Examples /// /// A trivial implementation of `Index`. When `Foo[Bar]` happens, it ends up /// calling `index`, and therefore, `main` prints `Indexing!`. /// /// ``` /// use std::ops::Index; /// /// #[derive(Copy, Clone)] /// struct Foo; /// struct Bar; /// /// impl Index<Bar> for Foo { /// type Output = Foo; /// /// fn index<'a>(&'a self, _index: Bar) -> &'a Foo { /// println!("Indexing!"); /// self /// } /// } /// /// fn main() { /// Foo[Bar]; /// } /// ``` #[lang = "index"] #[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Index<Idx: ?Sized> { /// The returned type after indexing #[stable(feature = "rust1", since = "1.0.0")] type Output: ?Sized; /// The method for the indexing (`Foo[Bar]`) operation #[stable(feature = "rust1", since = "1.0.0")] fn index(&self, index: Idx) -> &Self::Output; } /// The `IndexMut` trait is used to specify the functionality of indexing /// operations like `arr[idx]`, when used in a mutable context. /// /// # Examples /// /// A trivial implementation of `IndexMut`. When `Foo[Bar]` happens, it ends up /// calling `index_mut`, and therefore, `main` prints `Indexing!`. /// /// ``` /// use std::ops::{Index, IndexMut}; /// /// #[derive(Copy, Clone)] /// struct Foo; /// struct Bar; /// /// impl Index<Bar> for Foo { /// type Output = Foo; /// /// fn index<'a>(&'a self, _index: Bar) -> &'a Foo { /// self /// } /// } /// /// impl IndexMut<Bar> for Foo { /// fn index_mut<'a>(&'a mut self, _index: Bar) -> &'a mut Foo { /// println!("Indexing!"); /// self /// } /// } /// /// fn main() { /// &mut Foo[Bar]; /// } /// ``` #[lang = "index_mut"] #[rustc_on_unimplemented = "the type `{Self}` cannot be mutably indexed by `{Idx}`"] #[stable(feature = "rust1", since = "1.0.0")] pub trait IndexMut<Idx: ?Sized>: Index<Idx> { /// The method for the indexing (`Foo[Bar]`) operation #[stable(feature = "rust1", since = "1.0.0")] fn index_mut(&mut self, index: Idx) -> &mut Self::Output; } /// An unbounded range. #[derive(Copy, Clone, PartialEq, Eq)] #[stable(feature = "rust1", since = "1.0.0")] pub struct RangeFull; #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for RangeFull { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "..") } } /// A (half-open) range which is bounded at both ends. #[derive(Clone, PartialEq, Eq)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Range<Idx> { /// The lower bound of the range (inclusive). #[stable(feature = "rust1", since = "1.0.0")] pub start: Idx, /// The upper bound of the range (exclusive). #[stable(feature = "rust1", since = "1.0.0")] pub end: Idx, } #[stable(feature = "rust1", since = "1.0.0")] impl<Idx: fmt::Debug> fmt::Debug for Range<Idx> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "{:?}..{:?}", self.start, self.end) } } /// A range which is only bounded below. #[derive(Clone, PartialEq, Eq)] #[stable(feature = "rust1", since = "1.0.0")] pub struct RangeFrom<Idx> { /// The lower bound of the range (inclusive). 
#[stable(feature = "rust1", since = "1.0.0")] pub start: Idx, } #[stable(feature = "rust1", since = "1.0.0")] impl<Idx: fmt::Debug> fmt::Debug for RangeFrom<Idx> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "{:?}..", self.start) } } /// A range which is only bounded above. #[derive(Copy, Clone, PartialEq, Eq)] #[stable(feature = "rust1", since = "1.0.0")] pub struct RangeTo<Idx> { /// The upper bound of the range (exclusive). #[stable(feature = "rust1", since = "1.0.0")] pub end: Idx, } #[stable(feature = "rust1", since = "1.0.0")] impl<Idx: fmt::Debug> fmt::Debug for RangeTo<Idx> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "..{:?}", self.end) } } /// An inclusive range which is bounded at both ends. #[derive(Copy, Clone, PartialEq, Eq)] #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] pub enum RangeInclusive<Idx> { /// Empty range (iteration has finished) #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] Empty { /// The point at which iteration finished #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] at: Idx }, /// Non-empty range (iteration will yield value(s)) #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] NonEmpty { /// The lower bound of the range (inclusive). #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] start: Idx, /// The upper bound of the range (inclusive). #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] end: Idx, }, } #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] impl<Idx: fmt::Debug> fmt::Debug for RangeInclusive<Idx> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { use self::RangeInclusive::*; match *self { Empty { ref at } => write!(fmt, "[empty range @ {:?}]", at), NonEmpty { ref start, ref end } => write!(fmt, "{:?}...{:?}", start, end), } } } #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] impl<Idx: PartialOrd + One + Sub<Output=Idx>> From<Range<Idx>> for RangeInclusive<Idx> { fn from(range: Range<Idx>) -> RangeInclusive<Idx> { use self::RangeInclusive::*; if range.start < range.end { NonEmpty { start: range.start, end: range.end - Idx::one() // can't underflow because end > start >= MIN } } else { Empty { at: range.start } } } } /// An inclusive range which is only bounded above. #[derive(Copy, Clone, PartialEq, Eq)] #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] pub struct RangeToInclusive<Idx> { /// The upper bound of the range (inclusive) #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] pub end: Idx, } #[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] impl<Idx: fmt::Debug> fmt::Debug for RangeToInclusive<Idx> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "...{:?}", self.end) } } // RangeToInclusive<Idx> cannot impl From<RangeTo<Idx>> // because underflow would be possible with (..0).into() /// The `Deref` trait is used to specify the functionality of dereferencing /// operations, like `*v`. /// /// `Deref` also enables ['`Deref` coercions'][coercions]. 
/// /// [coercions]: ../../book/deref-coercions.html /// /// # Examples /// /// A struct with a single field which is accessible via dereferencing the /// struct. /// /// ``` /// use std::ops::Deref; /// /// struct DerefExample<T> { /// value: T /// } /// /// impl<T> Deref for DerefExample<T> { /// type Target = T; /// /// fn deref(&self) -> &T { /// &self.value /// } /// } /// /// fn main() { /// let x = DerefExample { value: 'a' }; /// assert_eq!('a', *x); /// } /// ``` #[lang = "deref"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Deref { /// The resulting type after dereferencing #[stable(feature = "rust1", since = "1.0.0")] type Target: ?Sized; /// The method called to dereference a value #[stable(feature = "rust1", since = "1.0.0")] fn deref(&self) -> &Self::Target; } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: ?Sized> Deref for &'a T { type Target = T; fn deref(&self) -> &T { *self } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: ?Sized> Deref for &'a mut T { type Target = T; fn deref(&self) -> &T { *self } } /// The `DerefMut` trait is used to specify the functionality of dereferencing /// mutably like `*v = 1;` /// /// `DerefMut` also enables ['`Deref` coercions'][coercions]. /// /// [coercions]: ../../book/deref-coercions.html /// /// # Examples /// /// A struct with a single field which is modifiable via dereferencing the /// struct. /// /// ``` /// use std::ops::{Deref, DerefMut}; /// /// struct DerefMutExample<T> { /// value: T /// } /// /// impl<T> Deref for DerefMutExample<T> { /// type Target = T; /// /// fn deref<'a>(&'a self) -> &'a T { /// &self.value /// } /// } /// /// impl<T> DerefMut for DerefMutExample<T> { /// fn deref_mut<'a>(&'a mut self) -> &'a mut T { /// &mut self.value /// } /// } /// /// fn main() { /// let mut x = DerefMutExample { value: 'a' }; /// *x = 'b'; /// assert_eq!('b', *x); /// } /// ``` #[lang = "deref_mut"] #[stable(feature = "rust1", since = "1.0.0")] pub trait DerefMut: Deref { /// The method called to mutably dereference a value #[stable(feature = "rust1", since = "1.0.0")] fn deref_mut(&mut self) -> &mut Self::Target; } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: ?Sized> DerefMut for &'a mut T { fn deref_mut(&mut self) -> &mut T { *self } } /// A version of the call operator that takes an immutable receiver. #[lang = "fn"] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_paren_sugar] #[fundamental] // so that regex can rely that `&str: !FnMut` pub trait Fn<Args> : FnMut<Args> { /// This is called when the call operator is used. #[unstable(feature = "fn_traits", issue = "29625")] extern "rust-call" fn call(&self, args: Args) -> Self::Output; } /// A version of the call operator that takes a mutable receiver. #[lang = "fn_mut"] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_paren_sugar] #[fundamental] // so that regex can rely that `&str: !FnMut` pub trait FnMut<Args> : FnOnce<Args> { /// This is called when the call operator is used. #[unstable(feature = "fn_traits", issue = "29625")] extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output; } /// A version of the call operator that takes a by-value receiver. #[lang = "fn_once"] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_paren_sugar] #[fundamental] // so that regex can rely that `&str: !FnMut` pub trait FnOnce<Args> { /// The returned type after the call operator is used. #[unstable(feature = "fn_traits", issue = "29625")] type Output; /// This is called when the call operator is used. 
#[unstable(feature = "fn_traits", issue = "29625")] extern "rust-call" fn call_once(self, args: Args) -> Self::Output; } mod impls { use marker::Sized; use super::{Fn, FnMut, FnOnce}; #[stable(feature = "rust1", since = "1.0.0")] impl<'a,A,F:?Sized> Fn<A> for &'a F where F : Fn<A> { extern "rust-call" fn call(&self, args: A) -> F::Output { (**self).call(args) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a,A,F:?Sized> FnMut<A> for &'a F where F : Fn<A> { extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output { (**self).call(args) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a,A,F:?Sized> FnOnce<A> for &'a F where F : Fn<A> { type Output = F::Output; extern "rust-call" fn call_once(self, args: A) -> F::Output { (*self).call(args) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a,A,F:?Sized> FnMut<A> for &'a mut F where F : FnMut<A> { extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output { (*self).call_mut(args) } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a,A,F:?Sized> FnOnce<A> for &'a mut F where F : FnMut<A> { type Output = F::Output; extern "rust-call" fn call_once(mut self, args: A) -> F::Output { (*self).call_mut(args) } } } /// Trait that indicates that this is a pointer or a wrapper for one, /// where unsizing can be performed on the pointee. #[unstable(feature = "coerce_unsized", issue = "27732")] #[lang="coerce_unsized"] pub trait CoerceUnsized<T> { // Empty. } // &mut T -> &mut U #[unstable(feature = "coerce_unsized", issue = "27732")] impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {} // &mut T -> &U #[unstable(feature = "coerce_unsized", issue = "27732")] impl<'a, 'b: 'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b mut T {} // &mut T -> *mut U #[unstable(feature = "coerce_unsized", issue = "27732")] impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for &'a mut T {} // &mut T -> *const U #[unstable(feature = "coerce_unsized", issue = "27732")] impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for &'a mut T {} // &T -> &U #[unstable(feature = "coerce_unsized", issue = "27732")] impl<'a, 'b: 'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {} // &T -> *const U #[unstable(feature = "coerce_unsized", issue = "27732")] impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for &'a T {} // *mut T -> *mut U #[unstable(feature = "coerce_unsized", issue = "27732")] impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {} // *mut T -> *const U #[unstable(feature = "coerce_unsized", issue = "27732")] impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *mut T {} // *const T -> *const U #[unstable(feature = "coerce_unsized", issue = "27732")] impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {} /// Both `in (PLACE) EXPR` and `box EXPR` desugar into expressions /// that allocate an intermediate "place" that holds uninitialized /// state. The desugaring evaluates EXPR, and writes the result at /// the address returned by the `pointer` method of this trait. /// /// A `Place` can be thought of as a special representation for a /// hypothetical `&uninit` reference (which Rust cannot currently /// express directly). That is, it represents a pointer to /// uninitialized storage. /// /// The client is responsible for two steps: First, initializing the /// payload (it can access its address via `pointer`). 
Second, /// converting the agent to an instance of the owning pointer, via the /// appropriate `finalize` method (see the `InPlace` trait). /// /// If evaluating EXPR fails, then it is up to the destructor for the /// implementation of Place to clean up any intermediate state /// (e.g. deallocate box storage, pop a stack, etc). #[unstable(feature = "placement_new_protocol", issue = "27779")] pub trait Place<Data: ?Sized> { /// Returns the address where the input value will be written. /// Note that the data at this address is generally uninitialized, /// and thus one should use `ptr::write` for initializing it. fn pointer(&mut self) -> *mut Data; } /// Interface to implementations of `in (PLACE) EXPR`. /// /// `in (PLACE) EXPR` effectively desugars into: /// /// ```rust,ignore /// let p = PLACE; /// let mut place = Placer::make_place(p); /// let raw_place = Place::pointer(&mut place); /// let value = EXPR; /// unsafe { /// std::ptr::write(raw_place, value); /// InPlace::finalize(place) /// } /// ``` /// /// The type of `in (PLACE) EXPR` is derived from the type of `PLACE`; /// if the type of `PLACE` is `P`, then the final type of the whole /// expression is `P::Place::Owner` (see the `InPlace` and `Boxed` /// traits). /// /// Values for types implementing this trait usually are transient /// intermediate values (e.g. the return value of `Vec::emplace_back`) /// or `Copy`, since the `make_place` method takes `self` by value. #[unstable(feature = "placement_new_protocol", issue = "27779")] pub trait Placer<Data: ?Sized> { /// `Place` is the intermediate agent guarding the /// uninitialized state for `Data`. type Place: InPlace<Data>; /// Creates a fresh place from `self`. fn make_place(self) -> Self::Place; } /// Specialization of `Place` trait supporting `in (PLACE) EXPR`. #[unstable(feature = "placement_new_protocol", issue = "27779")] pub trait InPlace<Data: ?Sized>: Place<Data> { /// `Owner` is the type of the end value of `in (PLACE) EXPR`. /// /// Note that when `in (PLACE) EXPR` is solely used for /// side-effecting an existing data-structure, /// e.g. `Vec::emplace_back`, then `Owner` need not carry any /// information at all (e.g. it can be the unit type `()` in that /// case). type Owner; /// Converts self into the final value, shifting /// deallocation/cleanup responsibilities (if any remain), over to /// the returned instance of `Owner` and forgetting self. unsafe fn finalize(self) -> Self::Owner; } /// Core trait for the `box EXPR` form. /// /// `box EXPR` effectively desugars into: /// /// ```rust,ignore /// let mut place = BoxPlace::make_place(); /// let raw_place = Place::pointer(&mut place); /// let value = EXPR; /// unsafe { /// ::std::ptr::write(raw_place, value); /// Boxed::finalize(place) /// } /// ``` /// /// The type of `box EXPR` is supplied from its surrounding /// context; in the above expansion, the result type `T` is used /// to determine which implementation of `Boxed` to use, and that /// `<T as Boxed>` in turn dictates which /// implementation of `BoxPlace` to use, namely: /// `<<T as Boxed>::Place as BoxPlace>`. #[unstable(feature = "placement_new_protocol", issue = "27779")] pub trait Boxed { /// The kind of data that is stored in this kind of box. type Data; /* (`Data` unused b/c cannot yet express below bound.) */ /// The place that will negotiate the storage of the data.
type Place: BoxPlace<Self::Data>; /// Converts filled place into final owning value, shifting /// deallocation/cleanup responsibilities (if any remain), over to /// returned instance of `Self` and forgetting `filled`. unsafe fn finalize(filled: Self::Place) -> Self; } /// Specialization of `Place` trait supporting `box EXPR`. #[unstable(feature = "placement_new_protocol", issue = "27779")] pub trait BoxPlace<Data: ?Sized> : Place<Data> { /// Creates a globally fresh place. fn make_place() -> Self; }
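// --- Added example (not part of the original libcore source) ---
// A minimal sketch illustrating the module documentation's advice that a
// user-defined type supporting addition should also implement `Add` for
// references, so generic code can reuse operands without cloning.
// `Meters` is a hypothetical type invented for this illustration.
use std::ops::Add;

#[derive(Debug, Clone, Copy, PartialEq)]
struct Meters(f64);

impl Add for Meters {
    type Output = Meters;
    fn add(self, rhs: Meters) -> Meters {
        Meters(self.0 + rhs.0)
    }
}

impl<'a, 'b> Add<&'b Meters> for &'a Meters {
    type Output = Meters;
    fn add(self, rhs: &'b Meters) -> Meters {
        Meters(self.0 + rhs.0)
    }
}

fn main() {
    let a = Meters(1.5);
    let b = Meters(2.5);
    assert_eq!(a + b, Meters(4.0));   // by value
    assert_eq!(&a + &b, Meters(4.0)); // by reference, operands stay usable
}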
27.51102
97
0.552204
f52d201abecb90a6bee88466146fefe33650519e
23,390
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use rustc_data_structures::graph; use cfg::*; use middle::region; use ty::{self, TyCtxt}; use syntax::ptr::P; use hir::{self, PatKind}; use hir::def_id::DefId; struct CFGBuilder<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, owner_def_id: DefId, tables: &'a ty::TypeckTables<'tcx>, graph: CFGGraph, fn_exit: CFGIndex, loop_scopes: Vec<LoopScope>, breakable_block_scopes: Vec<BlockScope>, } #[derive(Copy, Clone)] struct BlockScope { block_expr_id: hir::ItemLocalId, // id of breakable block expr node break_index: CFGIndex, // where to go on `break` } #[derive(Copy, Clone)] struct LoopScope { loop_id: hir::ItemLocalId, // id of loop/while node continue_index: CFGIndex, // where to go on a `loop` break_index: CFGIndex, // where to go on a `break` } pub fn construct<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, body: &hir::Body) -> CFG { let mut graph = graph::Graph::new(); let entry = graph.add_node(CFGNodeData::Entry); // `fn_exit` is target of return exprs, which lies somewhere // outside input `body`. (Distinguishing `fn_exit` and `body_exit` // also resolves chicken-and-egg problem that arises if you try to // have return exprs jump to `body_exit` during construction.) let fn_exit = graph.add_node(CFGNodeData::Exit); let body_exit; // Find the tables for this body. let owner_def_id = tcx.hir.local_def_id(tcx.hir.body_owner(body.id())); let tables = tcx.typeck_tables_of(owner_def_id); let mut cfg_builder = CFGBuilder { tcx, owner_def_id, tables, graph, fn_exit, loop_scopes: Vec::new(), breakable_block_scopes: Vec::new(), }; body_exit = cfg_builder.expr(&body.value, entry); cfg_builder.add_contained_edge(body_exit, fn_exit); let CFGBuilder { graph, .. 
} = cfg_builder; CFG { owner_def_id, graph, entry, exit: fn_exit, } } impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { fn block(&mut self, blk: &hir::Block, pred: CFGIndex) -> CFGIndex { if blk.targeted_by_break { let expr_exit = self.add_ast_node(blk.hir_id.local_id, &[]); self.breakable_block_scopes.push(BlockScope { block_expr_id: blk.hir_id.local_id, break_index: expr_exit, }); let mut stmts_exit = pred; for stmt in &blk.stmts { stmts_exit = self.stmt(stmt, stmts_exit); } let blk_expr_exit = self.opt_expr(&blk.expr, stmts_exit); self.add_contained_edge(blk_expr_exit, expr_exit); self.breakable_block_scopes.pop(); expr_exit } else { let mut stmts_exit = pred; for stmt in &blk.stmts { stmts_exit = self.stmt(stmt, stmts_exit); } let expr_exit = self.opt_expr(&blk.expr, stmts_exit); self.add_ast_node(blk.hir_id.local_id, &[expr_exit]) } } fn stmt(&mut self, stmt: &hir::Stmt, pred: CFGIndex) -> CFGIndex { let hir_id = self.tcx.hir.node_to_hir_id(stmt.node.id()); match stmt.node { hir::StmtDecl(ref decl, _) => { let exit = self.decl(&decl, pred); self.add_ast_node(hir_id.local_id, &[exit]) } hir::StmtExpr(ref expr, _) | hir::StmtSemi(ref expr, _) => { let exit = self.expr(&expr, pred); self.add_ast_node(hir_id.local_id, &[exit]) } } } fn decl(&mut self, decl: &hir::Decl, pred: CFGIndex) -> CFGIndex { match decl.node { hir::DeclLocal(ref local) => { let init_exit = self.opt_expr(&local.init, pred); self.pat(&local.pat, init_exit) } hir::DeclItem(_) => pred, } } fn pat(&mut self, pat: &hir::Pat, pred: CFGIndex) -> CFGIndex { match pat.node { PatKind::Binding(.., None) | PatKind::Path(_) | PatKind::Lit(..) | PatKind::Range(..) | PatKind::Wild => self.add_ast_node(pat.hir_id.local_id, &[pred]), PatKind::Box(ref subpat) | PatKind::Ref(ref subpat, _) | PatKind::Binding(.., Some(ref subpat)) => { let subpat_exit = self.pat(&subpat, pred); self.add_ast_node(pat.hir_id.local_id, &[subpat_exit]) } PatKind::TupleStruct(_, ref subpats, _) | PatKind::Tuple(ref subpats, _) => { let pats_exit = self.pats_all(subpats.iter(), pred); self.add_ast_node(pat.hir_id.local_id, &[pats_exit]) } PatKind::Struct(_, ref subpats, _) => { let pats_exit = self.pats_all(subpats.iter().map(|f| &f.node.pat), pred); self.add_ast_node(pat.hir_id.local_id, &[pats_exit]) } PatKind::Slice(ref pre, ref vec, ref post) => { let pre_exit = self.pats_all(pre.iter(), pred); let vec_exit = self.pats_all(vec.iter(), pre_exit); let post_exit = self.pats_all(post.iter(), vec_exit); self.add_ast_node(pat.hir_id.local_id, &[post_exit]) } } } fn pats_all<'b, I: Iterator<Item=&'b P<hir::Pat>>>(&mut self, pats: I, pred: CFGIndex) -> CFGIndex { //! Handles case where all of the patterns must match. pats.fold(pred, |pred, pat| self.pat(&pat, pred)) } fn expr(&mut self, expr: &hir::Expr, pred: CFGIndex) -> CFGIndex { match expr.node { hir::ExprBlock(ref blk, _) => { let blk_exit = self.block(&blk, pred); self.add_ast_node(expr.hir_id.local_id, &[blk_exit]) } hir::ExprIf(ref cond, ref then, None) => { // // [pred] // | // v 1 // [cond] // | // / \ // / \ // v 2 * // [then] | // | | // v 3 v 4 // [..expr..] // let cond_exit = self.expr(&cond, pred); // 1 let then_exit = self.expr(&then, cond_exit); // 2 self.add_ast_node(expr.hir_id.local_id, &[cond_exit, then_exit]) // 3,4 } hir::ExprIf(ref cond, ref then, Some(ref otherwise)) => { // // [pred] // | // v 1 // [cond] // | // / \ // / \ // v 2 v 3 // [then][otherwise] // | | // v 4 v 5 // [..expr..] 
// let cond_exit = self.expr(&cond, pred); // 1 let then_exit = self.expr(&then, cond_exit); // 2 let else_exit = self.expr(&otherwise, cond_exit); // 3 self.add_ast_node(expr.hir_id.local_id, &[then_exit, else_exit]) // 4, 5 } hir::ExprWhile(ref cond, ref body, _) => { // // [pred] // | // v 1 // [loopback] <--+ 5 // | | // v 2 | // +-----[cond] | // | | | // | v 4 | // | [body] -----+ // v 3 // [expr] // // Note that `break` and `continue` statements // may cause additional edges. let loopback = self.add_dummy_node(&[pred]); // 1 // Create expr_exit without pred (cond_exit) let expr_exit = self.add_ast_node(expr.hir_id.local_id, &[]); // 3 // The LoopScope needs to be on the loop_scopes stack while evaluating the // condition and the body of the loop (both can break out of the loop) self.loop_scopes.push(LoopScope { loop_id: expr.hir_id.local_id, continue_index: loopback, break_index: expr_exit }); let cond_exit = self.expr(&cond, loopback); // 2 // Add pred (cond_exit) to expr_exit self.add_contained_edge(cond_exit, expr_exit); let body_exit = self.block(&body, cond_exit); // 4 self.add_contained_edge(body_exit, loopback); // 5 self.loop_scopes.pop(); expr_exit } hir::ExprLoop(ref body, _, _) => { // // [pred] // | // v 1 // [loopback] <---+ // | 4 | // v 3 | // [body] ------+ // // [expr] 2 // // Note that `break` and `loop` statements // may cause additional edges. let loopback = self.add_dummy_node(&[pred]); // 1 let expr_exit = self.add_ast_node(expr.hir_id.local_id, &[]); // 2 self.loop_scopes.push(LoopScope { loop_id: expr.hir_id.local_id, continue_index: loopback, break_index: expr_exit, }); let body_exit = self.block(&body, loopback); // 3 self.add_contained_edge(body_exit, loopback); // 4 self.loop_scopes.pop(); expr_exit } hir::ExprMatch(ref discr, ref arms, _) => { self.match_(expr.hir_id.local_id, &discr, &arms, pred) } hir::ExprBinary(op, ref l, ref r) if op.node.is_lazy() => { // // [pred] // | // v 1 // [l] // | // / \ // / \ // v 2 * // [r] | // | | // v 3 v 4 // [..exit..] 
// let l_exit = self.expr(&l, pred); // 1 let r_exit = self.expr(&r, l_exit); // 2 self.add_ast_node(expr.hir_id.local_id, &[l_exit, r_exit]) // 3,4 } hir::ExprRet(ref v) => { let v_exit = self.opt_expr(v, pred); let b = self.add_ast_node(expr.hir_id.local_id, &[v_exit]); self.add_returning_edge(expr, b); self.add_unreachable_node() } hir::ExprBreak(destination, ref opt_expr) => { let v = self.opt_expr(opt_expr, pred); let (target_scope, break_dest) = self.find_scope_edge(expr, destination, ScopeCfKind::Break); let b = self.add_ast_node(expr.hir_id.local_id, &[v]); self.add_exiting_edge(expr, b, target_scope, break_dest); self.add_unreachable_node() } hir::ExprContinue(destination) => { let (target_scope, cont_dest) = self.find_scope_edge(expr, destination, ScopeCfKind::Continue); let a = self.add_ast_node(expr.hir_id.local_id, &[pred]); self.add_exiting_edge(expr, a, target_scope, cont_dest); self.add_unreachable_node() } hir::ExprArray(ref elems) => { self.straightline(expr, pred, elems.iter().map(|e| &*e)) } hir::ExprCall(ref func, ref args) => { self.call(expr, pred, &func, args.iter().map(|e| &*e)) } hir::ExprMethodCall(.., ref args) => { self.call(expr, pred, &args[0], args[1..].iter().map(|e| &*e)) } hir::ExprIndex(ref l, ref r) | hir::ExprBinary(_, ref l, ref r) if self.tables.is_method_call(expr) => { self.call(expr, pred, &l, Some(&**r).into_iter()) } hir::ExprUnary(_, ref e) if self.tables.is_method_call(expr) => { self.call(expr, pred, &e, None::<hir::Expr>.iter()) } hir::ExprTup(ref exprs) => { self.straightline(expr, pred, exprs.iter().map(|e| &*e)) } hir::ExprStruct(_, ref fields, ref base) => { let field_cfg = self.straightline(expr, pred, fields.iter().map(|f| &*f.expr)); self.opt_expr(base, field_cfg) } hir::ExprAssign(ref l, ref r) | hir::ExprAssignOp(_, ref l, ref r) => { self.straightline(expr, pred, [r, l].iter().map(|&e| &**e)) } hir::ExprIndex(ref l, ref r) | hir::ExprBinary(_, ref l, ref r) => { // NB: && and || handled earlier self.straightline(expr, pred, [l, r].iter().map(|&e| &**e)) } hir::ExprBox(ref e) | hir::ExprAddrOf(_, ref e) | hir::ExprCast(ref e, _) | hir::ExprType(ref e, _) | hir::ExprUnary(_, ref e) | hir::ExprField(ref e, _) | hir::ExprYield(ref e) | hir::ExprRepeat(ref e, _) => { self.straightline(expr, pred, Some(&**e).into_iter()) } hir::ExprInlineAsm(_, ref outputs, ref inputs) => { let post_outputs = self.exprs(outputs.iter().map(|e| &*e), pred); let post_inputs = self.exprs(inputs.iter().map(|e| &*e), post_outputs); self.add_ast_node(expr.hir_id.local_id, &[post_inputs]) } hir::ExprClosure(..) | hir::ExprLit(..) | hir::ExprPath(_) => { self.straightline(expr, pred, None::<hir::Expr>.iter()) } } } fn call<'b, I: Iterator<Item=&'b hir::Expr>>(&mut self, call_expr: &hir::Expr, pred: CFGIndex, func_or_rcvr: &hir::Expr, args: I) -> CFGIndex { let func_or_rcvr_exit = self.expr(func_or_rcvr, pred); let ret = self.straightline(call_expr, func_or_rcvr_exit, args); // FIXME(canndrew): This is_never should probably be an is_uninhabited. if self.tables.expr_ty(call_expr).is_never() { self.add_unreachable_node() } else { ret } } fn exprs<'b, I: Iterator<Item=&'b hir::Expr>>(&mut self, exprs: I, pred: CFGIndex) -> CFGIndex { //! Constructs graph for `exprs` evaluated in order exprs.fold(pred, |p, e| self.expr(e, p)) } fn opt_expr(&mut self, opt_expr: &Option<P<hir::Expr>>, pred: CFGIndex) -> CFGIndex { //! 
Constructs graph for `opt_expr` evaluated, if Some opt_expr.iter().fold(pred, |p, e| self.expr(&e, p)) } fn straightline<'b, I: Iterator<Item=&'b hir::Expr>>(&mut self, expr: &hir::Expr, pred: CFGIndex, subexprs: I) -> CFGIndex { //! Handles case of an expression that evaluates `subexprs` in order let subexprs_exit = self.exprs(subexprs, pred); self.add_ast_node(expr.hir_id.local_id, &[subexprs_exit]) } fn match_(&mut self, id: hir::ItemLocalId, discr: &hir::Expr, arms: &[hir::Arm], pred: CFGIndex) -> CFGIndex { // The CFG for match expression is quite complex, so no ASCII // art for it (yet). // // The CFG generated below matches roughly what MIR contains. // Each pattern and guard is visited in parallel, with // arms containing multiple patterns generating multiple nodes // for the same guard expression. The guard expressions chain // into each other from top to bottom, with a specific // exception to allow some additional valid programs // (explained below). MIR differs slightly in that the // pattern matching may continue after a guard but the visible // behaviour should be the same. // // What is going on is explained in further comments. // Visit the discriminant expression let discr_exit = self.expr(discr, pred); // Add a node for the exit of the match expression as a whole. let expr_exit = self.add_ast_node(id, &[]); // Keep track of the previous guard expressions let mut prev_guards = Vec::new(); for arm in arms { // Add an exit node for when we've visited all the // patterns and the guard (if there is one) in the arm. let arm_exit = self.add_dummy_node(&[]); for pat in &arm.pats { // Visit the pattern, coming from the discriminant exit let mut pat_exit = self.pat(&pat, discr_exit); // If there is a guard expression, handle it here if let Some(ref guard) = arm.guard { // Add a dummy node for the previous guard // expression to target let guard_start = self.add_dummy_node(&[pat_exit]); // Visit the guard expression let guard_exit = self.expr(&guard, guard_start); // #47295: We used to have very special case code // here for when a pair of arms are both formed // solely from constants, and if so, not add these // edges. But this was not actually sound without // other constraints that we stopped enforcing at // some point. 
while let Some(prev) = prev_guards.pop() { self.add_contained_edge(prev, guard_start); } // Push the guard onto the list of previous guards prev_guards.push(guard_exit); // Update the exit node for the pattern pat_exit = guard_exit; } // Add an edge from the exit of this pattern to the // exit of the arm self.add_contained_edge(pat_exit, arm_exit); } // Visit the body of this arm let body_exit = self.expr(&arm.body, arm_exit); // Link the body to the exit of the expression self.add_contained_edge(body_exit, expr_exit); } expr_exit } fn add_dummy_node(&mut self, preds: &[CFGIndex]) -> CFGIndex { self.add_node(CFGNodeData::Dummy, preds) } fn add_ast_node(&mut self, id: hir::ItemLocalId, preds: &[CFGIndex]) -> CFGIndex { self.add_node(CFGNodeData::AST(id), preds) } fn add_unreachable_node(&mut self) -> CFGIndex { self.add_node(CFGNodeData::Unreachable, &[]) } fn add_node(&mut self, data: CFGNodeData, preds: &[CFGIndex]) -> CFGIndex { let node = self.graph.add_node(data); for &pred in preds { self.add_contained_edge(pred, node); } node } fn add_contained_edge(&mut self, source: CFGIndex, target: CFGIndex) { let data = CFGEdgeData {exiting_scopes: vec![] }; self.graph.add_edge(source, target, data); } fn add_exiting_edge(&mut self, from_expr: &hir::Expr, from_index: CFGIndex, target_scope: region::Scope, to_index: CFGIndex) { let mut data = CFGEdgeData { exiting_scopes: vec![] }; let mut scope = region::Scope::Node(from_expr.hir_id.local_id); let region_scope_tree = self.tcx.region_scope_tree(self.owner_def_id); while scope != target_scope { data.exiting_scopes.push(scope.item_local_id()); scope = region_scope_tree.encl_scope(scope); } self.graph.add_edge(from_index, to_index, data); } fn add_returning_edge(&mut self, _from_expr: &hir::Expr, from_index: CFGIndex) { let mut data = CFGEdgeData { exiting_scopes: vec![], }; for &LoopScope { loop_id: id, .. } in self.loop_scopes.iter().rev() { data.exiting_scopes.push(id); } self.graph.add_edge(from_index, self.fn_exit, data); } fn find_scope_edge(&self, expr: &hir::Expr, destination: hir::Destination, scope_cf_kind: ScopeCfKind) -> (region::Scope, CFGIndex) { match destination.target_id { Ok(loop_id) => { for b in &self.breakable_block_scopes { if b.block_expr_id == self.tcx.hir.node_to_hir_id(loop_id).local_id { let scope_id = self.tcx.hir.node_to_hir_id(loop_id).local_id; return (region::Scope::Node(scope_id), match scope_cf_kind { ScopeCfKind::Break => b.break_index, ScopeCfKind::Continue => bug!("can't continue to block"), }); } } for l in &self.loop_scopes { if l.loop_id == self.tcx.hir.node_to_hir_id(loop_id).local_id { let scope_id = self.tcx.hir.node_to_hir_id(loop_id).local_id; return (region::Scope::Node(scope_id), match scope_cf_kind { ScopeCfKind::Break => l.break_index, ScopeCfKind::Continue => l.continue_index, }); } } span_bug!(expr.span, "no scope for id {}", loop_id); } Err(err) => span_bug!(expr.span, "scope error: {}", err), } } } #[derive(Copy, Clone, Eq, PartialEq)] enum ScopeCfKind { Break, Continue, }
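// --- Added illustration (not part of rustc) ---
// A standalone sketch of the edge-wiring that `CFGBuilder::expr` performs for
// `if cond { then }` (the ASCII diagram above): the condition node feeds the
// `then` block and also jumps straight to the expression's exit node when the
// condition is false. `ToyGraph` is a made-up stand-in for rustc's CFGGraph.
#[derive(Default)]
struct ToyGraph {
    nodes: usize,
    edges: Vec<(usize, usize)>,
}

impl ToyGraph {
    fn add_node(&mut self) -> usize {
        self.nodes += 1;
        self.nodes - 1
    }
    fn add_edge(&mut self, from: usize, to: usize) {
        self.edges.push((from, to));
    }
}

fn main() {
    let mut g = ToyGraph::default();
    let pred = g.add_node(); // [pred]
    let cond = g.add_node(); // [cond]     (1)
    let then = g.add_node(); // [then]     (2)
    let exit = g.add_node(); // [..expr..] (3, 4)

    g.add_edge(pred, cond); // evaluate the condition
    g.add_edge(cond, then); // condition true: run the `then` block
    g.add_edge(then, exit); // (3) `then` falls through to the exit
    g.add_edge(cond, exit); // (4) condition false: skip straight to the exit

    assert_eq!((g.nodes, g.edges.len()), (4, 4));
}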
37.970779
95
0.486789
ef905fa270be44e40fd51010046ebc4acc341e9b
1,084
use std::fs;
use std::io::prelude::*;
use std::net::TcpListener;
use std::net::TcpStream;

fn main() {
    let listener = TcpListener::bind("127.0.0.1:7878").unwrap();

    for stream in listener.incoming() {
        let stream = stream.unwrap();

        handle_connection(stream);
    }
}

fn handle_connection(mut stream: TcpStream) {
    let mut buffer = [0; 1024];
    stream.read(&mut buffer).unwrap();

    let get = b"GET / HTTP/1.1\r\n";

    if buffer.starts_with(get) {
        let contents = fs::read_to_string("hello.html").unwrap();

        let response = format!(
            "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{}",
            contents.len(),
            contents
        );

        stream.write(response.as_bytes()).unwrap();
        stream.flush().unwrap();
    } else {
        let status_line = "HTTP/1.1 404 NOT FOUND\r\n\r\n";
        let contents = fs::read_to_string("404.html").unwrap();

        let response = format!("{}{}", status_line, contents);

        stream.write(response.as_bytes()).unwrap();
        stream.flush().unwrap();
    }
}
25.209302
65
0.573801
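A minimal client sketch for exercising such a server, assuming an instance of the code above is already listening on 127.0.0.1:7878; it sends the same `GET / HTTP/1.1` request line that the handler matches on and prints whatever the server returns.

use std::io::prelude::*;
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
    // Connect to the address the server above binds to (assumed to be running).
    let mut stream = TcpStream::connect("127.0.0.1:7878")?;

    // The request must start with the `b"GET / HTTP/1.1\r\n"` prefix the handler checks.
    stream.write_all(b"GET / HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n")?;
    stream.flush()?;

    // The server drops the connection after responding, so reading to EOF terminates.
    let mut response = String::new();
    stream.read_to_string(&mut response)?;
    println!("{}", response);
    Ok(())
}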
e428f28c521c54d7af0fad466dc84c740ee391e9
1,644
//
// Vidoxide - Image acquisition for amateur astronomy
// Copyright (c) 2020-2021 Filip Szczerek <[email protected]>
//
// This project is licensed under the terms of the MIT license
// (see the LICENSE file for details).
//

//!
//! Sky-Watcher mount connection GUI.
//!

use crate::gui::mount_gui::connection_dialog::ConnectionCreator;
use crate::mount::MountConnection;
use gtk::prelude::*;

/// Control padding in pixels.
const PADDING: u32 = 10;

pub struct SWConnectionCreator {
    dialog_tab: gtk::Box,
    entry: gtk::Entry
}

impl SWConnectionCreator {
    pub(in crate::gui::mount_gui) fn new(configuration: &crate::config::Configuration) -> Box<dyn ConnectionCreator> {
        let vbox = gtk::Box::new(gtk::Orientation::Vertical, 0);

        vbox.pack_start(
            &gtk::Label::new(Some("Device name (e.g., “COM5” on Windows or “/dev/ttyUSB0” on Linux):")),
            false,
            false,
            PADDING
        );

        let entry = gtk::Entry::new();
        entry.set_text(&configuration.skywatcher_last_device().unwrap_or("".to_string()));
        vbox.pack_start(&entry, true, false, PADDING);

        Box::new(SWConnectionCreator{ dialog_tab: vbox, entry })
    }
}

impl ConnectionCreator for SWConnectionCreator {
    fn dialog_tab(&self) -> &gtk::Box { &self.dialog_tab }

    fn create(&self, configuration: &crate::config::Configuration) -> MountConnection {
        configuration.set_skywatcher_last_device(&self.entry.text());
        MountConnection::SkyWatcherSerial(self.entry.text().as_str().to_string())
    }

    fn label(&self) -> &'static str { "Sky-Watcher serial connection" }
}
30.444444
118
0.663017
fc45eeb82c834ad7de8fbfcc2f05718dc75789e7
836
/*
 * Asana
 *
 * This is the interface for interacting with the [Asana Platform](https://developers.asana.com). Our API reference is generated from our [OpenAPI spec] (https://raw.githubusercontent.com/Asana/developer-docs/master/defs/asana_oas.yaml).
 *
 * The version of the OpenAPI document: 1.0
 *
 * Generated by: https://openapi-generator.tech
 */

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InlineResponse2001 {
    /// An empty object. Some endpoints do not return an object on success. The success is conveyed through a 2-- status code and returning an empty object.
    #[serde(rename = "data", skip_serializing_if = "Option::is_none")]
    pub data: Option<serde_json::Value>,
}

impl InlineResponse2001 {
    pub fn new() -> InlineResponse2001 {
        InlineResponse2001 { data: None }
    }
}
36.347826
237
0.717703
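A short usage sketch, assuming the surrounding generated crate (with its serde derives in scope) and `serde_json` are available; the payload value is purely illustrative. It shows that `data` is omitted entirely when `None`, per the `skip_serializing_if` attribute above, and that the model round-trips through JSON.

fn main() -> Result<(), serde_json::Error> {
    // Empty response: `data` is None, so serialization yields just "{}".
    let empty = InlineResponse2001::new();
    assert_eq!(serde_json::to_string(&empty)?, "{}");

    // Populated response: any JSON value can be carried in `data` (hypothetical payload).
    let mut populated = InlineResponse2001::new();
    populated.data = Some(serde_json::json!({ "gid": "12345" }));
    let text = serde_json::to_string(&populated)?;

    // Round-trip back into the model.
    let parsed: InlineResponse2001 = serde_json::from_str(&text)?;
    assert_eq!(parsed, populated);
    Ok(())
}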
26afbde4af432185745d0188744563fe482faa05
16,676
use crate::api::page::{Page, Slot}; use crate::util::bsearch::bsearch; use bytes::{BufMut, BytesMut}; use std::mem::size_of; pub(crate) struct Block { buf: BytesMut, } impl AsMut<[u8]> for Block { fn as_mut(&mut self) -> &mut [u8] { &mut self.buf[..] } } impl AsRef<[u8]> for Block { fn as_ref(&self) -> &[u8] { &self.buf[..] } } const ID_OFFSET: usize = 0; const CAP_OFFSET: usize = 4; const SIZE_OFFSET: usize = 8; const RESERVED: u32 = 0xC0DE1542; impl Block { fn put_entry(&mut self, key: &[u8], val: &[u8], page: u32) -> Option<u32> { if !self.fits((key.len() + val.len()) as u32) { return None; } let ceil_opt = self.ceil(key); if let Some(idx) = &ceil_opt { if self.key(*idx) == key { let n = self.size() - 1; put_size(&mut self.buf, n); self.remove(*idx); } } let size = self.size(); let idx = self.ceil(key).unwrap_or_else(|| size); let mut slots = (0..size) .into_iter() .filter_map(|idx| self.slot(idx)) .collect::<Vec<_>>(); let klen = key.len() as u32; let vlen = val.len() as u32; let end = slots .iter() .map(|slot| slot.offset) .min() .unwrap_or_else(|| self.cap()); let offset = end - klen - vlen; let slot = Slot::new(offset, klen, vlen, page); slots.insert(idx as usize, slot); slots .into_iter() .enumerate() .for_each(|(idx, slot)| put_slot(&mut self.buf, idx as u32, &slot)); let n = self.size() + 1; put_size(&mut self.buf, n); put_slice(&mut self.buf, offset as usize, key); if !val.is_empty() { put_slice(&mut self.buf, offset as usize + key.len(), val); } Some(idx) } } impl Page for Block { fn reserve(capacity: u32) -> Self { let mut buf = BytesMut::with_capacity(capacity as usize); buf.extend_from_slice(&vec![0u8; capacity as usize]); Self { buf } } fn create(id: u32, cap: u32) -> Self { let mut buf = BytesMut::with_capacity(cap as usize); buf.put_u32(id); buf.put_u32(cap); buf.put_u32(0); buf.put_u32(RESERVED); assert_eq!(buf.len(), HEAD); buf.extend_from_slice(&vec![0u8; cap as usize - HEAD]); Self { buf } } fn id(&self) -> u32 { get_u32(&self.buf, ID_OFFSET) } fn cap(&self) -> u32 { get_u32(&self.buf, CAP_OFFSET) } fn size(&self) -> u32 { get_u32(&self.buf, SIZE_OFFSET) } fn slot(&self, idx: u32) -> Option<Slot> { if idx >= self.size() { return None; } let pos = HEAD + U32 * 4 * idx as usize; let offset = get_u32(&self.buf, pos); let klen = get_u32(&self.buf, pos + 4); let vlen = get_u32(&self.buf, pos + 8); let page = get_u32(&self.buf, pos + 12); Some(Slot::new(offset, klen, vlen, page)) } fn min(&self) -> &[u8] { self.key(0) } fn max(&self) -> &[u8] { self.key(self.size() - 1) } fn key(&self, idx: u32) -> &[u8] { self.slot(idx) .map(|slot| { let at = slot.offset as usize; let to = at + slot.klen as usize; &self.buf[at..to] }) .unwrap_or_default() } fn val(&self, idx: u32) -> &[u8] { self.slot(idx) .map(|slot| { let at = slot.offset as usize + slot.klen as usize; let to = at + slot.vlen as usize; &self.buf[at..to] }) .unwrap_or_default() } fn free(&self) -> u32 { let size = self.size(); if size == 0 { return self.cap() - HEAD as u32; } let lo = HEAD as u32 + size * SLOT as u32; let hi = (0..size) .into_iter() .filter_map(|idx| self.slot(idx)) .map(|slot| slot.offset) .min() .unwrap_or(lo); assert!(lo <= hi); hi - lo } fn full(&self) -> u8 { let len = self.cap() - HEAD as u32; ((len - self.free()) * 100 / len) as u8 } fn fits(&self, len: u32) -> bool { self.free() >= len + SLOT as u32 } fn find(&self, key: &[u8]) -> Option<u32> { let n = self.size(); if n == 0 { return None; } let k = bsearch(key, 0, n - 1, |i| self.key(i)); if self.key(k) == key { Some(k) } else { None } } fn 
ceil(&self, key: &[u8]) -> Option<u32> { let n = self.size(); if n == 0 { return None; } let k = bsearch(key, 0, n - 1, |i| self.key(i)); if self.key(k) >= key { Some(k) } else { None } } fn put_val(&mut self, key: &[u8], val: &[u8]) -> Option<u32> { self.put_entry(key, val, 0) } fn put_ref(&mut self, key: &[u8], page: u32) -> Option<u32> { self.put_entry(key, &[], page) } fn remove(&mut self, idx: u32) { let size = self.size(); if idx >= size { return; } let mut slots = (0..size) .into_iter() .filter_map(|idx| self.slot(idx)) .collect::<Vec<_>>(); let removed = slots.remove(idx as usize); let blank = vec![0u8; (removed.klen + removed.vlen) as usize]; put_slice(&mut self.buf, removed.offset as usize, &blank); put_size(&mut self.buf, size - 1); let total: u32 = slots.iter().map(|slot| slot.klen + slot.vlen).sum(); let mut offset = self.cap() - total; let copy = slots .iter() .map(|slot| { ( get_key(&self.buf, slot).to_vec(), get_val(&self.buf, slot).to_vec(), ) }) .collect::<Vec<_>>(); for (i, (key, val)) in copy.iter().enumerate() { slots.get_mut(i).unwrap().offset = offset; put_slice(&mut self.buf, offset as usize, key); offset += key.len() as u32; if !val.is_empty() { put_slice(&mut self.buf, offset as usize, val); offset += val.len() as u32; } } slots.push(Slot::empty()); slots .into_iter() .enumerate() .for_each(|(idx, slot)| put_slot(&mut self.buf, idx as u32, &slot)); } fn copy(&self) -> Vec<(Vec<u8>, Vec<u8>, u32)> { (0..self.size()) .into_iter() .filter_map(|idx| self.slot(idx)) .map(|slot| { ( get_key(&self.buf, &slot).to_vec(), get_val(&self.buf, &slot).to_vec(), slot.page, ) }) .collect::<Vec<_>>() } fn clear(&mut self) { let len = self.cap() as usize; let mut tmp = BytesMut::with_capacity(len); tmp.put_u32(self.id()); tmp.put_u32(self.cap()); tmp.put_u32(0); tmp.put_u32(RESERVED); self.buf[..HEAD].copy_from_slice(tmp.as_ref()); let blank = vec![0xFFu8; len - HEAD]; self.buf[HEAD..].copy_from_slice(&blank); } } const U32: usize = size_of::<u32>(); const SLOT: usize = size_of::<Slot>(); const HEAD: usize = 4 * U32; // page header: id, length, size, reserved fn get_u32(buf: &BytesMut, pos: usize) -> u32 { let mut src = [0u8; U32]; src.copy_from_slice(&buf[pos..(pos + U32)]); u32::from_be_bytes(src) } fn get_key<'a>(buf: &'a BytesMut, slot: &'a Slot) -> &'a [u8] { &buf[(slot.offset as usize)..(slot.offset as usize + slot.klen as usize)] } fn get_val<'a>(buf: &'a BytesMut, slot: &'a Slot) -> &'a [u8] { &buf[(slot.offset as usize + slot.klen as usize) ..(slot.offset as usize + slot.klen as usize + slot.vlen as usize)] } fn put_u32(buf: &mut BytesMut, pos: usize, val: u32) { let dst = &mut buf[pos..(pos + U32)]; dst.copy_from_slice(&val.to_be_bytes()); } fn put_slice(buf: &mut BytesMut, pos: usize, src: &[u8]) { let dst = &mut buf[pos..(pos + src.len())]; dst.copy_from_slice(src); } fn put_size(buf: &mut BytesMut, val: u32) { put_u32(buf, SIZE_OFFSET, val); } fn put_slot(buf: &mut BytesMut, idx: u32, slot: &Slot) { let pos = HEAD + idx as usize * SLOT; put_u32(buf, pos, slot.offset); put_u32(buf, pos + 4, slot.klen); put_u32(buf, pos + 8, slot.vlen); put_u32(buf, pos + 12, slot.page); } #[cfg(test)] mod tests { use super::*; use rand::prelude::*; use std::collections::HashSet; #[test] fn test_sizes() { assert_eq!(U32, 4); assert_eq!(SLOT, 16); assert_eq!(HEAD, 16); } #[test] fn test_sorted() { let mut rng = thread_rng(); let size = 32; let len = size * size_of::<u64>() * 4; let mut keys = (0..size) .into_iter() .map(|_| rng.gen::<u64>().to_be_bytes().to_vec()) .collect::<Vec<_>>(); 
let mut page = Block::create(42, len as u32); for (i, key) in keys.iter().enumerate() { if i % 2 == 0 { page.put_ref(key, 42).unwrap(); } else { page.put_val(key, b"undefined").unwrap(); } } keys.sort(); let read = (0..size) .into_iter() .map(|idx| page.key(idx as u32).to_vec()) .collect::<Vec<_>>(); assert_eq!(read, keys); } #[test] fn test_find() { let mut rng = thread_rng(); let size = 64; let len = size * size_of::<u64>() * 10; let keys = (0..size) .into_iter() .map(|_| rng.gen::<u64>()) .collect::<HashSet<_>>(); let pairs = keys .iter() .map(|k| { ( k.to_be_bytes().to_vec(), rng.gen::<u64>().to_be_bytes().to_vec(), ) }) .collect::<Vec<_>>(); let mut page = Block::create(42, len as u32); for (key, val) in pairs.iter() { page.put_val(key, val).unwrap(); } for (key, val) in pairs.iter() { let idx = page.find(key).unwrap(); assert_eq!(page.key(idx), key); assert_eq!(page.val(idx), val); } } #[test] fn test_ceil() { let mut rng = thread_rng(); let size = 64; let len = size * size_of::<u64>() * 10; let keys = (0..size) .into_iter() .map(|_| { let x = rng.gen::<u64>(); x - (x % 100) }) .collect::<Vec<_>>(); let mut page = Block::create(42, len as u32); for key in keys.iter() { page.put_ref(&key.to_be_bytes(), 42).unwrap(); } for k in keys.iter() { let r = rng.gen::<u64>() % 100; let key = &(k - r).to_be_bytes(); let exp = &k.to_be_bytes(); let idx = page.ceil(key).unwrap(); assert_eq!(page.key(idx), exp); let idx = page.ceil(exp).unwrap(); assert_eq!(page.key(idx), exp); } let missing = keys.iter().max().cloned().unwrap() + 1; assert_eq!(page.ceil(&missing.to_be_bytes()), None); } #[test] fn test_size() { let mut rng = thread_rng(); let count = 32; let pairs = (0..count) .into_iter() .map(|_| { ( rng.next_u64().to_be_bytes().to_vec(), rng.next_u64().to_be_bytes().to_vec(), ) }) .collect::<Vec<_>>(); let len = pairs.iter().map(|(k, v)| k.len() + v.len()).sum::<usize>() + HEAD + pairs.len() * SLOT; let mut page = Block::create(42, len as u32); assert_eq!(page.free(), len as u32 - HEAD as u32); assert_eq!(page.full(), 0); let half = count / 2; for (k, v) in pairs.iter().take(half) { page.put_val(k, v).unwrap(); } let free = half * (size_of::<u64>() * 2 + SLOT); assert_eq!(page.free(), free as u32); assert_eq!(page.full(), 50); for (k, v) in pairs.iter().skip(half) { page.put_val(k, v).unwrap(); } assert_eq!(page.free(), 0); assert_eq!(page.full(), 100); } #[test] fn test_page() { let k1 = b"bb-cc-dd-ee"; let v1 = b"0000-1111-2222"; let k2 = b"\x03\x04\x05\x06"; let v2 = b"ABCDEFGH"; let k3 = b"xx-yy-zz"; let p3 = 142; let id = 42; let len = 128; let mut page = Block::create(id, len); assert_eq!(page.id(), id); assert_eq!(page.cap(), len); assert_eq!(page.buf.len(), len as usize); assert_eq!( &page.buf[0..HEAD], &[0, 0, 0, id as u8, 0, 0, 0, len as u8, 0, 0, 0, 0, 0xC0, 0xDE, 0x15, 0x42,] ); assert_eq!(page.put_val(k1, v1), Some(0)); assert_eq!(page.put_val(k2, v2), Some(0)); assert_eq!(page.put_ref(k3, p3), Some(2)); let slots = (0..page.size()) .into_iter() .filter_map(|idx| page.slot(idx)) .collect::<Vec<_>>(); assert_eq!( slots, vec![ Slot::new( len - k2.len() as u32 - v2.len() as u32 - k1.len() as u32 - v1.len() as u32, k2.len() as u32, v2.len() as u32, 0 ), Slot::new( len - k1.len() as u32 - v1.len() as u32, k1.len() as u32, v1.len() as u32, 0 ), Slot::new( len - k2.len() as u32 - v2.len() as u32 - k1.len() as u32 - v1.len() as u32 - k3.len() as u32, k3.len() as u32, 0, p3 ), ] ); assert_eq!(page.key(0), k2); assert_eq!(page.key(1), k1); assert_eq!(page.key(2), k3); assert_eq!(page.val(0), 
v2); assert_eq!(page.val(1), v1); assert_eq!(page.val(2), &[]); assert_eq!(page.find(k1).unwrap(), 1); assert_eq!(page.find(k2).unwrap(), 0); assert_eq!(page.find(k3).unwrap(), 2); assert_eq!(page.find(b"no-such-key"), None); assert_eq!(page.ceil(b"\x01"), Some(0)); assert_eq!(page.ceil(b"\x03"), Some(0)); assert_eq!(page.ceil(b"a"), Some(1)); assert_eq!(page.ceil(b"b"), Some(1)); assert_eq!(page.ceil(b"o"), Some(2)); assert_eq!(page.ceil(b"x"), Some(2)); assert_eq!(page.ceil(b"z"), None); let free = len - HEAD as u32 - 3 * SLOT as u32 - k1.len() as u32 - v1.len() as u32 - k2.len() as u32 - v2.len() as u32 - k3.len() as u32; assert_eq!(page.free(), free); page.remove(1); // remove (k1, v1) assert_eq!(page.free(), free + 16 + k1.len() as u32 + v1.len() as u32); assert_eq!(page.find(k2).unwrap(), 0); assert_eq!(page.find(k3).unwrap(), 1); assert_eq!(page.find(b"no-such-key"), None); } #[test] fn test_put_find_3() { let data = vec![ (b"uno".to_vec(), b"la squadra azzurra".to_vec()), (b"due".to_vec(), b"it's coming home".to_vec()), (b"tre".to_vec(), b"red devils".to_vec()), ]; let id = 42; let len = 256; let mut page = Block::create(id, len); for (k, v) in data.iter() { page.put_val(k, v); } let mut copy = data.clone(); copy.sort_by_key(|x| x.0.clone()); assert_eq!( page.copy() .into_iter() .map(|(k, v, _)| (k, v)) .collect::<Vec<_>>(), copy ); assert_eq!(page.find(&data[0].0), Some(2)); assert_eq!(page.find(&data[1].0), Some(0)); assert_eq!(page.find(&data[2].0), Some(1)); } }
27.472817
100
0.455205
01c0f960b053bab66f86396ac0b4df02570ea376
2,096
// Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.

use common_datavalues::prelude::*;
use common_exception::Result;
use pretty_assertions::assert_eq;

use crate::scalars::*;

#[test]
fn test_to_type_name_function() -> Result<()> {
    #[allow(dead_code)]
    struct Test {
        name: &'static str,
        display: &'static str,
        nullable: bool,
        arg_names: Vec<&'static str>,
        columns: Vec<DataColumn>,
        expect: DataColumn,
        error: &'static str,
        func: Box<dyn Function>,
    }

    let schema = DataSchemaRefExt::create(vec![DataField::new("a", DataType::Boolean, false)]);

    let tests = vec![Test {
        name: "to_type_name-example-passed",
        display: "toTypeName",
        nullable: false,
        arg_names: vec!["a"],
        func: ToTypeNameFunction::try_create("toTypeName")?,
        columns: vec![Series::new(vec![true, true, true, false]).into()],
        expect: Series::new(vec!["Boolean", "Boolean", "Boolean", "Boolean"]).into(),
        error: "",
    }];

    for t in tests {
        let rows = t.columns[0].len();
        let func = t.func;
        println!("{:?}", t.name);
        if let Err(e) = func.eval(&t.columns, rows) {
            assert_eq!(t.error, e.to_string());
        }

        // Display check.
        let expect_display = t.display.to_string();
        let actual_display = format!("{}", func);
        assert_eq!(expect_display, actual_display);

        // Nullable check.
        let expect_null = t.nullable;
        let actual_null = func.nullable(&schema)?;
        assert_eq!(expect_null, actual_null);

        let ref v = func.eval(&t.columns, rows)?;

        // Type check.
        let mut args = vec![];
        for name in t.arg_names {
            args.push(schema.field_with_name(name)?.data_type().clone());
        }

        let expect_type = func.return_type(&args)?;
        let actual_type = v.data_type();
        assert_eq!(expect_type, actual_type);

        assert!(v.to_array()?.series_equal(&t.expect.to_array()?));
    }
    Ok(())
}
29.521127
95
0.57729
e66e394d639563e680e709c1375052ef8d323999
21,980
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use ast; use codemap::Span; use ext::base::ExtCtxt; use ext::base; use ext::build::AstBuilder; use parse::token::*; use parse::token; use parse; /** * * Quasiquoting works via token trees. * * This is registered as a set of expression syntax extension called quote! * that lifts its argument token-tree to an AST representing the * construction of the same token tree, with ast::TTNonterminal nodes * interpreted as antiquotes (splices). * */ pub mod rt { use ast; use ext::base::ExtCtxt; use parse; use print::pprust; pub use ast::*; pub use parse::token::*; pub use parse::new_parser_from_tts; pub use codemap::{BytePos, Span, dummy_spanned}; pub trait ToTokens { fn to_tokens(&self, _cx: &ExtCtxt) -> ~[TokenTree]; } impl ToTokens for ~[TokenTree] { fn to_tokens(&self, _cx: &ExtCtxt) -> ~[TokenTree] { (*self).clone() } } /* Should be (when bugs in default methods are fixed): trait ToSource : ToTokens { // Takes a thing and generates a string containing rust code for it. pub fn to_source() -> ~str; // If you can make source, you can definitely make tokens. pub fn to_tokens(cx: &ExtCtxt) -> ~[TokenTree] { cx.parse_tts(self.to_source()) } } */ pub trait ToSource { // Takes a thing and generates a string containing rust code for it. fn to_source(&self) -> @str; } impl ToSource for ast::Ident { fn to_source(&self) -> @str { ident_to_str(self) } } impl ToSource for @ast::Item { fn to_source(&self) -> @str { pprust::item_to_str(*self, get_ident_interner()).to_managed() } } impl<'a> ToSource for &'a [@ast::Item] { fn to_source(&self) -> @str { self.map(|i| i.to_source()).connect("\n\n").to_managed() } } impl ToSource for ast::Ty { fn to_source(&self) -> @str { pprust::ty_to_str(self, get_ident_interner()).to_managed() } } impl<'a> ToSource for &'a [ast::Ty] { fn to_source(&self) -> @str { self.map(|i| i.to_source()).connect(", ").to_managed() } } impl ToSource for Generics { fn to_source(&self) -> @str { pprust::generics_to_str(self, get_ident_interner()).to_managed() } } impl ToSource for @ast::Expr { fn to_source(&self) -> @str { pprust::expr_to_str(*self, get_ident_interner()).to_managed() } } impl ToSource for ast::Block { fn to_source(&self) -> @str { pprust::block_to_str(self, get_ident_interner()).to_managed() } } impl<'a> ToSource for &'a str { fn to_source(&self) -> @str { let lit = dummy_spanned(ast::LitStr(self.to_managed(), ast::CookedStr)); pprust::lit_to_str(&lit).to_managed() } } impl ToSource for int { fn to_source(&self) -> @str { let lit = dummy_spanned(ast::LitInt(*self as i64, ast::TyI)); pprust::lit_to_str(&lit).to_managed() } } impl ToSource for i8 { fn to_source(&self) -> @str { let lit = dummy_spanned(ast::LitInt(*self as i64, ast::TyI8)); pprust::lit_to_str(&lit).to_managed() } } impl ToSource for i16 { fn to_source(&self) -> @str { let lit = dummy_spanned(ast::LitInt(*self as i64, ast::TyI16)); pprust::lit_to_str(&lit).to_managed() } } impl ToSource for i32 { fn to_source(&self) -> @str { let lit = dummy_spanned(ast::LitInt(*self as i64, ast::TyI32)); pprust::lit_to_str(&lit).to_managed() } } impl ToSource for i64 { fn 
to_source(&self) -> @str { let lit = dummy_spanned(ast::LitInt(*self as i64, ast::TyI64)); pprust::lit_to_str(&lit).to_managed() } } impl ToSource for uint { fn to_source(&self) -> @str { let lit = dummy_spanned(ast::LitUint(*self as u64, ast::TyU)); pprust::lit_to_str(&lit).to_managed() } } impl ToSource for u8 { fn to_source(&self) -> @str { let lit = dummy_spanned(ast::LitUint(*self as u64, ast::TyU8)); pprust::lit_to_str(&lit).to_managed() } } impl ToSource for u16 { fn to_source(&self) -> @str { let lit = dummy_spanned(ast::LitUint(*self as u64, ast::TyU16)); pprust::lit_to_str(&lit).to_managed() } } impl ToSource for u32 { fn to_source(&self) -> @str { let lit = dummy_spanned(ast::LitUint(*self as u64, ast::TyU32)); pprust::lit_to_str(&lit).to_managed() } } impl ToSource for u64 { fn to_source(&self) -> @str { let lit = dummy_spanned(ast::LitUint(*self as u64, ast::TyU64)); pprust::lit_to_str(&lit).to_managed() } } // Alas ... we write these out instead. All redundant. macro_rules! impl_to_tokens( ($t:ty) => ( impl ToTokens for $t { fn to_tokens(&self, cx: &ExtCtxt) -> ~[TokenTree] { cx.parse_tts(self.to_source()) } } ) ) macro_rules! impl_to_tokens_self( ($t:ty) => ( impl<'a> ToTokens for $t { fn to_tokens(&self, cx: &ExtCtxt) -> ~[TokenTree] { cx.parse_tts(self.to_source()) } } ) ) impl_to_tokens!(ast::Ident) impl_to_tokens!(@ast::Item) impl_to_tokens_self!(&'a [@ast::Item]) impl_to_tokens!(ast::Ty) impl_to_tokens_self!(&'a [ast::Ty]) impl_to_tokens!(Generics) impl_to_tokens!(@ast::Expr) impl_to_tokens!(ast::Block) impl_to_tokens_self!(&'a str) impl_to_tokens!(int) impl_to_tokens!(i8) impl_to_tokens!(i16) impl_to_tokens!(i32) impl_to_tokens!(i64) impl_to_tokens!(uint) impl_to_tokens!(u8) impl_to_tokens!(u16) impl_to_tokens!(u32) impl_to_tokens!(u64) pub trait ExtParseUtils { fn parse_item(&self, s: @str) -> @ast::Item; fn parse_expr(&self, s: @str) -> @ast::Expr; fn parse_stmt(&self, s: @str) -> @ast::Stmt; fn parse_tts(&self, s: @str) -> ~[ast::TokenTree]; } impl ExtParseUtils for ExtCtxt { fn parse_item(&self, s: @str) -> @ast::Item { let res = parse::parse_item_from_source_str( @"<quote expansion>", s, self.cfg(), ~[], self.parse_sess()); match res { Some(ast) => ast, None => { error!("Parse error with ```\n{}\n```", s); fail!() } } } fn parse_stmt(&self, s: @str) -> @ast::Stmt { parse::parse_stmt_from_source_str( @"<quote expansion>", s, self.cfg(), ~[], self.parse_sess()) } fn parse_expr(&self, s: @str) -> @ast::Expr { parse::parse_expr_from_source_str( @"<quote expansion>", s, self.cfg(), self.parse_sess()) } fn parse_tts(&self, s: @str) -> ~[ast::TokenTree] { parse::parse_tts_from_source_str( @"<quote expansion>", s, self.cfg(), self.parse_sess()) } } } pub fn expand_quote_tokens(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> base::MacResult { let (cx_expr, expr) = expand_tts(cx, sp, tts); let expanded = expand_wrapper(cx, sp, cx_expr, expr); base::MRExpr(expanded) } pub fn expand_quote_expr(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> base::MacResult { let expanded = expand_parse_call(cx, sp, "parse_expr", ~[], tts); base::MRExpr(expanded) } pub fn expand_quote_item(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> base::MacResult { let e_attrs = cx.expr_vec_uniq(sp, ~[]); let expanded = expand_parse_call(cx, sp, "parse_item", ~[e_attrs], tts); base::MRExpr(expanded) } pub fn expand_quote_pat(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> base::MacResult { let e_refutable = cx.expr_lit(sp, ast::LitBool(true)); let expanded = 
expand_parse_call(cx, sp, "parse_pat", ~[e_refutable], tts); base::MRExpr(expanded) } pub fn expand_quote_ty(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> base::MacResult { let e_param_colons = cx.expr_lit(sp, ast::LitBool(false)); let expanded = expand_parse_call(cx, sp, "parse_ty", ~[e_param_colons], tts); base::MRExpr(expanded) } pub fn expand_quote_stmt(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> base::MacResult { let e_attrs = cx.expr_vec_uniq(sp, ~[]); let expanded = expand_parse_call(cx, sp, "parse_stmt", ~[e_attrs], tts); base::MRExpr(expanded) } fn ids_ext(strs: ~[~str]) -> ~[ast::Ident] { strs.map(|str| str_to_ident(*str)) } fn id_ext(str: &str) -> ast::Ident { str_to_ident(str) } // Lift an ident to the expr that evaluates to that ident. fn mk_ident(cx: &ExtCtxt, sp: Span, ident: ast::Ident) -> @ast::Expr { let e_str = cx.expr_str(sp, cx.str_of(ident)); cx.expr_method_call(sp, cx.expr_ident(sp, id_ext("ext_cx")), id_ext("ident_of"), ~[e_str]) } fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOp) -> @ast::Expr { let name = match bop { PLUS => "PLUS", MINUS => "MINUS", STAR => "STAR", SLASH => "SLASH", PERCENT => "PERCENT", CARET => "CARET", AND => "AND", OR => "OR", SHL => "SHL", SHR => "SHR" }; cx.expr_ident(sp, id_ext(name)) } fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> @ast::Expr { match *tok { BINOP(binop) => { return cx.expr_call_ident(sp, id_ext("BINOP"), ~[mk_binop(cx, sp, binop)]); } BINOPEQ(binop) => { return cx.expr_call_ident(sp, id_ext("BINOPEQ"), ~[mk_binop(cx, sp, binop)]); } LIT_CHAR(i) => { let e_char = cx.expr_lit(sp, ast::LitChar(i)); return cx.expr_call_ident(sp, id_ext("LIT_CHAR"), ~[e_char]); } LIT_INT(i, ity) => { let s_ity = match ity { ast::TyI => ~"TyI", ast::TyI8 => ~"TyI8", ast::TyI16 => ~"TyI16", ast::TyI32 => ~"TyI32", ast::TyI64 => ~"TyI64" }; let e_ity = cx.expr_ident(sp, id_ext(s_ity)); let e_i64 = cx.expr_lit(sp, ast::LitInt(i, ast::TyI64)); return cx.expr_call_ident(sp, id_ext("LIT_INT"), ~[e_i64, e_ity]); } LIT_UINT(u, uty) => { let s_uty = match uty { ast::TyU => ~"TyU", ast::TyU8 => ~"TyU8", ast::TyU16 => ~"TyU16", ast::TyU32 => ~"TyU32", ast::TyU64 => ~"TyU64" }; let e_uty = cx.expr_ident(sp, id_ext(s_uty)); let e_u64 = cx.expr_lit(sp, ast::LitUint(u, ast::TyU64)); return cx.expr_call_ident(sp, id_ext("LIT_UINT"), ~[e_u64, e_uty]); } LIT_INT_UNSUFFIXED(i) => { let e_i64 = cx.expr_lit(sp, ast::LitInt(i, ast::TyI64)); return cx.expr_call_ident(sp, id_ext("LIT_INT_UNSUFFIXED"), ~[e_i64]); } LIT_FLOAT(fident, fty) => { let s_fty = match fty { ast::TyF32 => ~"TyF32", ast::TyF64 => ~"TyF64" }; let e_fty = cx.expr_ident(sp, id_ext(s_fty)); let e_fident = mk_ident(cx, sp, fident); return cx.expr_call_ident(sp, id_ext("LIT_FLOAT"), ~[e_fident, e_fty]); } LIT_STR(ident) => { return cx.expr_call_ident(sp, id_ext("LIT_STR"), ~[mk_ident(cx, sp, ident)]); } LIT_STR_RAW(ident, n) => { return cx.expr_call_ident(sp, id_ext("LIT_STR_RAW"), ~[mk_ident(cx, sp, ident), cx.expr_uint(sp, n)]); } IDENT(ident, b) => { return cx.expr_call_ident(sp, id_ext("IDENT"), ~[mk_ident(cx, sp, ident), cx.expr_bool(sp, b)]); } LIFETIME(ident) => { return cx.expr_call_ident(sp, id_ext("LIFETIME"), ~[mk_ident(cx, sp, ident)]); } DOC_COMMENT(ident) => { return cx.expr_call_ident(sp, id_ext("DOC_COMMENT"), ~[mk_ident(cx, sp, ident)]); } INTERPOLATED(_) => fail!("quote! 
with interpolated token"), _ => () } let name = match *tok { EQ => "EQ", LT => "LT", LE => "LE", EQEQ => "EQEQ", NE => "NE", GE => "GE", GT => "GT", ANDAND => "ANDAND", OROR => "OROR", NOT => "NOT", TILDE => "TILDE", AT => "AT", DOT => "DOT", DOTDOT => "DOTDOT", COMMA => "COMMA", SEMI => "SEMI", COLON => "COLON", MOD_SEP => "MOD_SEP", RARROW => "RARROW", LARROW => "LARROW", DARROW => "DARROW", FAT_ARROW => "FAT_ARROW", LPAREN => "LPAREN", RPAREN => "RPAREN", LBRACKET => "LBRACKET", RBRACKET => "RBRACKET", LBRACE => "LBRACE", RBRACE => "RBRACE", POUND => "POUND", DOLLAR => "DOLLAR", UNDERSCORE => "UNDERSCORE", EOF => "EOF", _ => fail!() }; cx.expr_ident(sp, id_ext(name)) } fn mk_tt(cx: &ExtCtxt, sp: Span, tt: &ast::TokenTree) -> ~[@ast::Stmt] { match *tt { ast::TTTok(sp, ref tok) => { let e_sp = cx.expr_ident(sp, id_ext("sp")); let e_tok = cx.expr_call_ident(sp, id_ext("TTTok"), ~[e_sp, mk_token(cx, sp, tok)]); let e_push = cx.expr_method_call(sp, cx.expr_ident(sp, id_ext("tt")), id_ext("push"), ~[e_tok]); ~[cx.stmt_expr(e_push)] } ast::TTDelim(ref tts) => mk_tts(cx, sp, **tts), ast::TTSeq(..) => fail!("TTSeq in quote!"), ast::TTNonterminal(sp, ident) => { // tt.push_all_move($ident.to_tokens(ext_cx)) let e_to_toks = cx.expr_method_call(sp, cx.expr_ident(sp, ident), id_ext("to_tokens"), ~[cx.expr_ident(sp, id_ext("ext_cx"))]); let e_push = cx.expr_method_call(sp, cx.expr_ident(sp, id_ext("tt")), id_ext("push_all_move"), ~[e_to_toks]); ~[cx.stmt_expr(e_push)] } } } fn mk_tts(cx: &ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> ~[@ast::Stmt] { let mut ss = ~[]; for tt in tts.iter() { ss.push_all_move(mk_tt(cx, sp, tt)); } ss } fn expand_tts(cx: &ExtCtxt, sp: Span, tts: &[ast::TokenTree]) -> (@ast::Expr, @ast::Expr) { // NB: It appears that the main parser loses its mind if we consider // $foo as a TTNonterminal during the main parse, so we have to re-parse // under quote_depth > 0. This is silly and should go away; the _guess_ is // it has to do with transition away from supporting old-style macros, so // try removing it when enough of them are gone. let mut p = parse::new_parser_from_tts(cx.parse_sess(), cx.cfg(), tts.to_owned()); p.quote_depth += 1u; let cx_expr = p.parse_expr(); if !p.eat(&token::COMMA) { p.fatal("Expected token `,`"); } let tts = p.parse_all_token_trees(); p.abort_if_errors(); // We also bind a single value, sp, to ext_cx.call_site() // // This causes every span in a token-tree quote to be attributed to the // call site of the extension using the quote. We can't really do much // better since the source of the quote may well be in a library that // was not even parsed by this compilation run, that the user has no // source code for (eg. in libsyntax, which they're just _using_). // // The old quasiquoter had an elaborate mechanism for denoting input // file locations from which quotes originated; unfortunately this // relied on feeding the source string of the quote back into the // compiler (which we don't really want to do) and, in any case, only // pushed the problem a very small step further back: an error // resulting from a parse of the resulting quote is still attributed to // the site the string literal occurred, which was in a source file // _other_ than the one the user has control over. For example, an // error in a quote from the protocol compiler, invoked in user code // using macro_rules! for example, will be attributed to the macro_rules.rs // file in libsyntax, which the user might not even have source to (unless // they happen to have a compiler on hand). 
Over all, the phase distinction // just makes quotes "hard to attribute". Possibly this could be fixed // by recreating some of the original qq machinery in the tt regime // (pushing fake FileMaps onto the parser to account for original sites // of quotes, for example) but at this point it seems not likely to be // worth the hassle. let e_sp = cx.expr_method_call(sp, cx.expr_ident(sp, id_ext("ext_cx")), id_ext("call_site"), ~[]); let stmt_let_sp = cx.stmt_let(sp, false, id_ext("sp"), e_sp); let stmt_let_tt = cx.stmt_let(sp, true, id_ext("tt"), cx.expr_vec_uniq(sp, ~[])); let block = cx.expr_block( cx.block_all(sp, ~[], ~[stmt_let_sp, stmt_let_tt] + mk_tts(cx, sp, tts), Some(cx.expr_ident(sp, id_ext("tt"))))); (cx_expr, block) } fn expand_wrapper(cx: &ExtCtxt, sp: Span, cx_expr: @ast::Expr, expr: @ast::Expr) -> @ast::Expr { let uses = ~[ cx.view_use_glob(sp, ast::Public, ids_ext(~[~"syntax", ~"ext", ~"quote", ~"rt"])) ]; let stmt_let_ext_cx = cx.stmt_let(sp, false, id_ext("ext_cx"), cx_expr); cx.expr_block(cx.block_all(sp, uses, ~[stmt_let_ext_cx], Some(expr))) } fn expand_parse_call(cx: &ExtCtxt, sp: Span, parse_method: &str, arg_exprs: ~[@ast::Expr], tts: &[ast::TokenTree]) -> @ast::Expr { let (cx_expr, tts_expr) = expand_tts(cx, sp, tts); let cfg_call = || cx.expr_method_call( sp, cx.expr_ident(sp, id_ext("ext_cx")), id_ext("cfg"), ~[]); let parse_sess_call = || cx.expr_method_call( sp, cx.expr_ident(sp, id_ext("ext_cx")), id_ext("parse_sess"), ~[]); let new_parser_call = cx.expr_call(sp, cx.expr_ident(sp, id_ext("new_parser_from_tts")), ~[parse_sess_call(), cfg_call(), tts_expr]); let expr = cx.expr_method_call(sp, new_parser_call, id_ext(parse_method), arg_exprs); expand_wrapper(cx, sp, cx_expr, expr) }
31.994178
84
0.489991
f91e8f9f59a140dbe58ff8f14581aff6112e04a7
1,539
//! System Mutexes
//!
//! The Windows implementation of mutexes is a little odd and it might not be
//! immediately obvious what's going on. The primary oddness is that SRWLock is
//! used instead of CriticalSection, and this is done because:
//!
//! 1. SRWLock is several times faster than CriticalSection according to
//!    benchmarks performed on both Windows 8 and Windows 7.
//!
//! 2. CriticalSection allows recursive locking while SRWLock deadlocks. The
//!    Unix implementation deadlocks so consistency is preferred. See #19962 for
//!    more details.
//!
//! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
//!    is that there are no guarantees of fairness.

use crate::cell::UnsafeCell;
use crate::sys::c;

pub struct Mutex {
    srwlock: UnsafeCell<c::SRWLOCK>,
}

// Windows SRW Locks are movable (while not borrowed).
pub type MovableMutex = Mutex;

unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}

#[inline]
pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK {
    m.srwlock.get()
}

impl Mutex {
    #[inline]
    pub const fn new() -> Mutex {
        Mutex { srwlock: UnsafeCell::new(c::SRWLOCK_INIT) }
    }
    #[inline]
    pub unsafe fn init(&mut self) {}

    #[inline]
    pub unsafe fn lock(&self) {
        c::AcquireSRWLockExclusive(raw(self));
    }
    #[inline]
    pub unsafe fn try_lock(&self) -> bool {
        c::TryAcquireSRWLockExclusive(raw(self)) != 0
    }
    #[inline]
    pub unsafe fn unlock(&self) {
        c::ReleaseSRWLockExclusive(raw(self));
    }
}
26.534483
80
0.667316
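For context only: the raw `lock`/`unlock`/`try_lock` surface above is what the public, safe mutex API is built on top of. A small runnable sketch of the equivalent pattern through `std::sync::Mutex`, whose RAII guard releases the lock on drop instead of requiring an explicit `unlock`.

use std::sync::Mutex;
use std::thread;

fn main() {
    // The safe wrapper hands out RAII guards; unlocking happens when the guard drops,
    // so the unsafe lock/unlock pairing above never has to be written by hand.
    let counter = Mutex::new(0u32);

    thread::scope(|s| {
        for _ in 0..4 {
            s.spawn(|| {
                let mut guard = counter.lock().unwrap();
                *guard += 1;
            });
        }
    });

    assert_eq!(*counter.lock().unwrap(), 4);
}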
9030a6f222b853ce3656c068393076dd04e8f739
35,987
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use hir::def::{self, Def}; use rustc::infer::{self, InferOk, TypeOrigin}; use hir::pat_util::{PatIdMap, pat_id_map, pat_is_binding}; use hir::pat_util::{EnumerateAndAdjustIterator, pat_is_resolved_const}; use rustc::ty::subst::Substs; use rustc::ty::{self, Ty, TypeFoldable, LvaluePreference}; use check::{FnCtxt, Expectation}; use lint; use util::nodemap::FnvHashMap; use session::Session; use std::cmp; use std::collections::hash_map::Entry::{Occupied, Vacant}; use std::ops::Deref; use syntax::ast; use syntax::codemap::{Span, Spanned}; use syntax::ptr::P; use rustc::hir::{self, PatKind}; use rustc::hir::print as pprust; pub struct PatCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { pub fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, pub map: PatIdMap, } impl<'a, 'gcx, 'tcx> Deref for PatCtxt<'a, 'gcx, 'tcx> { type Target = FnCtxt<'a, 'gcx, 'tcx>; fn deref(&self) -> &Self::Target { self.fcx } } // This function exists due to the warning "diagnostic code E0164 already used" fn bad_struct_kind_err(sess: &Session, pat: &hir::Pat, path: &hir::Path, lint: bool) { let name = pprust::path_to_string(path); let msg = format!("`{}` does not name a tuple variant or a tuple struct", name); if lint { sess.add_lint(lint::builtin::MATCH_OF_UNIT_VARIANT_VIA_PAREN_DOTDOT, pat.id, pat.span, msg); } else { span_err!(sess, pat.span, E0164, "{}", msg); } } impl<'a, 'gcx, 'tcx> PatCtxt<'a, 'gcx, 'tcx> { pub fn check_pat(&self, pat: &'gcx hir::Pat, expected: Ty<'tcx>) { let tcx = self.tcx; debug!("check_pat(pat={:?},expected={:?})", pat, expected); match pat.node { PatKind::Wild => { self.write_ty(pat.id, expected); } PatKind::Lit(ref lt) => { self.check_expr(&lt); let expr_ty = self.expr_ty(&lt); // Byte string patterns behave the same way as array patterns // They can denote both statically and dynamically sized byte arrays let mut pat_ty = expr_ty; if let hir::ExprLit(ref lt) = lt.node { if let ast::LitKind::ByteStr(_) = lt.node { let expected_ty = self.structurally_resolved_type(pat.span, expected); if let ty::TyRef(_, mt) = expected_ty.sty { if let ty::TySlice(_) = mt.ty.sty { pat_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), tcx.mk_slice(tcx.types.u8)) } } } } self.write_ty(pat.id, pat_ty); // somewhat surprising: in this case, the subtyping // relation goes the opposite way as the other // cases. Actually what we really want is not a subtyping // relation at all but rather that there exists a LUB (so // that they can be compared). However, in practice, // constants are always scalars or strings. For scalars // subtyping is irrelevant, and for strings `expr_ty` is // type is `&'static str`, so if we say that // // &'static str <: expected // // that's equivalent to there existing a LUB. self.demand_suptype(pat.span, expected, pat_ty); } PatKind::Range(ref begin, ref end) => { self.check_expr(begin); self.check_expr(end); let lhs_ty = self.expr_ty(begin); let rhs_ty = self.expr_ty(end); // Check that both end-points are of numeric or char type. 
let numeric_or_char = |ty: Ty| ty.is_numeric() || ty.is_char(); let lhs_compat = numeric_or_char(lhs_ty); let rhs_compat = numeric_or_char(rhs_ty); if !lhs_compat || !rhs_compat { let span = if !lhs_compat && !rhs_compat { pat.span } else if !lhs_compat { begin.span } else { end.span }; // Note: spacing here is intentional, we want a space before "start" and "end". span_err!(tcx.sess, span, E0029, "only char and numeric types are allowed in range patterns\n \ start type: {}\n end type: {}", self.ty_to_string(lhs_ty), self.ty_to_string(rhs_ty) ); return; } // Check that the types of the end-points can be unified. let types_unify = self.require_same_types(pat.span, rhs_ty, lhs_ty, "mismatched types in range"); // It's ok to return without a message as `require_same_types` prints an error. if !types_unify { return; } // Now that we know the types can be unified we find the unified type and use // it to type the entire expression. let common_type = self.resolve_type_vars_if_possible(&lhs_ty); self.write_ty(pat.id, common_type); // subtyping doesn't matter here, as the value is some kind of scalar self.demand_eqtype(pat.span, expected, lhs_ty); } PatKind::Path(..) | PatKind::Ident(..) if pat_is_resolved_const(&tcx.def_map.borrow(), pat) => { if let Some(pat_def) = tcx.def_map.borrow().get(&pat.id) { let const_did = pat_def.def_id(); let const_scheme = tcx.lookup_item_type(const_did); assert!(const_scheme.generics.is_empty()); let const_ty = self.instantiate_type_scheme(pat.span, &Substs::empty(), &const_scheme.ty); self.write_ty(pat.id, const_ty); // FIXME(#20489) -- we should limit the types here to scalars or something! // As with PatKind::Lit, what we really want here is that there // exist a LUB, but for the cases that can occur, subtype // is good enough. self.demand_suptype(pat.span, expected, const_ty); } else { self.write_error(pat.id); } } PatKind::Ident(bm, ref path, ref sub) if pat_is_binding(&tcx.def_map.borrow(), pat) => { let typ = self.local_ty(pat.span, pat.id); match bm { hir::BindByRef(mutbl) => { // if the binding is like // ref x | ref const x | ref mut x // then `x` is assigned a value of type `&M T` where M is the mutability // and T is the expected type. let region_var = self.next_region_var(infer::PatternRegion(pat.span)); let mt = ty::TypeAndMut { ty: expected, mutbl: mutbl }; let region_ty = tcx.mk_ref(tcx.mk_region(region_var), mt); // `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)` is // required. However, we use equality, which is stronger. See (*) for // an explanation. self.demand_eqtype(pat.span, region_ty, typ); } // otherwise the type of x is the expected type T hir::BindByValue(_) => { // As above, `T <: typeof(x)` is required but we // use equality, see (*) below. 
self.demand_eqtype(pat.span, expected, typ); } } self.write_ty(pat.id, typ); // if there are multiple arms, make sure they all agree on // what the type of the binding `x` ought to be if let Some(&canon_id) = self.map.get(&path.node) { if canon_id != pat.id { let ct = self.local_ty(pat.span, canon_id); self.demand_eqtype(pat.span, ct, typ); } if let Some(ref p) = *sub { self.check_pat(&p, expected); } } } PatKind::Ident(_, ref path, _) => { let path = hir::Path::from_name(path.span, path.node); self.check_pat_enum(pat, &path, &[], None, expected, false); } PatKind::TupleStruct(ref path, ref subpats, ddpos) => { self.check_pat_enum(pat, path, &subpats, ddpos, expected, true); } PatKind::Path(ref path) => { self.check_pat_enum(pat, path, &[], None, expected, false); } PatKind::QPath(ref qself, ref path) => { let self_ty = self.to_ty(&qself.ty); let path_res = if let Some(&d) = tcx.def_map.borrow().get(&pat.id) { if d.base_def == Def::Err { self.set_tainted_by_errors(); self.write_error(pat.id); return; } d } else if qself.position == 0 { // This is just a sentinel for finish_resolving_def_to_ty. let sentinel = self.tcx.map.local_def_id(ast::CRATE_NODE_ID); def::PathResolution { base_def: Def::Mod(sentinel), depth: path.segments.len() } } else { debug!("unbound path {:?}", pat); self.write_error(pat.id); return; }; if let Some((opt_ty, segments, def)) = self.resolve_ty_and_def_ufcs(path_res, Some(self_ty), path, pat.span, pat.id) { if self.check_assoc_item_is_const(def, pat.span) { let scheme = tcx.lookup_item_type(def.def_id()); let predicates = tcx.lookup_predicates(def.def_id()); self.instantiate_path(segments, scheme, &predicates, opt_ty, def, pat.span, pat.id); let const_ty = self.node_ty(pat.id); self.demand_suptype(pat.span, expected, const_ty); } else { self.write_error(pat.id) } } } PatKind::Struct(ref path, ref fields, etc) => { self.check_pat_struct(pat, path, fields, etc, expected); } PatKind::Tuple(ref elements, ddpos) => { let mut expected_len = elements.len(); if ddpos.is_some() { // Require known type only when `..` is present if let ty::TyTuple(ref tys) = self.structurally_resolved_type(pat.span, expected).sty { expected_len = tys.len(); } } let max_len = cmp::max(expected_len, elements.len()); let element_tys: Vec<_> = (0 .. max_len).map(|_| self.next_ty_var()).collect(); let pat_ty = tcx.mk_tup(element_tys.clone()); self.write_ty(pat.id, pat_ty); self.demand_eqtype(pat.span, expected, pat_ty); for (i, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) { self.check_pat(elem, &element_tys[i]); } } PatKind::Box(ref inner) => { let inner_ty = self.next_ty_var(); let uniq_ty = tcx.mk_box(inner_ty); if self.check_dereferencable(pat.span, expected, &inner) { // Here, `demand::subtype` is good enough, but I don't // think any errors can be introduced by using // `demand::eqtype`. self.demand_eqtype(pat.span, expected, uniq_ty); self.write_ty(pat.id, uniq_ty); self.check_pat(&inner, inner_ty); } else { self.write_error(pat.id); self.check_pat(&inner, tcx.types.err); } } PatKind::Ref(ref inner, mutbl) => { let expected = self.shallow_resolve(expected); if self.check_dereferencable(pat.span, expected, &inner) { // `demand::subtype` would be good enough, but using // `eqtype` turns out to be equally general. See (*) // below for details. // Take region, inner-type from expected type if we // can, to avoid creating needless variables. This // also helps with the bad interactions of the given // hack detailed in (*) below. 
let (rptr_ty, inner_ty) = match expected.sty { ty::TyRef(_, mt) if mt.mutbl == mutbl => { (expected, mt.ty) } _ => { let inner_ty = self.next_ty_var(); let mt = ty::TypeAndMut { ty: inner_ty, mutbl: mutbl }; let region = self.next_region_var(infer::PatternRegion(pat.span)); let rptr_ty = tcx.mk_ref(tcx.mk_region(region), mt); self.demand_eqtype(pat.span, expected, rptr_ty); (rptr_ty, inner_ty) } }; self.write_ty(pat.id, rptr_ty); self.check_pat(&inner, inner_ty); } else { self.write_error(pat.id); self.check_pat(&inner, tcx.types.err); } } PatKind::Vec(ref before, ref slice, ref after) => { let expected_ty = self.structurally_resolved_type(pat.span, expected); let inner_ty = self.next_ty_var(); let pat_ty = match expected_ty.sty { ty::TyArray(_, size) => tcx.mk_array(inner_ty, { let min_len = before.len() + after.len(); match *slice { Some(_) => cmp::max(min_len, size), None => min_len } }), _ => { let region = self.next_region_var(infer::PatternRegion(pat.span)); tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut { ty: tcx.mk_slice(inner_ty), mutbl: expected_ty.builtin_deref(true, ty::NoPreference) .map_or(hir::MutImmutable, |mt| mt.mutbl) }) } }; self.write_ty(pat.id, pat_ty); // `demand::subtype` would be good enough, but using // `eqtype` turns out to be equally general. See (*) // below for details. self.demand_eqtype(pat.span, expected, pat_ty); for elt in before { self.check_pat(&elt, inner_ty); } if let Some(ref slice) = *slice { let region = self.next_region_var(infer::PatternRegion(pat.span)); let mutbl = expected_ty.builtin_deref(true, ty::NoPreference) .map_or(hir::MutImmutable, |mt| mt.mutbl); let slice_ty = tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut { ty: tcx.mk_slice(inner_ty), mutbl: mutbl }); self.check_pat(&slice, slice_ty); } for elt in after { self.check_pat(&elt, inner_ty); } } } // (*) In most of the cases above (literals and constants being // the exception), we relate types using strict equality, evewn // though subtyping would be sufficient. There are a few reasons // for this, some of which are fairly subtle and which cost me // (nmatsakis) an hour or two debugging to remember, so I thought // I'd write them down this time. // // 1. There is no loss of expressiveness here, though it does // cause some inconvenience. What we are saying is that the type // of `x` becomes *exactly* what is expected. This can cause unnecessary // errors in some cases, such as this one: // it will cause errors in a case like this: // // ``` // fn foo<'x>(x: &'x int) { // let a = 1; // let mut z = x; // z = &a; // } // ``` // // The reason we might get an error is that `z` might be // assigned a type like `&'x int`, and then we would have // a problem when we try to assign `&a` to `z`, because // the lifetime of `&a` (i.e., the enclosing block) is // shorter than `'x`. // // HOWEVER, this code works fine. The reason is that the // expected type here is whatever type the user wrote, not // the initializer's type. In this case the user wrote // nothing, so we are going to create a type variable `Z`. // Then we will assign the type of the initializer (`&'x // int`) as a subtype of `Z`: `&'x int <: Z`. And hence we // will instantiate `Z` as a type `&'0 int` where `'0` is // a fresh region variable, with the constraint that `'x : // '0`. So basically we're all set. // // Note that there are two tests to check that this remains true // (`regions-reassign-{match,let}-bound-pointer.rs`). // // 2. Things go horribly wrong if we use subtype. 
The reason for // THIS is a fairly subtle case involving bound regions. See the // `givens` field in `region_inference`, as well as the test // `regions-relate-bound-regions-on-closures-to-inference-variables.rs`, // for details. Short version is that we must sometimes detect // relationships between specific region variables and regions // bound in a closure signature, and that detection gets thrown // off when we substitute fresh region variables here to enable // subtyping. } fn check_assoc_item_is_const(&self, def: Def, span: Span) -> bool { match def { Def::AssociatedConst(..) => true, Def::Method(..) => { span_err!(self.tcx.sess, span, E0327, "associated items in match patterns must be constants"); false } _ => { span_bug!(span, "non-associated item in check_assoc_item_is_const"); } } } pub fn check_dereferencable(&self, span: Span, expected: Ty<'tcx>, inner: &hir::Pat) -> bool { let tcx = self.tcx; if pat_is_binding(&tcx.def_map.borrow(), inner) { let expected = self.shallow_resolve(expected); expected.builtin_deref(true, ty::NoPreference).map_or(true, |mt| match mt.ty.sty { ty::TyTrait(_) => { // This is "x = SomeTrait" being reduced from // "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error. span_err!(tcx.sess, span, E0033, "type `{}` cannot be dereferenced", self.ty_to_string(expected)); false } _ => true }) } else { true } } } impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn check_match(&self, expr: &'gcx hir::Expr, discrim: &'gcx hir::Expr, arms: &'gcx [hir::Arm], expected: Expectation<'tcx>, match_src: hir::MatchSource) { let tcx = self.tcx; // Not entirely obvious: if matches may create ref bindings, we // want to use the *precise* type of the discriminant, *not* some // supertype, as the "discriminant type" (issue #23116). let contains_ref_bindings = arms.iter() .filter_map(|a| tcx.arm_contains_ref_binding(a)) .max_by_key(|m| match *m { hir::MutMutable => 1, hir::MutImmutable => 0, }); let discrim_ty; if let Some(m) = contains_ref_bindings { self.check_expr_with_lvalue_pref(discrim, LvaluePreference::from_mutbl(m)); discrim_ty = self.expr_ty(discrim); } else { // ...but otherwise we want to use any supertype of the // discriminant. This is sort of a workaround, see note (*) in // `check_pat` for some details. discrim_ty = self.next_ty_var(); self.check_expr_has_type(discrim, discrim_ty); }; // Typecheck the patterns first, so that we get types for all the // bindings. for arm in arms { let pcx = PatCtxt { fcx: self, map: pat_id_map(&tcx.def_map, &arm.pats[0]), }; for p in &arm.pats { pcx.check_pat(&p, discrim_ty); } } // Now typecheck the blocks. // // The result of the match is the common supertype of all the // arms. Start out the value as bottom, since it's the, well, // bottom the type lattice, and we'll be moving up the lattice as // we process each arm. (Note that any match with 0 arms is matching // on any empty type and is therefore unreachable; should the flow // of execution reach it, we will panic, so bottom is an appropriate // type in that case) let expected = expected.adjust_for_branches(self); let mut result_ty = self.next_diverging_ty_var(); let coerce_first = match expected { // We don't coerce to `()` so that if the match expression is a // statement it's branches can have any consistent type. That allows // us to give better error messages (pointing to a usually better // arm for inconsistent arms or to the whole match when a `()` type // is required). 
Expectation::ExpectHasType(ety) if ety != self.tcx.mk_nil() => { ety } _ => result_ty }; for (i, arm) in arms.iter().enumerate() { if let Some(ref e) = arm.guard { self.check_expr_has_type(e, tcx.types.bool); } self.check_expr_with_expectation(&arm.body, expected); let arm_ty = self.expr_ty(&arm.body); if result_ty.references_error() || arm_ty.references_error() { result_ty = tcx.types.err; continue; } // Handle the fallback arm of a desugared if-let like a missing else. let is_if_let_fallback = match match_src { hir::MatchSource::IfLetDesugar { contains_else_clause: false } => { i == arms.len() - 1 && arm_ty.is_nil() } _ => false }; let origin = if is_if_let_fallback { TypeOrigin::IfExpressionWithNoElse(expr.span) } else { TypeOrigin::MatchExpressionArm(expr.span, arm.body.span, match_src) }; let result = if is_if_let_fallback { self.eq_types(true, origin, arm_ty, result_ty) .map(|InferOk { obligations, .. }| { // FIXME(#32730) propagate obligations assert!(obligations.is_empty()); arm_ty }) } else if i == 0 { // Special-case the first arm, as it has no "previous expressions". self.try_coerce(&arm.body, coerce_first) } else { let prev_arms = || arms[..i].iter().map(|arm| &*arm.body); self.try_find_coercion_lub(origin, prev_arms, result_ty, &arm.body) }; result_ty = match result { Ok(ty) => ty, Err(e) => { let (expected, found) = if is_if_let_fallback { (arm_ty, result_ty) } else { (result_ty, arm_ty) }; self.report_mismatched_types(origin, expected, found, e); self.tcx.types.err } }; } self.write_ty(expr.id, result_ty); } } impl<'a, 'gcx, 'tcx> PatCtxt<'a, 'gcx, 'tcx> { pub fn check_pat_struct(&self, pat: &'gcx hir::Pat, path: &hir::Path, fields: &'gcx [Spanned<hir::FieldPat>], etc: bool, expected: Ty<'tcx>) { let tcx = self.tcx; let def = tcx.def_map.borrow().get(&pat.id).unwrap().full_def(); let variant = match self.def_struct_variant(def, path.span) { Some((_, variant)) => variant, None => { let name = pprust::path_to_string(path); span_err!(tcx.sess, pat.span, E0163, "`{}` does not name a struct or a struct variant", name); self.write_error(pat.id); for field in fields { self.check_pat(&field.node.pat, tcx.types.err); } return; } }; let pat_ty = self.instantiate_type(def.def_id(), path); let item_substs = match pat_ty.sty { ty::TyStruct(_, substs) | ty::TyEnum(_, substs) => substs, _ => span_bug!(pat.span, "struct variant is not an ADT") }; self.demand_eqtype(pat.span, expected, pat_ty); self.check_struct_pat_fields(pat.span, fields, variant, &item_substs, etc); self.write_ty(pat.id, pat_ty); self.write_substs(pat.id, ty::ItemSubsts { substs: item_substs }); } fn check_pat_enum(&self, pat: &hir::Pat, path: &hir::Path, subpats: &'gcx [P<hir::Pat>], ddpos: Option<usize>, expected: Ty<'tcx>, is_tuple_struct_pat: bool) { // Typecheck the path. let tcx = self.tcx; let path_res = match tcx.def_map.borrow().get(&pat.id) { Some(&path_res) if path_res.base_def != Def::Err => path_res, _ => { self.set_tainted_by_errors(); self.write_error(pat.id); for pat in subpats { self.check_pat(&pat, tcx.types.err); } return; } }; let (opt_ty, segments, def) = match self.resolve_ty_and_def_ufcs(path_res, None, path, pat.span, pat.id) { Some(resolution) => resolution, // Error handling done inside resolve_ty_and_def_ufcs, so if // resolution fails just return. None => {return;} }; // Items that were partially resolved before should have been resolved to // associated constants (i.e. not methods). 
if path_res.depth != 0 && !self.check_assoc_item_is_const(def, pat.span) { self.write_error(pat.id); return; } let enum_def = def.variant_def_ids() .map_or_else(|| def.def_id(), |(enum_def, _)| enum_def); let ctor_scheme = tcx.lookup_item_type(enum_def); let ctor_predicates = tcx.lookup_predicates(enum_def); let path_scheme = if ctor_scheme.ty.is_fn() { let fn_ret = tcx.no_late_bound_regions(&ctor_scheme.ty.fn_ret()).unwrap(); ty::TypeScheme { ty: fn_ret.unwrap(), generics: ctor_scheme.generics, } } else { ctor_scheme }; self.instantiate_path(segments, path_scheme, &ctor_predicates, opt_ty, def, pat.span, pat.id); let report_bad_struct_kind = |is_warning| { bad_struct_kind_err(tcx.sess, pat, path, is_warning); if is_warning { return; } self.write_error(pat.id); for pat in subpats { self.check_pat(&pat, tcx.types.err); } }; // If we didn't have a fully resolved path to start with, we had an // associated const, and we should quit now, since the rest of this // function uses checks specific to structs and enums. if path_res.depth != 0 { if is_tuple_struct_pat { report_bad_struct_kind(false); } else { let pat_ty = self.node_ty(pat.id); self.demand_suptype(pat.span, expected, pat_ty); } return; } let pat_ty = self.node_ty(pat.id); self.demand_eqtype(pat.span, expected, pat_ty); let real_path_ty = self.node_ty(pat.id); let (kind_name, variant, expected_substs) = match real_path_ty.sty { ty::TyEnum(enum_def, expected_substs) => { let variant = enum_def.variant_of_def(def); ("variant", variant, expected_substs) } ty::TyStruct(struct_def, expected_substs) => { let variant = struct_def.struct_variant(); ("struct", variant, expected_substs) } _ => { report_bad_struct_kind(false); return; } }; match (is_tuple_struct_pat, variant.kind()) { (true, ty::VariantKind::Unit) if subpats.is_empty() && ddpos.is_some() => { // Matching unit structs with tuple variant patterns (`UnitVariant(..)`) // is allowed for backward compatibility. report_bad_struct_kind(true); } (true, ty::VariantKind::Unit) | (false, ty::VariantKind::Tuple) | (_, ty::VariantKind::Struct) => { report_bad_struct_kind(false); return } _ => {} } if subpats.len() == variant.fields.len() || subpats.len() < variant.fields.len() && ddpos.is_some() { for (i, subpat) in subpats.iter().enumerate_and_adjust(variant.fields.len(), ddpos) { let field_ty = self.field_ty(subpat.span, &variant.fields[i], expected_substs); self.check_pat(&subpat, field_ty); } } else { span_err!(tcx.sess, pat.span, E0023, "this pattern has {} field{}, but the corresponding {} has {} field{}", subpats.len(), if subpats.len() == 1 {""} else {"s"}, kind_name, variant.fields.len(), if variant.fields.len() == 1 {""} else {"s"}); for pat in subpats { self.check_pat(&pat, tcx.types.err); } } } /// `path` is the AST path item naming the type of this struct. /// `fields` is the field patterns of the struct pattern. /// `struct_fields` describes the type of each field of the struct. /// `struct_id` is the ID of the struct. /// `etc` is true if the pattern said '...' and false otherwise. pub fn check_struct_pat_fields(&self, span: Span, fields: &'gcx [Spanned<hir::FieldPat>], variant: ty::VariantDef<'tcx>, substs: &Substs<'tcx>, etc: bool) { let tcx = self.tcx; // Index the struct fields' types. let field_map = variant.fields .iter() .map(|field| (field.name, field)) .collect::<FnvHashMap<_, _>>(); // Keep track of which fields have already appeared in the pattern. let mut used_fields = FnvHashMap(); // Typecheck each field. 
for &Spanned { node: ref field, span } in fields { let field_ty = match used_fields.entry(field.name) { Occupied(occupied) => { let mut err = struct_span_err!(tcx.sess, span, E0025, "field `{}` bound multiple times \ in the pattern", field.name); span_note!(&mut err, *occupied.get(), "field `{}` previously bound here", field.name); err.emit(); tcx.types.err } Vacant(vacant) => { vacant.insert(span); field_map.get(&field.name) .map(|f| self.field_ty(span, f, substs)) .unwrap_or_else(|| { span_err!(tcx.sess, span, E0026, "struct `{}` does not have a field named `{}`", tcx.item_path_str(variant.did), field.name); tcx.types.err }) } }; self.check_pat(&field.pat, field_ty); } // Report an error if not all the fields were specified. if !etc { for field in variant.fields .iter() .filter(|field| !used_fields.contains_key(&field.name)) { span_err!(tcx.sess, span, E0027, "pattern does not mention field `{}`", field.name); } } } }
43.886585
99
0.485398
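The row above is the rustc type-checker logic that unifies the types of `match` arms, coercing each arm toward a running `result_ty` and treating the hidden fallback arm of a desugared `if let` like a missing `else`. Below is a minimal, self-contained illustration of the language rule that code enforces, written as ordinary Rust rather than compiler internals:

fn describe(x: Option<i32>) -> &'static str {
    // Every arm must coerce to one common type; here all arms are &'static str.
    match x {
        Some(n) if n > 0 => "positive",
        Some(_) => "zero or negative",
        None => "absent",
    }
}

fn main() {
    println!("{}", describe(Some(3)));

    // An `if let` without `else` desugars to a match whose hidden fallback arm has
    // type `()`, so the whole expression must be `()` as well:
    // let y = if let Some(n) = Some(3) { n };  // error: `if` may be missing an `else` clause
    let _unit: () = if let Some(n) = Some(3) { let _ = n; };
}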
11527abd19031131a8e15e58a8d2af3a192a5614
3,649
use crate::types::{TyKind, Type}; use util::span::Span; /// Constant expression or pattern #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Hash)] pub enum Constant { Unit, Bool(bool), Nat(u32), } #[derive(Clone, Debug, PartialEq, PartialOrd)] pub enum Kind { /// Constant Const(Constant), /// Variable Var(usize), /// Term abstraction /// \x: Ty. x /// Introduce a lambda term Abs(Box<Type>, Box<Term>), /// Term application /// m n /// Eliminate a lambda term App(Box<Term>, Box<Term>), /// Type abstraction /// \X. \x: X. x /// Introduce a universally quantified type TyAbs(Box<TyKind>, Box<Term>), /// Type application /// id [Nat] 1 /// Eliminate a universally quantified type TyApp(Box<Term>, Box<Type>), /// Record term /// {label1 = Tm1, label2 = Tm2, etc} /// Invariant that all fields have unique labels Record(Record), Index(Box<Term>, String), /// Injection into a sum type /// fields: type constructor tag, term, and sum type Injection(String, Box<Term>, Box<Type>), Fold(Box<Type>, Box<Term>), Unfold(Box<Type>, Box<Term>), /// Introduce an existential type /// { *Ty1, Term } as {∃X.Ty} /// essentially, concrete representation as interface Pack(Box<Type>, Box<Term>, Box<Type>), /// Unpack an existential type /// open {∃X, bind} in body -- X is bound as a TyVar, and bind as Var(0) /// Eliminate an existential type Unpack(Box<Term>, Box<Term>), } #[derive(Clone, Debug, PartialEq, PartialOrd)] pub struct Field { pub span: Span, pub label: String, pub expr: Box<Term>, } #[derive(Clone, Debug, PartialEq, PartialOrd)] pub struct Record { pub fields: Vec<Field>, } #[derive(Clone, Debug, PartialEq, PartialOrd)] pub struct Term { pub span: Span, pub kind: Kind, } impl Term { pub fn new(kind: Kind, span: Span) -> Term { Term { kind, span } } } impl Record { pub fn get(&self, label: &str) -> Option<&Field> { for field in &self.fields { if field.label == label { return Some(field); } } None } } use std::fmt; impl fmt::Display for Term { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match &self.kind { Kind::Var(idx) => write!(f, "#{}", idx), Kind::Const(Constant::Bool(b)) => write!(f, "{}", b), Kind::Const(Constant::Nat(b)) => write!(f, "{}", b), Kind::Const(Constant::Unit) => write!(f, "()"), Kind::Abs(ty, body) => write!(f, "(λx:{}. {})", ty, body), Kind::App(m, n) => write!(f, "{} {}", m, n), Kind::TyAbs(kind, body) => write!(f, "ΛX::{}. {}", kind, body), Kind::TyApp(body, ty) => write!(f, "{} [{}]", body, ty), Kind::Pack(witness, body, sig) => write!(f, "{{*{}, {}}} as {}", witness, body, sig), Kind::Unpack(m, n) => write!(f, "unpack {} as {}", m, n), Kind::Record(rec) => write!( f, "{{\n{}\n}}", rec.fields .iter() .map(|fi| format!("\t{}: {}", fi.label, fi.expr)) .collect::<Vec<_>>() .join(",\n") ), Kind::Index(t1, t2) => write!(f, "{}.{}", t1, t2), Kind::Injection(label, tm, ty) => write!(f, "{} of {} as {}", label, tm, ty), Kind::Fold(ty, term) => write!(f, "fold [{}] {}", ty, term), Kind::Unfold(ty, term) => write!(f, "unfold [{}] {}", ty, term), } } }
29.427419
97
0.509729
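The row above defines a de Bruijn-indexed term language for a System F-style calculus (`Var(usize)` indices, `Abs`/`App` at the term level, `TyAbs`/`TyApp` at the type level). Since the `Type`, `TyKind`, and `Span` types it depends on are not included in this row, the sketch below uses a stripped-down stand-in enum purely to show how such boxed constructors nest; it is not the crate's actual API:

// Stripped-down stand-in for the `Term`/`Kind` shape above (no spans, no type layer).
#[derive(Debug)]
enum Tm {
    Var(usize),            // de Bruijn index: 0 is the innermost binder
    Abs(Box<Tm>),          // \x. body
    App(Box<Tm>, Box<Tm>), // m n
}

fn main() {
    // (\x. x) applied to whatever variable has index 0 in the surrounding context.
    let id = Tm::Abs(Box::new(Tm::Var(0)));
    let app = Tm::App(Box::new(id), Box::new(Tm::Var(0)));
    println!("{:?}", app); // App(Abs(Var(0)), Var(0))
}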
21c53e764aacded4fcd280620a3afb64f865f11a
5,708
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT #[cfg(any(feature = "v2_2", feature = "dox"))] use glib::object::Cast; use glib::object::IsA; #[cfg(any(feature = "v2_2", feature = "dox"))] use glib::signal::connect_raw; #[cfg(any(feature = "v2_2", feature = "dox"))] use glib::signal::SignalHandlerId; use glib::translate::*; #[cfg(any(feature = "v2_2", feature = "dox"))] use glib::GString; #[cfg(any(feature = "v2_2", feature = "dox"))] use glib_sys; #[cfg(any(feature = "v2_2", feature = "dox"))] use std::boxed::Box as Box_; use std::fmt; #[cfg(any(feature = "v2_2", feature = "dox"))] use std::mem::transmute; use webkit2_sys; #[cfg(any(feature = "v2_2", feature = "dox"))] use AuthenticationScheme; #[cfg(any(feature = "v2_2", feature = "dox"))] use Credential; glib_wrapper! { pub struct AuthenticationRequest(Object<webkit2_sys::WebKitAuthenticationRequest, webkit2_sys::WebKitAuthenticationRequestClass, AuthenticationRequestClass>); match fn { get_type => || webkit2_sys::webkit_authentication_request_get_type(), } } pub const NONE_AUTHENTICATION_REQUEST: Option<&AuthenticationRequest> = None; pub trait AuthenticationRequestExt: 'static { #[cfg(any(feature = "v2_2", feature = "dox"))] fn can_save_credentials(&self) -> bool; #[cfg(any(feature = "v2_2", feature = "dox"))] fn cancel(&self); #[cfg(any(feature = "v2_2", feature = "dox"))] fn get_host(&self) -> Option<GString>; #[cfg(any(feature = "v2_2", feature = "dox"))] fn get_port(&self) -> u32; #[cfg(any(feature = "v2_2", feature = "dox"))] fn get_proposed_credential(&self) -> Option<Credential>; #[cfg(any(feature = "v2_2", feature = "dox"))] fn get_realm(&self) -> Option<GString>; #[cfg(any(feature = "v2_2", feature = "dox"))] fn get_scheme(&self) -> AuthenticationScheme; #[cfg(any(feature = "v2_2", feature = "dox"))] fn is_for_proxy(&self) -> bool; #[cfg(any(feature = "v2_2", feature = "dox"))] fn is_retry(&self) -> bool; #[cfg(any(feature = "v2_2", feature = "dox"))] fn connect_cancelled<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; } impl<O: IsA<AuthenticationRequest>> AuthenticationRequestExt for O { #[cfg(any(feature = "v2_2", feature = "dox"))] fn can_save_credentials(&self) -> bool { unsafe { from_glib( webkit2_sys::webkit_authentication_request_can_save_credentials( self.as_ref().to_glib_none().0, ), ) } } #[cfg(any(feature = "v2_2", feature = "dox"))] fn cancel(&self) { unsafe { webkit2_sys::webkit_authentication_request_cancel(self.as_ref().to_glib_none().0); } } #[cfg(any(feature = "v2_2", feature = "dox"))] fn get_host(&self) -> Option<GString> { unsafe { from_glib_none(webkit2_sys::webkit_authentication_request_get_host( self.as_ref().to_glib_none().0, )) } } #[cfg(any(feature = "v2_2", feature = "dox"))] fn get_port(&self) -> u32 { unsafe { webkit2_sys::webkit_authentication_request_get_port(self.as_ref().to_glib_none().0) } } #[cfg(any(feature = "v2_2", feature = "dox"))] fn get_proposed_credential(&self) -> Option<Credential> { unsafe { from_glib_full( webkit2_sys::webkit_authentication_request_get_proposed_credential( self.as_ref().to_glib_none().0, ), ) } } #[cfg(any(feature = "v2_2", feature = "dox"))] fn get_realm(&self) -> Option<GString> { unsafe { from_glib_none(webkit2_sys::webkit_authentication_request_get_realm( self.as_ref().to_glib_none().0, )) } } #[cfg(any(feature = "v2_2", feature = "dox"))] fn get_scheme(&self) -> AuthenticationScheme { unsafe { from_glib(webkit2_sys::webkit_authentication_request_get_scheme( 
self.as_ref().to_glib_none().0, )) } } #[cfg(any(feature = "v2_2", feature = "dox"))] fn is_for_proxy(&self) -> bool { unsafe { from_glib(webkit2_sys::webkit_authentication_request_is_for_proxy( self.as_ref().to_glib_none().0, )) } } #[cfg(any(feature = "v2_2", feature = "dox"))] fn is_retry(&self) -> bool { unsafe { from_glib(webkit2_sys::webkit_authentication_request_is_retry( self.as_ref().to_glib_none().0, )) } } #[cfg(any(feature = "v2_2", feature = "dox"))] fn connect_cancelled<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn cancelled_trampoline<P, F: Fn(&P) + 'static>( this: *mut webkit2_sys::WebKitAuthenticationRequest, f: glib_sys::gpointer, ) where P: IsA<AuthenticationRequest>, { let f: &F = &*(f as *const F); f(&AuthenticationRequest::from_glib_borrow(this).unsafe_cast()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"cancelled\0".as_ptr() as *const _, Some(transmute(cancelled_trampoline::<Self, F> as usize)), Box_::into_raw(f), ) } } } impl fmt::Display for AuthenticationRequest { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "AuthenticationRequest") } }
31.711111
162
0.579187
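The row above is the gir-generated binding for WebKitAuthenticationRequest; its `AuthenticationRequestExt` trait exposes accessors such as `get_host`, `get_port`, `get_realm`, `is_retry`, `connect_cancelled`, and `cancel`. The sketch below shows how a request handed to application code by a WebKit callback might be inspected and declined; the crate and prelude paths are assumptions (webkit2gtk-style bindings), only the method names come from the generated code above:

// Sketch only: `webkit2gtk` paths are assumed; the signal wiring that delivers
// the request to this function is omitted.
use webkit2gtk::prelude::*;
use webkit2gtk::AuthenticationRequest;

fn handle_auth(request: &AuthenticationRequest) {
    println!(
        "auth challenge from {}:{} (realm {:?}, retry: {})",
        request.get_host().map(|h| h.to_string()).unwrap_or_default(),
        request.get_port(),
        request.get_realm(),
        request.is_retry(),
    );

    // React if WebKit withdraws the challenge on its own.
    request.connect_cancelled(|_| println!("challenge cancelled by the engine"));

    // This sketch simply declines to authenticate.
    request.cancel();
}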
f50001046880ba4a8e43b7a86d1a079838670809
10,715
#[doc = "Register `OAR1` reader"] pub struct R(crate::R<OAR1_SPEC>); impl core::ops::Deref for R { type Target = crate::R<OAR1_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<OAR1_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<OAR1_SPEC>) -> Self { R(reader) } } #[doc = "Register `OAR1` writer"] pub struct W(crate::W<OAR1_SPEC>); impl core::ops::Deref for W { type Target = crate::W<OAR1_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<OAR1_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<OAR1_SPEC>) -> Self { W(writer) } } #[doc = "Field `OA1` reader - OA1"] pub struct OA1_R(crate::FieldReader<bool, bool>); impl OA1_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { OA1_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for OA1_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `OA1` writer - OA1"] pub struct OA1_W<'a> { w: &'a mut W, } impl<'a> OA1_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub unsafe fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub unsafe fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } #[doc = "Field `OA11_7` reader - OA11_7"] pub struct OA11_7_R(crate::FieldReader<u8, u8>); impl OA11_7_R { #[inline(always)] pub(crate) fn new(bits: u8) -> Self { OA11_7_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for OA11_7_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `OA11_7` writer - OA11_7"] pub struct OA11_7_W<'a> { w: &'a mut W, } impl<'a> OA11_7_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x7f << 1)) | ((value as u32 & 0x7f) << 1); self.w } } #[doc = "Field `OA18_9` reader - OA18_9"] pub struct OA18_9_R(crate::FieldReader<u8, u8>); impl OA18_9_R { #[inline(always)] pub(crate) fn new(bits: u8) -> Self { OA18_9_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for OA18_9_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `OA18_9` writer - OA18_9"] pub struct OA18_9_W<'a> { w: &'a mut W, } impl<'a> OA18_9_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 8)) | ((value as u32 & 0x03) << 8); self.w } } #[doc = "OA1MODE\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum OA1MODE_A { #[doc = "0: Own address 1 is a 7-bit address"] BIT7 = 0, #[doc = "1: Own address 1 is a 10-bit address"] BIT10 = 1, } impl From<OA1MODE_A> for bool { #[inline(always)] fn from(variant: OA1MODE_A) -> Self { variant as u8 != 0 } } #[doc = "Field `OA1MODE` reader - OA1MODE"] pub struct OA1MODE_R(crate::FieldReader<bool, OA1MODE_A>); impl OA1MODE_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { OA1MODE_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> OA1MODE_A { 
match self.bits { false => OA1MODE_A::BIT7, true => OA1MODE_A::BIT10, } } #[doc = "Checks if the value of the field is `BIT7`"] #[inline(always)] pub fn is_bit7(&self) -> bool { **self == OA1MODE_A::BIT7 } #[doc = "Checks if the value of the field is `BIT10`"] #[inline(always)] pub fn is_bit10(&self) -> bool { **self == OA1MODE_A::BIT10 } } impl core::ops::Deref for OA1MODE_R { type Target = crate::FieldReader<bool, OA1MODE_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `OA1MODE` writer - OA1MODE"] pub struct OA1MODE_W<'a> { w: &'a mut W, } impl<'a> OA1MODE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: OA1MODE_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Own address 1 is a 7-bit address"] #[inline(always)] pub fn bit7(self) -> &'a mut W { self.variant(OA1MODE_A::BIT7) } #[doc = "Own address 1 is a 10-bit address"] #[inline(always)] pub fn bit10(self) -> &'a mut W { self.variant(OA1MODE_A::BIT10) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 10)) | ((value as u32 & 0x01) << 10); self.w } } #[doc = "OA1EN\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum OA1EN_A { #[doc = "0: Own address 1 disabled. The received slave address OA1 is NACKed"] DISABLED = 0, #[doc = "1: Own address 1 enabled. The received slave address OA1 is ACKed"] ENABLED = 1, } impl From<OA1EN_A> for bool { #[inline(always)] fn from(variant: OA1EN_A) -> Self { variant as u8 != 0 } } #[doc = "Field `OA1EN` reader - OA1EN"] pub struct OA1EN_R(crate::FieldReader<bool, OA1EN_A>); impl OA1EN_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { OA1EN_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> OA1EN_A { match self.bits { false => OA1EN_A::DISABLED, true => OA1EN_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { **self == OA1EN_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { **self == OA1EN_A::ENABLED } } impl core::ops::Deref for OA1EN_R { type Target = crate::FieldReader<bool, OA1EN_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `OA1EN` writer - OA1EN"] pub struct OA1EN_W<'a> { w: &'a mut W, } impl<'a> OA1EN_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: OA1EN_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Own address 1 disabled. The received slave address OA1 is NACKed"] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OA1EN_A::DISABLED) } #[doc = "Own address 1 enabled. 
The received slave address OA1 is ACKed"] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OA1EN_A::ENABLED) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 15)) | ((value as u32 & 0x01) << 15); self.w } } impl R { #[doc = "Bit 0 - OA1"] #[inline(always)] pub fn oa1(&self) -> OA1_R { OA1_R::new((self.bits & 0x01) != 0) } #[doc = "Bits 1:7 - OA11_7"] #[inline(always)] pub fn oa11_7(&self) -> OA11_7_R { OA11_7_R::new(((self.bits >> 1) & 0x7f) as u8) } #[doc = "Bits 8:9 - OA18_9"] #[inline(always)] pub fn oa18_9(&self) -> OA18_9_R { OA18_9_R::new(((self.bits >> 8) & 0x03) as u8) } #[doc = "Bit 10 - OA1MODE"] #[inline(always)] pub fn oa1mode(&self) -> OA1MODE_R { OA1MODE_R::new(((self.bits >> 10) & 0x01) != 0) } #[doc = "Bit 15 - OA1EN"] #[inline(always)] pub fn oa1en(&self) -> OA1EN_R { OA1EN_R::new(((self.bits >> 15) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - OA1"] #[inline(always)] pub fn oa1(&mut self) -> OA1_W { OA1_W { w: self } } #[doc = "Bits 1:7 - OA11_7"] #[inline(always)] pub fn oa11_7(&mut self) -> OA11_7_W { OA11_7_W { w: self } } #[doc = "Bits 8:9 - OA18_9"] #[inline(always)] pub fn oa18_9(&mut self) -> OA18_9_W { OA18_9_W { w: self } } #[doc = "Bit 10 - OA1MODE"] #[inline(always)] pub fn oa1mode(&mut self) -> OA1MODE_W { OA1MODE_W { w: self } } #[doc = "Bit 15 - OA1EN"] #[inline(always)] pub fn oa1en(&mut self) -> OA1EN_W { OA1EN_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Own address register 1\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [oar1](index.html) module"] pub struct OAR1_SPEC; impl crate::RegisterSpec for OAR1_SPEC { type Ux = u32; } #[doc = "`read()` method returns [oar1::R](R) reader structure"] impl crate::Readable for OAR1_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [oar1::W](W) writer structure"] impl crate::Writable for OAR1_SPEC { type Writer = W; } #[doc = "`reset()` method sets OAR1 to value 0"] impl crate::Resettable for OAR1_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
28.49734
407
0.559963
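The row above is an svd2rust-generated accessor for an I2C "own address register 1", with field writers such as `oa11_7().bits(..)`, `oa1mode().bit7()`, and `oa1en().enabled()`. A sketch of typical application-side use follows; `pac` and `I2C1` are placeholder names for whatever peripheral access crate actually exposes this register, only the field API is taken from the row above:

// Configure and enable a 7-bit own address (sketch; `pac`/`I2C1` are placeholders).
fn configure_own_address(i2c: &pac::I2C1, addr7: u8) {
    i2c.oar1.write(|w| unsafe {
        w.oa11_7().bits(addr7)  // 7-bit slave address lives in bits 1..=7
            .oa1mode().bit7()   // interpret OA1 as a 7-bit address
            .oa1en().enabled()  // enable (ACK) own address 1
    });

    // Read back through the generated reader API.
    let r = i2c.oar1.read();
    assert!(r.oa1en().is_enabled());
    assert_eq!(r.oa11_7().bits(), addr7);
}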
1c921d6c35a15cbd10d20dad3e5df1bd563bb7c9
3,213
#![allow(clippy::module_inception)] #![allow(clippy::upper_case_acronyms)] #![allow(clippy::large_enum_variant)] #![allow(clippy::wrong_self_convention)] #![allow(clippy::should_implement_trait)] #![allow(clippy::blacklisted_name)] #![allow(clippy::vec_init_then_push)] #![allow(clippy::type_complexity)] #![allow(rustdoc::bare_urls)] #![warn(missing_docs)] //! <fullname>IoT data</fullname> //! <p>IoT data enables secure, bi-directional communication between Internet-connected things (such as sensors, //! actuators, embedded devices, or smart appliances) and the Amazon Web Services cloud. It implements a broker for applications and //! things to publish messages over HTTP (Publish) and retrieve, update, and delete shadows. A shadow is a //! persistent representation of your things and their state in the Amazon Web Services cloud.</p> //! <p>Find the endpoint address for actions in IoT data by running this CLI command:</p> //! <p> //! <code>aws iot describe-endpoint --endpoint-type iot:Data-ATS</code> //! </p> //! <p>The service name used by <a href="https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html">Amazon Web ServicesSignature Version 4</a> //! to sign requests is: <i>iotdevicegateway</i>.</p> //! //! # Crate Organization //! //! The entry point for most customers will be [`Client`]. [`Client`] exposes one method for each API offered //! by the service. //! //! Some APIs require complex or nested arguments. These exist in [`model`](crate::model). //! //! Lastly, errors that can be returned by the service are contained within [`error`]. [`Error`] defines a meta //! error encompassing all possible errors that can be returned by the service. //! //! The other modules within this crate are not required for normal usage. // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. pub use error_meta::Error; #[doc(inline)] pub use config::Config; mod aws_endpoint; /// Client and fluent builders for calling the service. pub mod client; /// Configuration for the service. pub mod config; /// Errors that can occur when calling the service. pub mod error; mod error_meta; mod http_serde; /// Input structures for operations. pub mod input; mod json_deser; mod json_errors; /// Generated accessors for nested fields pub mod lens; pub mod middleware; /// Data structures used by operation inputs/outputs. pub mod model; mod no_credentials; /// All operations that this crate can perform. pub mod operation; mod operation_deser; mod operation_ser; /// Output structures for operations. pub mod output; /// Paginators for the service pub mod paginator; /// Crate version number. pub static PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); /// Re-exported types from supporting crates. pub mod types { pub use aws_smithy_http::result::SdkError; pub use aws_smithy_types::Blob; } static API_METADATA: aws_http::user_agent::ApiMetadata = aws_http::user_agent::ApiMetadata::new("iotdataplane", PKG_VERSION); pub use aws_smithy_http::endpoint::Endpoint; pub use aws_smithy_types::retry::RetryConfig; pub use aws_types::app_name::AppName; pub use aws_types::region::Region; pub use aws_types::Credentials; #[doc(inline)] pub use client::Client;
38.25
155
0.748833
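The row above is the crate root of the generated IoT data-plane SDK, whose docs point at `Client` as the entry point with one fluent builder per operation and re-export `Blob` under `types`. The sketch below shows what a call might look like; the operation and builder method names (`publish`, `topic`, `payload`) and the `aws_config::load_from_env` setup are assumptions based on the usual smithy-rs pattern, not taken from the row itself:

use aws_sdk_iotdataplane::{types::Blob, Client};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Region and credentials resolved from the environment (assumed aws-config API).
    let shared_config = aws_config::load_from_env().await;
    let client = Client::new(&shared_config);

    // Publish a small JSON payload to an MQTT topic through the IoT data endpoint.
    client
        .publish()
        .topic("devices/thing-1/telemetry")
        .payload(Blob::new(br#"{"temp": 21.5}"#.to_vec()))
        .send()
        .await?;

    Ok(())
}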
e551d4b06037e70a5d93ce45e162d96f4274e40c
30,910
/* * Copyright Stalwart Labs, Minter Ltd. See the COPYING * file at the top-level directory of this distribution. * * Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or * https://www.apache.org/licenses/LICENSE-2.0> or the MIT license * <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your * option. This file may not be copied, modified, or distributed * except according to those terms. */ use std::borrow::Cow; use crate::{ decoders::{ base64::decode_base64, charsets::map::get_charset_decoder, quoted_printable::decode_quoted_printable, DecodeFnc, DecodeResult, }, ContentType, HeaderName, HeaderValue, Message, MessageAttachment, MessagePart, MessagePartId, MessageStructure, MultiPart, Part, RawHeaders, RfcHeader, RfcHeaders, }; use super::{ header::parse_headers, mime::{get_bytes_to_boundary, seek_crlf_end, seek_next_part, skip_crlf, skip_multipart_end}, }; #[derive(Debug, PartialEq)] enum MimeType { MultipartMixed, MultipartAlernative, MultipartRelated, MultipartDigest, TextPlain, TextHtml, TextOther, Inline, Message, Other, } impl Default for MimeType { fn default() -> Self { MimeType::Message } } fn result_to_string<'x>( result: DecodeResult, data: &'x [u8], content_type: Option<&ContentType>, ) -> Cow<'x, str> { match ( result, content_type.and_then(|ct| { ct.get_attribute("charset") .and_then(|c| get_charset_decoder(c.as_bytes())) }), ) { (DecodeResult::Owned(vec), Some(charset_decoder)) => charset_decoder(&vec).into(), (DecodeResult::Owned(vec), None) => String::from_utf8(vec) .unwrap_or_else(|e| String::from_utf8_lossy(e.as_bytes()).into_owned()) .into(), (DecodeResult::Borrowed((from, to)), Some(charset_decoder)) => { charset_decoder(&data[from..to]).into() } (DecodeResult::Borrowed((from, to)), None) => String::from_utf8_lossy(&data[from..to]), (DecodeResult::Empty, _) => "\n".to_string().into(), } } fn result_to_bytes(result: DecodeResult, data: &[u8]) -> Cow<[u8]> { match result { DecodeResult::Owned(vec) => Cow::Owned(vec), DecodeResult::Borrowed((from, to)) => Cow::Borrowed(&data[from..to]), DecodeResult::Empty => Cow::from(vec![b'?']), } } #[inline(always)] fn get_mime_type( content_type: Option<&ContentType>, parent_content_type: &MimeType, ) -> (bool, bool, bool, MimeType) { if let Some(content_type) = content_type { match content_type.get_type() { "multipart" => ( true, false, false, match content_type.get_subtype() { Some("mixed") => MimeType::MultipartMixed, Some("alternative") => MimeType::MultipartAlernative, Some("related") => MimeType::MultipartRelated, Some("digest") => MimeType::MultipartDigest, _ => MimeType::Other, }, ), "text" => match content_type.get_subtype() { Some("plain") => (false, true, true, MimeType::TextPlain), Some("html") => (false, true, true, MimeType::TextHtml), _ => (false, false, true, MimeType::TextOther), }, "image" | "audio" | "video" => (false, true, false, MimeType::Inline), "message" if content_type.get_subtype() == Some("rfc822") => { (false, false, false, MimeType::Message) } _ => (false, false, false, MimeType::Other), } } else if let MimeType::MultipartDigest = parent_content_type { (false, false, false, MimeType::Message) } else { (false, true, true, MimeType::TextPlain) } } #[inline(always)] fn add_missing_type<'x>( headers: &mut RfcHeaders<'x>, c_type: Cow<'x, str>, c_subtype: Cow<'x, str>, ) { if headers.is_empty() { headers.insert( RfcHeader::ContentType, HeaderValue::ContentType(ContentType { c_type, c_subtype: Some(c_subtype), attributes: None, }), ); } } #[derive(Default)] struct MessageParserState { 
mime_type: MimeType, mime_boundary: Option<Vec<u8>>, in_alternative: bool, parts: usize, html_parts: usize, text_parts: usize, need_html_body: bool, need_text_body: bool, header_id: Option<MessagePartId>, structure: Vec<MessageStructure>, } impl MessageParserState { fn new() -> MessageParserState { MessageParserState { mime_type: MimeType::Message, mime_boundary: None, in_alternative: false, parts: 0, html_parts: 0, text_parts: 0, need_text_body: true, need_html_body: true, ..Default::default() } } pub fn get_structure(&mut self) -> MessageStructure { if let Some(header_id) = self.header_id { MessageStructure::MultiPart((header_id, std::mem::take(&mut self.structure))) } else if self.structure.len() > 1 { MessageStructure::List(std::mem::take(&mut self.structure)) } else if self.structure.len() == 1 { self.structure.pop().unwrap() } else { MessageStructure::List(vec![]) } } } pub struct MessageStream<'x> { pub data: &'x [u8], pub pos: usize, } impl<'x> MessageStream<'x> { pub fn new(data: &'x [u8]) -> MessageStream<'x> { MessageStream { data, pos: 0 } } } impl<'x> Message<'x> { fn new() -> Message<'x> { Message { ..Default::default() } } /// Returns `false` if at least one header field was successfully parsed. pub fn is_empty(&self) -> bool { self.headers_rfc.is_empty() && self.headers_raw.is_empty() } /// Parses a byte slice containing the RFC5322 raw message and returns a /// `Message` struct. /// /// This function never panics, a best-effort is made to parse the message and /// if no headers are found None is returned. /// pub fn parse(raw_message: &'x [u8]) -> Option<Message<'x>> { let mut stream = MessageStream::new(raw_message); let mut message = Message::new(); let mut message_stack = Vec::new(); let mut state = MessageParserState::new(); let mut state_stack = Vec::new(); let mut mime_part_header = RfcHeaders::new(); let mut mime_part_header_raw = RawHeaders::new(); 'outer: loop { // Parse headers let (is_message, header) = if let MimeType::Message = state.mime_type { message.offset_header = stream.pos; if !parse_headers( &mut message.headers_rfc, &mut message.headers_raw, &mut stream, ) { break; } message.offset_body = seek_crlf_end(&stream, stream.pos); (true, &mut message.headers_rfc) } else { if !parse_headers( &mut mime_part_header, &mut mime_part_header_raw, &mut stream, ) { break; } (false, &mut mime_part_header) }; state.parts += 1; let content_type = header .get(&RfcHeader::ContentType) .and_then(|c| c.as_content_type_ref()); let (is_multipart, mut is_inline, mut is_text, mut mime_type) = get_mime_type(content_type, &state.mime_type); if is_multipart { if let Some(mime_boundary) = content_type.map_or_else(|| None, |f| f.get_attribute("boundary")) { let mime_boundary = ("\n--".to_string() + mime_boundary).into_bytes(); if seek_next_part(&mut stream, mime_boundary.as_ref()) { let new_state = MessageParserState { in_alternative: state.in_alternative || mime_type == MimeType::MultipartAlernative, mime_type, mime_boundary: mime_boundary.into(), html_parts: message.html_body.len(), text_parts: message.text_body.len(), need_html_body: state.need_html_body, need_text_body: state.need_text_body, header_id: if !is_message { Some(message.parts.len()) } else { None }, ..Default::default() }; if !is_message { add_missing_type(&mut mime_part_header, "text".into(), "plain".into()); message.parts.push(MessagePart::Multipart(MultiPart::new( std::mem::take(&mut mime_part_header), std::mem::take(&mut mime_part_header_raw), ))); } state_stack.push(state); state = new_state; skip_crlf(&mut 
stream); continue; } else { mime_type = MimeType::TextOther; is_text = true; } } } skip_crlf(&mut stream); let (is_binary, decode_fnc): (bool, DecodeFnc) = match header .get(&RfcHeader::ContentTransferEncoding) { Some(HeaderValue::Text(encoding)) if encoding.eq_ignore_ascii_case("base64") => { (false, decode_base64) } Some(HeaderValue::Text(encoding)) if encoding.eq_ignore_ascii_case("quoted-printable") => { (false, decode_quoted_printable) } _ => (true, get_bytes_to_boundary), }; state .structure .push(MessageStructure::Part(message.parts.len())); if is_binary && mime_type == MimeType::Message { let new_state = MessageParserState { mime_type: MimeType::Message, mime_boundary: state.mime_boundary.take(), need_html_body: true, need_text_body: true, ..Default::default() }; add_missing_type(&mut mime_part_header, "message".into(), "rfc822".into()); message_stack.push(( message, std::mem::take(&mut mime_part_header), std::mem::take(&mut mime_part_header_raw), )); state_stack.push(state); message = Message::new(); state = new_state; continue; } let (bytes_read, mut bytes) = decode_fnc( &stream, stream.pos, state .mime_boundary .as_ref() .map_or_else(|| &[][..], |b| &b[..]), false, ); // Attempt to recover contents of an invalid message let is_encoding_problem = bytes_read == 0; if is_encoding_problem { if stream.pos >= stream.data.len() || (is_binary && state.mime_boundary.is_none()) { state.structure.pop(); break; } // Get raw MIME part let (bytes_read, r_bytes) = if !is_binary { get_bytes_to_boundary( &stream, stream.pos, state .mime_boundary .as_ref() .map_or_else(|| &[][..], |b| &b[..]), false, ) } else { (0, DecodeResult::Empty) }; if bytes_read == 0 { // If there is MIME boundary, ignore it and get raw message if state.mime_boundary.is_some() { let (bytes_read, r_bytes) = get_bytes_to_boundary(&stream, stream.pos, &[][..], false); if bytes_read > 0 { bytes = r_bytes; stream.pos += bytes_read; } else { state.structure.pop(); break; } } else { state.structure.pop(); break; } } else { bytes = r_bytes; stream.pos += bytes_read; } mime_type = MimeType::TextOther; is_inline = false; is_text = true; } else { stream.pos += bytes_read; } if mime_type != MimeType::Message { let is_inline = is_inline && header .get(&RfcHeader::ContentDisposition) .map_or_else(|| true, |d| !d.get_content_type().is_attachment()) && (state.parts == 1 || (state.mime_type != MimeType::MultipartRelated && (mime_type == MimeType::Inline || content_type .map_or_else(|| true, |c| !c.has_attribute("name"))))); let (add_to_html, add_to_text) = if let MimeType::MultipartAlernative = state.mime_type { match mime_type { MimeType::TextHtml => (true, false), MimeType::TextPlain => (false, true), _ => (false, false), } } else if is_inline { if state.in_alternative && (state.need_text_body || state.need_html_body) { match mime_type { MimeType::TextHtml => { state.need_text_body = false; } MimeType::TextPlain => { state.need_html_body = false; } _ => (), } } (state.need_html_body, state.need_text_body) } else { (false, false) }; if is_text { let is_html = mime_type == MimeType::TextHtml; let mut text_part = Part { body: result_to_string(bytes, stream.data, content_type), headers_rfc: std::mem::take(&mut mime_part_header), headers_raw: std::mem::take(&mut mime_part_header_raw), is_encoding_problem, }; // If there is a single part in the message, move MIME headers // to the part if is_message { for header_name in [ RfcHeader::ContentType, RfcHeader::ContentDisposition, RfcHeader::ContentId, RfcHeader::ContentLanguage, 
RfcHeader::ContentLocation, RfcHeader::ContentTransferEncoding, RfcHeader::ContentDescription, ] { if let Some(value) = message.headers_rfc.remove(&header_name) { text_part.headers_rfc.insert(header_name, value); } if let Some(value) = message.headers_raw.get(&HeaderName::Rfc(header_name)) { text_part .headers_raw .insert(HeaderName::Rfc(header_name), value.to_vec()); } } } add_missing_type(&mut text_part.headers_rfc, "text".into(), "plain".into()); if add_to_html && !is_html { message.html_body.push(message.parts.len()); } else if add_to_text && is_html { message.text_body.push(message.parts.len()); } if add_to_html && is_html { message.html_body.push(message.parts.len()); } else if add_to_text && !is_html { message.text_body.push(message.parts.len()); } else { message.attachments.push(message.parts.len()); } message.parts.push(if is_html { MessagePart::Html(text_part) } else { MessagePart::Text(text_part) }); } else { if add_to_html { message.html_body.push(message.parts.len()); } if add_to_text { message.text_body.push(message.parts.len()); } message.attachments.push(message.parts.len()); let mut binary_part = Part::new( std::mem::take(&mut mime_part_header), std::mem::take(&mut mime_part_header_raw), result_to_bytes(bytes, stream.data), is_encoding_problem, ); // If there is a single part in the message, move MIME headers // to the part if is_message { for header_name in [ RfcHeader::ContentType, RfcHeader::ContentDisposition, RfcHeader::ContentId, RfcHeader::ContentLanguage, RfcHeader::ContentLocation, RfcHeader::ContentTransferEncoding, RfcHeader::ContentDescription, ] { if let Some(value) = message.headers_rfc.remove(&header_name) { binary_part.headers_rfc.insert(header_name, value); } if let Some(value) = message.headers_raw.get(&HeaderName::Rfc(header_name)) { binary_part .headers_raw .insert(HeaderName::Rfc(header_name), value.to_vec()); } } } message.parts.push(if !is_inline { MessagePart::Binary(binary_part) } else { MessagePart::InlineBinary(binary_part) }); }; } else { add_missing_type(&mut mime_part_header, "message".into(), "rfc822".into()); message.attachments.push(message.parts.len()); message.parts.push(MessagePart::Message(Part::new( std::mem::take(&mut mime_part_header), std::mem::take(&mut mime_part_header_raw), MessageAttachment::Raw(result_to_bytes(bytes, stream.data)), is_encoding_problem, ))); } if state.mime_boundary.is_some() { // Currently processing a MIME part let mut last_part_offset = stream.pos; 'inner: loop { if let MimeType::Message = state.mime_type { // Finished processing nested message, restore parent message from stack if let ( Some((mut prev_message, headers, headers_raw)), Some(mut prev_state), ) = (message_stack.pop(), state_stack.pop()) { message.structure = state.get_structure(); message.offset_end = seek_crlf_end(&stream, last_part_offset); message.raw_message = raw_message[message.offset_header..message.offset_end].into(); prev_message.attachments.push(prev_message.parts.len()); prev_message.parts.push(MessagePart::Message(Part::new( headers, headers_raw, MessageAttachment::Parsed(Box::new(message)), false, ))); message = prev_message; prev_state.mime_boundary = state.mime_boundary; state = prev_state; } else { debug_assert!(false, "Failed to restore parent message. 
Aborting."); break 'outer; } } if skip_multipart_end(&mut stream) { // End of MIME part reached if MimeType::MultipartAlernative == state.mime_type && state.need_html_body && state.need_text_body { // Found HTML part only if state.text_parts == message.text_body.len() && state.html_parts != message.html_body.len() { for &part_id in &message.html_body[state.html_parts..] { message.text_body.push(part_id); } } // Found text part only if state.html_parts == message.html_body.len() && state.text_parts != message.text_body.len() { for &part_id in &message.text_body[state.html_parts..] { message.html_body.push(part_id); } } } if let Some(mut prev_state) = state_stack.pop() { // Add headers and substructure to parent part prev_state.structure.push(state.get_structure()); // Restore ancestor's state state = prev_state; last_part_offset = stream.pos; if let Some(ref mime_boundary) = state.mime_boundary { // Ancestor has a MIME boundary, seek it. if seek_next_part(&mut stream, mime_boundary) { continue 'inner; } } } break 'outer; } else { skip_crlf(&mut stream); // Headers of next part expected next, break inner look. break 'inner; } } } else if stream.pos >= stream.data.len() { break 'outer; } } while let (Some((mut prev_message, headers, headers_raw)), Some(prev_state)) = (message_stack.pop(), state_stack.pop()) { message.offset_end = stream.pos; if !message.is_empty() { message.structure = state.get_structure(); message.raw_message = raw_message[message.offset_header..message.offset_end].into(); prev_message.attachments.push(prev_message.parts.len()); prev_message.parts.push(MessagePart::Message(Part::new( headers, headers_raw, MessageAttachment::Parsed(Box::new(message)), false, ))); } message = prev_message; state = prev_state; } message.raw_message = raw_message.into(); message.structure = state.get_structure(); message.offset_end = stream.pos; if !message.is_empty() { Some(message) } else { None } } } #[cfg(test)] mod tests { use std::{fs, path::PathBuf}; use serde_json::Value; use crate::parsers::message::Message; const SEPARATOR: &[u8] = "\n---- EXPECTED STRUCTURE ----".as_bytes(); #[test] fn parse_full_messages() { for test_suite in ["rfc", "legacy", "thirdparty", "malformed"] { let mut test_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); test_dir.push("tests"); test_dir.push(test_suite); let mut tests_run = 0; for file_name in fs::read_dir(&test_dir).unwrap() { let file_name = file_name.as_ref().unwrap().path(); if file_name.extension().map_or(false, |e| e == "txt") { let mut input = fs::read(&file_name).unwrap(); let mut pos = 0; for sep_pos in 0..input.len() { if input[sep_pos..sep_pos + SEPARATOR.len()].eq(SEPARATOR) { pos = sep_pos; break; } } assert!( pos > 0, "Failed to find separator in test file '{}'.", file_name.display() ); tests_run += 1; let input = input.split_at_mut(pos); let message = Message::parse(input.0).unwrap(); let json_message = serde_json::to_string_pretty(&message).unwrap(); assert_eq!( serde_json::from_str::<Value>(&json_message).unwrap(), serde_json::from_str::<Value>( std::str::from_utf8(&input.1[SEPARATOR.len()..]).unwrap() ) .unwrap(), "Test failed for '{}', result was:\n{}", file_name.display(), json_message ); } } assert!( tests_run > 0, "Did not find any tests to run in folder {}.", test_dir.display() ); } } /*#[test] fn message_to_yaml() { let mut file_name = PathBuf::from(env!("CARGO_MANIFEST_DIR")); file_name.push("tests"); file_name.push("rfc"); file_name.push("003.txt"); let mut input = fs::read(&file_name).unwrap(); let mut pos = 0; for sep_pos in 
0..input.len() { if input[sep_pos..sep_pos + SEPARATOR.len()].eq(SEPARATOR) { pos = sep_pos; break; } } assert!( pos > 0, "Failed to find separator in test file '{}'.", file_name.display() ); let input = input.split_at_mut(pos).0; let message = Message::parse(input).unwrap(); let result = serde_yaml::to_string(&message).unwrap(); fs::write("test.yaml", &result).unwrap(); println!("{:}", result); } #[test] fn generate_test_samples() { const SEPARATOR: &[u8] = "\n---- EXPECTED STRUCTURE ----".as_bytes(); for test_suite in ["legacy", "malformed", "rfc", "thirdparty"] { let mut test_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); test_dir.push("tests"); test_dir.push(test_suite); for file_name in fs::read_dir(test_dir).unwrap() { if file_name .as_ref() .unwrap() .path() .to_str() .unwrap() .contains("COPYING") { continue; } println!("{:}", file_name.as_ref().unwrap().path().display()); let mut input = fs::read(file_name.as_ref().unwrap().path()).unwrap(); let mut pos = 0; for sep_pos in 0..input.len() { if input[sep_pos..sep_pos + SEPARATOR.len()].eq(SEPARATOR) { pos = sep_pos; break; } } assert!(pos > 0, "Failed to find separator."); let input = input.split_at_mut(pos); /*println!( "{}", serde_json::to_string_pretty(&Message::parse(input.0)).unwrap() );*/ let mut output = Vec::new(); output.extend_from_slice(input.0); output.extend_from_slice(SEPARATOR); output.extend_from_slice( serde_json::to_string_pretty(&Message::parse(input.0).unwrap()) .unwrap() .as_bytes(), ); fs::write(file_name.as_ref().unwrap().path(), &output).unwrap(); } } }*/ }
38.493151
100
0.447816
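The row above is the core of `Message::parse`, which takes raw RFC 5322 bytes and returns `Option<Message>` while filling `parts`, `text_body`, `html_body`, and `attachments`. Below is a small sketch of driving that entry point; the crate path `mail_parser` and the public visibility of the `parts` field are assumptions not shown in this row:

use mail_parser::Message;

fn main() {
    let raw = b"From: Art Vandelay <art@vandelay.com>\r\n\
To: jane@example.com\r\n\
Subject: Importing\r\n\
Content-Type: text/plain\r\n\
\r\n\
Hello!\r\n";

    // `Message::parse` is best-effort: it returns None only if no header could be read.
    match Message::parse(raw) {
        Some(message) => {
            // `is_empty` is defined in the row above: false once any header was parsed.
            assert!(!message.is_empty());
            println!("parsed {} top-level part(s)", message.parts.len());
        }
        None => eprintln!("not a parsable RFC 5322 message"),
    }
}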
fe87f062350361149d90b8a7788803163a6b7e3e
838
use big_s::S; use milli::Criterion::{Attribute, Exactness, Proximity, Typo, Words}; use milli::{AscDesc, Error, Search, UserError}; use crate::search::{self, EXTERNAL_DOCUMENTS_IDS}; #[test] fn sort_ranking_rule_missing() { let criteria = vec![Words, Typo, Proximity, Attribute, Exactness]; // sortables: `tag` and `asc_desc_rank` let index = search::setup_search_index_with_criteria(&criteria); let rtxn = index.read_txn().unwrap(); let mut search = Search::new(&rtxn, &index); search.query(search::TEST_QUERY); search.limit(EXTERNAL_DOCUMENTS_IDS.len()); search.authorize_typos(true); search.optional_words(true); search.sort_criteria(vec![AscDesc::Asc(S("tag"))]); let result = search.execute(); assert!(matches!(result, Err(Error::UserError(UserError::SortRankingRuleMissing)))); }
34.916667
88
0.707637
4840400bc50f6c4815d87528702b91d5c5672956
2,516
use std::ops::Deref; use config::gobjects::GObject; use env::Env; use library; use nameutil::*; use super::*; use super::imports::Imports; use super::info_base::InfoBase; use traits::*; #[derive(Default)] pub struct Info { pub base: InfoBase, } impl Deref for Info { type Target = InfoBase; fn deref(&self) -> &InfoBase { &self.base } } impl Info { //TODO: add test in tests/ for panic pub fn type_<'a>(&self, library: &'a library::Library) -> &'a library::Record { let type_ = library.type_(self.type_id).maybe_ref() .unwrap_or_else(|| panic!("{} is not a record.", self.full_name)); type_ } } pub fn new(env: &Env, obj: &GObject) -> Option<Info> { info!("Analyzing record {}", obj.name); let full_name = obj.name.clone(); let record_tid = match env.library.find_type(0, &full_name) { Some(tid) => tid, None => return None, }; let type_ = env.type_(record_tid); let name: String = split_namespace_name(&full_name).1.into(); let record: &library::Record = match type_.maybe_ref() { Some(record) => record, None => return None, }; let mut imports = Imports::new(); imports.add("glib::translate::*", None); imports.add("ffi", None); let mut functions = functions::analyze(env, &record.functions, record_tid, obj, &mut imports, None, None); let specials = special_functions::extract(&mut functions); let (version, deprecated_version) = info_base::versions(env, obj, &functions, record.version, record.deprecated_version); let is_shared = specials.get(&special_functions::Type::Ref).is_some() && specials.get(&special_functions::Type::Unref).is_some(); if is_shared { // `copy` will duplicate a struct while `clone` just adds a reference special_functions::unhide(&mut functions, &specials, special_functions::Type::Copy); }; special_functions::analyze_imports(&specials, &mut imports); //don't `use` yourself imports.remove(&name); imports.clean_glib(env); let base = InfoBase { full_name: full_name, type_id: record_tid, name: name, functions: functions, specials: specials, imports: imports, version: version, deprecated_version: deprecated_version, cfg_condition: obj.cfg_condition.clone(), }; let info = Info { base: base, }; Some(info) }
26.484211
97
0.614467
33d5ea9ff725063dfdf21b9e795a9b94d8fb9877
3,284
//! # The hacspec standard library //! //! ## Data types //! The standard library provides two main data types. //! //! ### Sequences //! Sequences [`Seq`](`seq::Seq`) arrays with a fixed length set at runtime. //! They replace Rust vectors, which are not allowed in hacspec. //! //! See the [seq](`mod@seq`) module documentation for more details. //! //! ``` //! use hacspec_lib::prelude::*; //! let x = Seq::<U128>::from_public_slice(&[5, 2, 7, 8, 9]); //! let x = Seq::<u128>::from_native_slice(&[5, 2, 7, 8, 9]); //! let y = ByteSeq::from_hex("0388dace60b6a392f328c2b971b2fe78"); //! ``` //! //! ### Arrays //! Arrays have a fixed length that is known at compile time. //! They replace the Rust arrays, which are not allowed in hacspec. //! //! See the [arrays](`mod@array`) module documentation for more details. //! //! To define a new array type with name `State`, holding `16` `u32` run //! //! ``` //! use hacspec_lib::prelude::*; //! array!(State, 16, u32, type_for_indexes: StateIdx); //! ``` //! //! The `type_for_indexes` defines the index type for this array as `StateIdx`. //! Such an array can now be used similarly to regular Rust arrays. //! //! ``` //! use hacspec_lib::prelude::*; //! array!(State, 16, u32, type_for_indexes: StateIdx); //! fn modify_state(mut state: State) -> State { //! state[1] = state[1] + state[2]; //! state //! } //! ``` //! //! ## Numeric Types //! The standard library provides two main numeric types. //! //! ### Math Integers //! Integers with a fixed upper bound on the byte length. //! See the [math integer](`mod@math_integers`) module documentation for more details. //! //! The following example defines and uses the type `LargeSecretInteger` that can hold unsigned integers up to 2^233-1. //! //! ``` //! use hacspec_lib::prelude::*; //! unsigned_integer!(LargeSecretInteger, 233); //! let a = LargeSecretInteger::from_literal(1); //! let b = LargeSecretInteger::from_literal(2); //! let c = a + b; //! let result = std::panic::catch_unwind(|| { //! // This panics because comparing secret math integers is currently not support. //! assert!(c.equal(LargeSecretInteger::from_literal(3))); //! }); //! assert!(result.is_err()); //! let _max = LargeSecretInteger::from_hex("1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); //! ``` //! //! ## Secret Integers //! All numeric types can be public or secret. //! By default they are secret types. //! Public types are prefixed with `public_`. //! //! ### Secret Machine Integers //! The regular machine integers Rust provides are considered public integers. //! This standard library defines secret variants for all public machine integers defined as follows. //! //! Unsigned secret integers: `U8, U16, U32, U64, U128` //! //! Signed secret integers: `I8, I16, I32, I64, I128` //! //! See the [secret integers](`secret_integers`) for details. #![no_std] #[cfg(all(feature = "alloc", not(feature = "std")))] extern crate alloc; #[cfg(feature = "std")] extern crate std as alloc; pub mod array; mod bigint_integers; mod machine_integers; pub mod math_integers; mod math_util; pub mod prelude; pub mod seq; mod traits; mod transmute; mod util; mod vec_integers; mod vec_integers_public; mod vec_integers_secret; mod vec_util; pub use crate::prelude::*;
31.27619
119
0.673873
1e6a95a0848752ef13a0ba8ed2a1bacfb504e2cd
6,282
//! //! Exposes a [Virtual File Systems (VFS)](https://docs.rs/vfs/) via HTTPS. //! //! The [HttpsFSServer] exposes a VFS (implementing [FileSystem](vfs::filesystem::FileSystem)) via HTTPS. //! [HttpsFS] can be uses to access a [FileSystem](vfs::filesystem::FileSystem) exposed by a [HttpsFSServer]. //! //! # Example //! //! The two examples show the usage of a [HttpsFSServer] and a [HttpsFS]. It is assumed, that the //! examples are executed in the crate root. Therefore, you can find the referenced files in the //! crate repository. //! //! **Please note**, that it is assumed, that the used certificate is issued for "localhost". //! //! You can run the server side examples from the repository root with: //! ```console //! cargo run --example https_fs_server //! ``` //! //! Start the client side example in another terminal with: //! //! ```console //! cargo run --example https_fs //! ``` //! //! ## Server side //! //! This example exposes a [memory file system](vfs::MemoryFS) via HTTPS. The content of the file system //! is lost as soon as the server is terminated. //! //! ```no_run //! # use vfs::MemoryFS; //! # use vfs_https::{HttpsFSResult, HttpsFSServer}; //! # //! # fn main() -> HttpsFSResult<()> { //! // Create a file system, which the server uses to access the files. //! let fs = MemoryFS::new(); //! //! let server = HttpsFSServer::builder(fs) //! // Since this test will not be executed as super user, we are not allowed to listen on //! // a TCP port below 1000, such as the https port 443. Therefore we use a different port. //! .set_port(8443) //! // It is a https server, therefore we need to load a certificate, which the server //! // uses. For the example we use a self signed certificate. If you want to know how to //! // create a self signed certificate, see "/examples/cert/create.sh". //! .load_certificates("examples/cert/cert.crt") //! // We also need to load the private key, which belongs to the certificate. //! .load_private_key("examples/cert/private-key.key") //! // The server needs to authenticate the clients. Therefore we have to provide a method //! // which // validates the user credentials. In this example, only the username 'user' //! // and the password 'pass' is accepted. //! // As authentication process, the 'Basic' method as defined by the //! // [RFC7617](https://tools.ietf.org/html/rfc7617) is used. //! .set_credential_validator(|username: &str, password: &str| { //! username == "user" && password == "pass" //! }); //! //! // Run the server. This call is blocking. //! server.run() //! # } //! ``` //! //! ## Client side //! //! This example connects to a [HttpsFSServer] and creates a file "example.txt" if it does not exists and appends a //! new line to it. Afterwards it reads the whole file and prints the content to stdout. //! As long as the server is not restarted, the output of this program will change with each call. //! //! For the usage of [FileSystem](vfs::filesystem::FileSystem) see the crate [vfs]. //! The crate [chrono] is used for the generation of the time stamp. //! //! ```no_run //! # use chrono::prelude::*; //! # use std::io::Read; //! # use vfs::VfsPath; //! # use vfs_https::HttpsFS; //! # //! # fn main() -> vfs::VfsResult<()> { //! // You can not access the server from a different host, since the used certificate is issued //! // for the localhost and you have to use https://localhost:8443 to access the server. You can //! // also not use IPs, i.g. https://127.0.0.1:8443, since we didn't issue the certificate //! // for the IP. //! 
let builder = HttpsFS::builder("localhost") //! // Set the port used by the server. The default is 443. //! .set_port(8443) //! // Add the self signed certificate as root certificate. If we don't do this, the client //! // refuses to connect to the HttpsFSServer. If the server uses a certificate issued by //! // an official certificate authority, than we don't need to add an additional root //! // certificate. //! .add_root_certificate("examples/cert/cert.crt") //! // The client will use the following method to get credentials for the authentication. //! .set_credential_provider(|server_msg| { //! println!( //! "Server request authentification with message \"{}\".", //! server_msg //! ); //! (String::from("user"), String::from("pass")) //! }); //! let root: VfsPath = builder.build()?.into(); //! let root = root.join("example.txt")?; //! //! // make sure that the file exists //! if !root.exists()? { //! root.create_file()?; //! } //! //! // add a new line to the file //! let mut file = root.append_file()?; //! let time = Local::now(); //! let line = format!("{}: Hello HttpsFS!\n", time); //! file.write(line.as_bytes())?; //! //! // open file reading //! let file = root.open_file()?; //! //! // One should really use a BufReader, which reads files in chunks of 8kb. //! // The Read trait, issues a new request to the HttpsFSServer with each call, //! // even if only on byte is read. The headers of the http-protocol needs //! // several hundred bytes, which makes small reads inefficient. //! let mut buffed_file = std::io::BufReader::new(file); //! //! // read file content //! let mut content = String::new(); //! buffed_file.read_to_string(&mut content)?; //! //! println!("Content of example.txt: \n{}", content); //! # //! # Ok(()) //! # } //! ``` //! //! //! # TODOs //! - Implement a [CGI](https://en.wikipedia.org/wiki/Common_Gateway_Interface) //! version of the HttpsFSServer. //! * This would allow a user to use any webserver provided by its //! favorite web-hoster as an infrastructure. The advantage is, that the //! web-hoster can overtake the certificate management, which is often //! perceived as a liability. //! - Write a HttpsFS version, which can be compiled to WebAssembly //! - Consider to provide an non-blocking version of HttpsFS //! - Do version check after connecting to a HttpsFSServer #![warn(missing_docs)] mod error; mod httpsfs; mod protocol; mod server; pub use error::{HttpsFSError, HttpsFSResult}; pub use httpsfs::{HttpsFS, HttpsFSBuilder}; pub use server::{HttpsFSServer, HttpsFSServerBuilder};
41.058824
115
0.653295
f7a2a0ef1340d219bfdb5016a9bddec559467489
3,237
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::TBPMR { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get() } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct RESERVED8R { bits: u32, } impl RESERVED8R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } } #[doc = r" Value of the field"] pub struct TBPSMRR { bits: u8, } impl TBPSMRR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Proxy"] pub struct _TBPSMRW<'a> { w: &'a mut W, } impl<'a> _TBPSMRW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 255; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 8:31 - Software should not rely on the value of a reserved. Writing any other value than the reset value may result in undefined behavior."] #[inline] pub fn reserved8(&self) -> RESERVED8R { let bits = { const MASK: u32 = 16777215; const OFFSET: u8 = 8; ((self.bits >> OFFSET) & MASK as u32) as u32 }; RESERVED8R { bits } } #[doc = "Bits 0:7 - GPT Timer B Pre-scale Match Register. In 16 bit mode this field holds bits 23 to 16."] #[inline] pub fn tbpsmr(&self) -> TBPSMRR { let bits = { const MASK: u8 = 255; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u8 }; TBPSMRR { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:7 - GPT Timer B Pre-scale Match Register. In 16 bit mode this field holds bits 23 to 16."] #[inline] pub fn tbpsmr(&mut self) -> _TBPSMRW { _TBPSMRW { w: self } } }
25.896
158
0.520544
bb0edb2a57e54dda7cc5f55b4f23e72c68f4e553
1,823
use std::{rc::Rc, cell::RefCell}; use gtk4::{ traits::{ WidgetExt, BoxExt}, Align }; use libadwaita::{ PreferencesGroup, ActionRow, traits::{PreferencesGroupExt, ActionRowExt} }; use crate::{ model::{SaveOptions, ScreenPreperation}}; pub fn create_settings_page(opt_ref: Rc<RefCell<SaveOptions>>, content_area: &gtk4::Box) { let group = PreferencesGroup::new(); group.set_hexpand(false); group.set_title("Settings"); let options = opt_ref.borrow(); let preparation_names = [ "None", "Clear Screen" ]; let prep_dropdown = gtk4::DropDown::from_strings(&preparation_names); prep_dropdown.set_valign(Align::Center); match options.screen_preparation { crate::model::ScreenPreperation::None => prep_dropdown.set_selected(0), crate::model::ScreenPreperation::ClearScreen | crate::model::ScreenPreperation::Home => prep_dropdown.set_selected(1), } let row = ActionRow::builder() .title("Video Preparation") .build(); row.add_suffix(&prep_dropdown); group.add(&row); let save_sauce_switch = gtk4::Switch::builder() .valign(Align::Center) .active(options.save_sauce) .build(); let row = ActionRow::builder() .title("Save sauce") .build(); row.add_suffix(&save_sauce_switch); group.add(&row); let opt = opt_ref.clone(); prep_dropdown.connect_selected_notify(move |d| { opt.borrow_mut().screen_preparation = if d.selected() == 0 { ScreenPreperation::None } else { ScreenPreperation::ClearScreen }; }); let opt = opt_ref.clone(); save_sauce_switch.connect_state_set(move |_, state| { opt.borrow_mut().save_sauce = state; gtk4::Inhibit(false) }); content_area.append(&group); }
30.898305
127
0.644542
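The settings page above leans on the usual gtk-rs pattern of sharing mutable state between signal handlers through `Rc<RefCell<_>>` clones. The sketch below isolates that pattern without any GTK dependency; `Options` and the closure stand in for `SaveOptions` and the switch callback.

use std::{cell::RefCell, rc::Rc};

#[derive(Debug, Default)]
struct Options {
    save_sauce: bool,
}

fn main() {
    let opt_ref = Rc::new(RefCell::new(Options::default()));

    // Each handler owns its own Rc clone; all clones share the same RefCell.
    let opt = opt_ref.clone();
    let on_switch_toggled = move |state: bool| {
        opt.borrow_mut().save_sauce = state;
    };

    // Simulate the user flipping the switch.
    on_switch_toggled(true);

    // The original handle observes the change made inside the handler.
    assert!(opt_ref.borrow().save_sauce);
}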
f87f632d3b0aa51c5bb87aae48a3102c2ca1c863
14,007
//! The named standard illuminants used with the 10 degree standard observer use crate::channel::{FreeChannelScalar, PosNormalChannelScalar}; use crate::white_point::{UnitWhitePoint, WhitePoint}; use crate::xyy::XyY; use crate::xyz::Xyz; use num_traits::{cast, Float}; /// Incandescent / Tungsten. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct A; impl<T> WhitePoint<T> for A where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(1.111420).unwrap(), cast(1.000000).unwrap(), cast(0.351998).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.45117).unwrap(), cast(0.40594).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for A where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// {obsolete} Direct sunlight at noon. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct B; impl<T> WhitePoint<T> for B where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(0.991778).unwrap(), cast(1.000000).unwrap(), cast(0.843493).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.3498).unwrap(), cast(0.3527).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for B where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// {obsolete} Average / North sky Daylight. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct C; impl<T> WhitePoint<T> for C where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(0.972857).unwrap(), cast(1.000000).unwrap(), cast(1.161448).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.31039).unwrap(), cast(0.31905).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for C where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// Horizon Light. ICC profile PCS. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct D50; impl<T> WhitePoint<T> for D50 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(0.967206).unwrap(), cast(1.000000).unwrap(), cast(0.814280).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.34773).unwrap(), cast(0.35952).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for D50 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// Mid-morning / Mid-afternoon Daylight. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct D55; impl<T> WhitePoint<T> for D55 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(0.957967).unwrap(), cast(1.000000).unwrap(), cast(0.909253).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.33411).unwrap(), cast(0.34877).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for D55 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// Noon Daylight: Television, sRGB color space. 
#[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct D65; impl<T> WhitePoint<T> for D65 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(0.948097).unwrap(), cast(1.000000).unwrap(), cast(1.073051).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.31382).unwrap(), cast(0.331).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for D65 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// North sky Daylight. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct D75; impl<T> WhitePoint<T> for D75 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(0.944171).unwrap(), cast(1.000000).unwrap(), cast(1.206427).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.29968).unwrap(), cast(0.3174).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for D75 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// Equal energy. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct E; impl<T> WhitePoint<T> for E where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(1.000000).unwrap(), cast(1.000000).unwrap(), cast(1.000030).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.33333).unwrap(), cast(0.33333).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for E where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// Daylight Fluorescent. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct F1; impl<T> WhitePoint<T> for F1 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(0.947913).unwrap(), cast(1.000000).unwrap(), cast(1.031914).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.31811).unwrap(), cast(0.33559).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for F1 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// Cool White Fluorescent. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct F2; impl<T> WhitePoint<T> for F2 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(1.032450).unwrap(), cast(1.000000).unwrap(), cast(0.689897).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.37925).unwrap(), cast(0.36733).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for F2 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// White Fluorescent. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct F3; impl<T> WhitePoint<T> for F3 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(1.089683).unwrap(), cast(1.000000).unwrap(), cast(0.519648).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.41761).unwrap(), cast(0.38324).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for F3 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// Warm White Fluorescent. 
#[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct F4; impl<T> WhitePoint<T> for F4 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(1.149614).unwrap(), cast(1.000000).unwrap(), cast(0.409633).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.4492).unwrap(), cast(0.39074).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for F4 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// Daylight Fluorescent. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct F5; impl<T> WhitePoint<T> for F5 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(0.933686).unwrap(), cast(1.000000).unwrap(), cast(0.986363).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.31975).unwrap(), cast(0.34246).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for F5 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// Lite White Fluorescent. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct F6; impl<T> WhitePoint<T> for F6 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(1.021481).unwrap(), cast(1.000000).unwrap(), cast(0.620736).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.3866).unwrap(), cast(0.37847).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for F6 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// D65 simulator, Daylight simulator. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct F7; impl<T> WhitePoint<T> for F7 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(0.957797).unwrap(), cast(1.000000).unwrap(), cast(1.076183).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.31569).unwrap(), cast(0.3296).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for F7 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// D50 simulator, Sylvania F40 Design 50. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct F8; impl<T> WhitePoint<T> for F8 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(0.971146).unwrap(), cast(1.000000).unwrap(), cast(0.811347).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.34902).unwrap(), cast(0.35939).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for F8 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// Cool White Deluxe Fluorescent. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct F9; impl<T> WhitePoint<T> for F9 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(1.021163).unwrap(), cast(1.000000).unwrap(), cast(0.678256).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.37829).unwrap(), cast(0.37045).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for F9 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// Philips TL85, Ultralume 50. 
#[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct F10; impl<T> WhitePoint<T> for F10 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(0.990012).unwrap(), cast(1.000000).unwrap(), cast(0.831340).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.3509).unwrap(), cast(0.35444).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for F10 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// Philips TL84, Ultralume 40. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct F11; impl<T> WhitePoint<T> for F11 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(1.038197).unwrap(), cast(1.000000).unwrap(), cast(0.655550).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.38541).unwrap(), cast(0.37123).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for F11 where T: Float + FreeChannelScalar + PosNormalChannelScalar {} /// Philips TL83, Ultralume 30. #[derive(Clone, Debug, PartialEq, Eq, Default, Copy)] pub struct F12; impl<T> WhitePoint<T> for F12 where T: Float + FreeChannelScalar + PosNormalChannelScalar, { #[inline] fn get_xyz(&self) -> Xyz<T> { Xyz::new( cast(1.114284).unwrap(), cast(1.000000).unwrap(), cast(0.403530).unwrap(), ) } #[inline] fn get_xy_chromaticity(&self) -> XyY<T> { XyY::new( cast(0.44256).unwrap(), cast(0.39717).unwrap(), cast(1.0).unwrap(), ) } } impl<T> UnitWhitePoint<T> for F12 where T: Float + FreeChannelScalar + PosNormalChannelScalar {}
27.464706
96
0.562005
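The 14 kB record above is a table of standard illuminants (A through F12, 10 degree observer) behind a shared `WhitePoint<T>` trait. Written as if it lived in the same crate, a generic consumer could look like the sketch below; `reference_xyz` is a hypothetical helper, while the trait bounds and methods are the ones the file defines.

// Sketch, written as if it sat next to the illuminant definitions above.
use crate::channel::{FreeChannelScalar, PosNormalChannelScalar};
use crate::white_point::WhitePoint;
use crate::xyz::Xyz;
use num_traits::Float;

/// Hypothetical helper: return the XYZ tristimulus values of any illuminant.
fn reference_xyz<T, W>(wp: &W) -> Xyz<T>
where
    T: Float + FreeChannelScalar + PosNormalChannelScalar,
    W: WhitePoint<T>,
{
    wp.get_xyz()
}

// e.g. `reference_xyz::<f64, _>(&D65)` yields roughly (0.948, 1.000, 1.073).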
188b3ca19570c126223473da8ee845d6ee9d7b73
69
mod mirrored_updater;
mod run_counter_updater;
mod sequence_handler;
17.25
24
0.869565
0978cb182caae51f1c148bbe56d12b38d96834b1
7,096
use crate::unicode::UnicodeIter;
use fontdue::{Font, FontSettings};

const NOTO_SANS_MONO_REGULAR: &[u8] = include_bytes!("res/NotoSansMono-Regular.ttf");
const NOTO_SANS_MONO_BOLD: &[u8] = include_bytes!("res/NotoSansMono-Bold.ttf");
const NOTO_SANS_MONO_LIGHT: &[u8] = include_bytes!("res/NotoSansMono-Light.ttf");

/// All available fonts. Must match the order in [`FontWeight`]!
const NOTO_SANS_FAMILY: [&[u8]; 3] = [
    NOTO_SANS_MONO_LIGHT,
    NOTO_SANS_MONO_REGULAR,
    NOTO_SANS_MONO_BOLD,
];

pub const fn noto_font_by_weight(typ: &FontWeight) -> &'static [u8] {
    NOTO_SANS_FAMILY[typ.val()]
}

/// This is partially a copy of the code in `codegen_templates/lib.rs.template.txt`,
/// or a template for it.
/// Supported font weights.
#[derive(Debug, Copy, Clone)]
#[repr(usize)]
pub enum FontWeight {
    Light,
    Regular,
    Bold,
}

impl FontWeight {
    /// Returns a slice to iterate over all available font weights.
    pub const fn variants() -> &'static [Self] {
        &[Self::Light, Self::Regular, Self::Bold]
    }

    /// Returns the numeric value of the enum variant.
    pub const fn val(self) -> usize {
        self as _
    }

    /// Returns a lowercase string describing the font weight.
    pub const fn mod_name(self) -> &'static str {
        match self {
            FontWeight::Light => "light",
            FontWeight::Regular => "regular",
            FontWeight::Bold => "bold",
        }
    }
}

/// Makes sure the index is in bounds [0..upper_bound] and
/// of type usize in the end.
macro_rules! trim_index_to_bounds {
    ($num:ident, $bounds:expr) => {
        if ($num as usize) >= $bounds {
            // usually, here it happens that -1 gets truncated to 0 and
            // bounds + 1 back to bounds
            // eprintln!("{} is out of bound {}!", $num, $bounds);
            ($bounds - 1) as usize
        } else if $num < 0 {
            0_usize
        } else {
            $num as usize
        }
    };
}

/// All font-related information to render characters with [`fontdue`]
/// into a bitmap font. Currently, the usage of Noto Sans Mono is hard-coded.
///
/// Guarantees that each bitmap font raster centers the letter vertically
/// and horizontally. There will be a small vertical padding to other lines
/// (if rendered as multiline) but almost no padding to the left and right by
/// default.
///
/// The raster is not XxX but XxY, because a mono font does not necessarily need
/// to be XxX, as long as each character has the same width.
#[derive(Debug)]
pub struct ToBitmapFont {
    font: Font,
    bitmap_height: usize,
    bitmap_width: usize,
    font_size: f32,
}

impl ToBitmapFont {
    /// Creates a new object, ready to rasterize characters into a bitmap.
    ///
    /// # Parameters
    /// * `bitmap_height` height of the bitmap. A little bit bigger than the font on the screen.
    ///   Values are, for example, 14, 16, or 24.
    /// * `font_bytes` Raw bytes of a font file that [`fontdue`] can parse.
    pub fn new(bitmap_height: usize, font_bytes: &[u8]) -> Self {
        // We need some padding at the top and the bottom of each box, because
        // of letters such as "Ä" and "y". I figured the value out just by trying
        // with my "rasterize_chars_in_window" binary. It depends on the y_offset
        // in `rasterize_to_bitmap()`
        let font_size = (bitmap_height as f32 * 0.84).ceil();
        let font = Font::from_bytes(
            font_bytes,
            FontSettings {
                scale: font_size,
                ..Default::default()
            },
        )
        .unwrap();
        let bitmap_width = Self::find_max_width(&font, font_size);

        Self {
            font,
            bitmap_height,
            bitmap_width,
            font_size,
        }
    }

    /// Rasterizes a char for the given [`ToBitmapFont`] object into a bitmap. Every letter in the
    /// resulting bitmap mono font is horizontally and vertically aligned to the center. Furthermore,
    /// the resulting mono font already contains a vertical line spacing of a few pixels, but no
    /// padding to the left and right.
    pub fn rasterize_to_bitmap(&self, c: char) -> Vec<Vec<u8>> {
        let (metrics, fontdue_bitmap) = self.font.rasterize(c, self.font_size);
        // the bitmap that will contain the properly aligned rasterized char
        let mut letter_bitmap = vec![vec![0_u8; self.bitmap_width]; self.bitmap_height];

        for ((y, x), intensity) in fontdue_bitmap
            .iter()
            .enumerate()
            .map(|(i, p)| (i as isize, p))
            .map(|(i, p)| {
                // align to horizontal center
                let x_offset = (self.bitmap_width as isize - metrics.width as isize) / 2;

                // align to vertical center
                // 1) bounds:height: align big letters to groundline regarding the font size
                let mut y_offset = self.font_size as isize - metrics.height as isize;
                // 2) move downwards, because there are parts "below the ground line" (like in y)
                y_offset -= metrics.ymin as isize;
                // 3) move everything slightly to the top; I figured this out by trying with
                //    my "rasterize_chars_in_window" binary
                y_offset -= (self.bitmap_height as f32 * 0.07) as isize;

                let x = i % metrics.width as isize;
                let y = i as isize / metrics.width as isize;
                let x = x + x_offset;
                let y = y + y_offset;

                // if some letter is "too" big and out of the bounds of the box: cut and prevent error
                let x = trim_index_to_bounds!(x, self.bitmap_width());
                let y = trim_index_to_bounds!(y, self.bitmap_height());

                ((y, x), p)
            })
        {
            letter_bitmap[y][x] = *intensity;
        }

        letter_bitmap
    }

    /// A brute force approach to find the maximum width that a supported unicode
    /// char will have for the given font size. This way, the width of the final
    /// bitmap can be reduced to HEIGHT x WIDTH instead of HEIGHT x HEIGHT, which
    /// would imply a big space between all letters.
    fn find_max_width(font: &Font, font_size: f32) -> usize {
        UnicodeIter::new()
            .filter(|x| x.is_visible_char())
            .map(|s| s.get_char())
            .map(|c| font.rasterize(c, font_size).0.width)
            .max()
            .unwrap()
    }

    pub const fn bitmap_height(&self) -> usize {
        self.bitmap_height
    }

    pub const fn bitmap_width(&self) -> usize {
        self.bitmap_width
    }

    pub const fn font_size(&self) -> f32 {
        self.font_size
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_font_props() {
        let props = ToBitmapFont::new(16, NOTO_SANS_MONO_REGULAR);
        println!("bitmap_height = {}", props.bitmap_height());
        println!("bitmap_width = {}", props.bitmap_width());
        println!("font_size = {}", props.font_size());
    }
}
34.955665
98
0.59611
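A short usage sketch for the rasterizer above: build a `ToBitmapFont` for a 16-pixel-high cell and dump a single glyph as ASCII art. Everything used here is defined in the record (written as if this sat in the same crate); the intensity threshold of 127 is an arbitrary choice for the demo.

fn main() {
    // 16 px tall cells, regular weight; the cell width is derived automatically.
    let font = ToBitmapFont::new(16, noto_font_by_weight(&FontWeight::Regular));
    let bitmap = font.rasterize_to_bitmap('A');

    // Crude visualization: any pixel above half intensity becomes '#'.
    for row in &bitmap {
        let line: String = row
            .iter()
            .map(|&p| if p > 127 { '#' } else { ' ' })
            .collect();
        println!("{}", line);
    }
}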
692dbe95bbed09507bf0421e8a1c4d52857f44b8
849
#[macro_use]
extern crate permutation_rs;

use std::collections::HashMap;

use permutation_rs::group::{GroupElement, GroupAction};
use permutation_rs::group::permutation::Permutation;

fn main() {
    let base = 0;
    let mut transversal = HashMap::new();
    transversal.insert(0, permute!(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5));
    transversal.insert(1, permute!(0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 0));
    transversal.insert(2, permute!(0, 2, 1, 3, 2, 4, 3, 5, 4, 0, 5, 1));
    transversal.insert(3, permute!(0, 3, 1, 4, 2, 5, 3, 0, 4, 1, 5, 2));
    transversal.insert(4, permute!(0, 4, 1, 5, 2, 0, 3, 1, 4, 2, 5, 3));
    transversal.insert(5, permute!(0, 5, 1, 0, 2, 1, 3, 2, 4, 3, 5, 4));

    let state = permute!(0, 3, 1, 0, 2, 5, 3, 4, 4, 1, 5, 2);

    let image = state.act_on(&base);

    println!("{}^{} = {}", &base, &state, &image);
}
35.375
72
0.566549