hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
bbf32be52846a7566fcd266e29de8774a36167b2
4,003
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::UPINRQ4 { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct INRQR { bits: u8, } impl INRQR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct INMODER { bits: bool, } impl INMODER { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Proxy"] pub struct _INRQW<'a> { w: &'a mut W, } impl<'a> _INRQW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 255; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _INMODEW<'a> { w: &'a mut W, } impl<'a> _INMODEW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { 
self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 8; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:7 - IN Request Number before Freeze"] #[inline] pub fn inrq(&self) -> INRQR { let bits = { const MASK: u8 = 255; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u8 }; INRQR { bits } } #[doc = "Bit 8 - IN Request Mode"] #[inline] pub fn inmode(&self) -> INMODER { let bits = { const MASK: bool = true; const OFFSET: u8 = 8; ((self.bits >> OFFSET) & MASK as u32) != 0 }; INMODER { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 1 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:7 - IN Request Number before Freeze"] #[inline] pub fn inrq(&mut self) -> _INRQW { _INRQW { w: self } } #[doc = "Bit 8 - IN Request Mode"] #[inline] pub fn inmode(&mut self) -> _INMODEW { _INMODEW { w: self } } }
24.260606
59
0.49313
f591f05d365c3a3ec8ad5ab0a0571875e3995afe
555
#[doc = "Reader of register CHSTATUS"] pub type R = crate::R<u8, super::CHSTATUS>; #[doc = "Reader of field `RDYUSR`"] pub type RDYUSR_R = crate::R<bool, bool>; #[doc = "Reader of field `BUSYCH`"] pub type BUSYCH_R = crate::R<bool, bool>; impl R { #[doc = "Bit 0 - Ready User"] #[inline(always)] pub fn rdyusr(&self) -> RDYUSR_R { RDYUSR_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Busy Channel"] #[inline(always)] pub fn busych(&self) -> BUSYCH_R { BUSYCH_R::new(((self.bits >> 1) & 0x01) != 0) } }
29.210526
53
0.569369
62ef3657aa3e2398f2b97c68115f659dad3ae684
3,245
pub fn run() -> String { let program = loader::load_integer_row_list("./day/07/input.csv")[0].clone(); let settings = vec![5, 6, 7, 8, 9]; let result = mathy::permutations_in_memory(settings) .iter() .map(|setting| { let mut amps = Amplifiers::new(&program, &setting); amps.run_to_end(0) }) .max() .unwrap(); format!("best: {}", result) } const AMPLIFIER_COUNT: usize = 5; use icc::helpers::State; pub struct Amplifiers { amplifiers: Vec<icc::Computer>, buffer: Vec<(usize, i64)>, send_to: Vec<usize>, } impl Amplifiers { pub fn new(program: &[i64], pss: &[i64]) -> Amplifiers { Amplifiers { amplifiers: (0..AMPLIFIER_COUNT) .map(|id| icc::Computer::load(id, program)) .collect(), buffer: pss.iter().cloned().enumerate().collect(), send_to: (0..AMPLIFIER_COUNT) .map(|dest| (dest + 1) % AMPLIFIER_COUNT) .collect(), } } fn post_messsage(&mut self, from: usize, message: i64) { assert!(self.send_to.len() > from); self.buffer.push((self.send_to[from], message)) } fn print_buffer(&self) { println!("buffer depth: {}", self.buffer.len()); self .buffer .iter() .cloned() .for_each(|(id, msg)| println!("({}, {})", id, msg)); } fn run(&mut self) -> Option<i64> { let mut halted: std::collections::HashSet<usize> = (0..AMPLIFIER_COUNT).collect(); let mut idx = 0; loop { match self.amplifiers[idx].run() { State::Crashed => panic!("amplifier {} crashed", idx), State::Halted => { halted.remove(&idx); } State::Ready => (), State::Running => (), State::Output(id, msg) => { println!("{} sending {}", id, msg); self.post_messsage(id, msg) } State::Input => { if let Some(pos) = self.buffer.iter().position(|(addr, _value)| *addr == idx) { let (_addr, value) = self.buffer.remove(pos); if !self.amplifiers[idx].input_value(value) { panic!("{} asked for input and then refused it", idx) } } } } idx += 1; idx %= AMPLIFIER_COUNT; if halted.is_empty() { return self.buffer.pop().map(|(_id, val)| val); } } } pub fn run_to_end(&mut self, init: i64) -> i64 { self.buffer.push((0, init)); self.print_buffer(); 
self.run().unwrap() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_1() { let program = vec![ 3, 26, 1001, 26, -4, 26, 3, 27, 1002, 27, 2, 27, 1, 27, 26, 27, 4, 27, 1001, 28, -1, 28, 1005, 28, 6, 99, 0, 0, 5, ]; let mut amps = Amplifiers::new(&program, &[9, 8, 7, 6, 5]); let actual = amps.run_to_end(0); assert_eq!(actual, 139_629_729); } #[test] fn test_2() { let program = vec![ 3, 52, 1001, 52, -5, 52, 3, 53, 1, 52, 56, 54, 1007, 54, 5, 55, 1005, 55, 26, 1001, 54, -5, 54, 1105, 1, 12, 1, 53, 54, 53, 1008, 54, 0, 55, 1001, 55, 1, 55, 2, 53, 55, 53, 4, 53, 1001, 56, -1, 56, 1005, 56, 6, 99, 0, 0, 0, 0, 10, ]; let mut amps = Amplifiers::new(&program, &[9, 7, 8, 5, 6]); let actual = amps.run_to_end(0); assert_eq!(actual, 18_216); } }
28.973214
99
0.534052
1c33f992c10bea44bfd3b8dbdfd8c859f1f3a102
22,158
//! //! Plot to SVG and style with CSS //! //! You can find poloto on [github](https://github.com/tiby312/poloto) and [crates.io](https://crates.io/crates/poloto). //! Documentation at [docs.rs](https://docs.rs/poloto) //! //! Check out the [github examples](https://github.com/tiby312/poloto/tree/master/examples). //! The latest graph outputs of the examples can be found in the [assets](https://github.com/tiby312/poloto/tree/master/assets) folder. //! //! #[cfg(doctest)] mod test_readme { macro_rules! external_doc_test { ($x:expr) => { #[doc = $x] extern "C" {} }; } external_doc_test!(include_str!("../README.md")); } use std::fmt; pub use tagger::upgrade_write; pub use crop::Crop; pub use crop::Croppable; mod crop; mod render; pub mod tick_fmt; pub mod util; ///The width of the svg tag. const WIDTH: f64 = 800.0; ///The height of the svg tag. const HEIGHT: f64 = 500.0; trait PlotTrait<X: PlotNum, Y: PlotNum> { fn write_name(&self, a: &mut dyn fmt::Write) -> fmt::Result; fn iter_first(&mut self) -> &mut dyn Iterator<Item = (X, Y)>; fn iter_second(&mut self) -> &mut dyn Iterator<Item = (X, Y)>; } use std::marker::PhantomData; use fmt::Display; struct PlotStruct<X: PlotNum, Y: PlotNum, I: Iterator<Item = (X, Y)> + Clone, F: Display> { first: I, second: I, func: F, _p: PhantomData<(X, Y)>, } impl<X: PlotNum, Y: PlotNum, I: Iterator<Item = (X, Y)> + Clone, F: Display> PlotStruct<X, Y, I, F> { fn new(it: I, func: F) -> Self { let it2 = it.clone(); PlotStruct { first: it, second: it2, func, _p: PhantomData, } } } impl<X: PlotNum, Y: PlotNum, D: Iterator<Item = (X, Y)> + Clone, F: Display> PlotTrait<X, Y> for PlotStruct<X, Y, D, F> { fn write_name(&self, a: &mut dyn fmt::Write) -> fmt::Result { write!(a, "{}", self.func) } fn iter_first(&mut self) -> &mut dyn Iterator<Item = (X, Y)> { &mut self.first } fn iter_second(&mut self) -> &mut dyn Iterator<Item = (X, Y)> { &mut self.second } } enum PlotType { Scatter, Line, Histo, LineFill, LineFillRaw } struct Plot<'a, X: PlotNum, 
Y: PlotNum> { plot_type: PlotType, plots: Box<dyn PlotTrait<X, Y> + 'a>, } /// /// Default SVG Header for a Poloto graph. /// pub const SVG_HEADER: &str = r##"<svg class="poloto" width="800" height="500" viewBox="0 0 800 500" xmlns="http://www.w3.org/2000/svg">"##; /// /// Default SVG end tag. /// pub const SVG_END: &str = "</svg>"; /// Default light theme pub const STYLE_CONFIG_LIGHT_DEFAULT: &str = ".poloto { \ stroke-linecap:round; \ stroke-linejoin:round; \ font-family: 'Tahoma', sans-serif; \ background-color: AliceBlue;\ } \ .poloto_scatter{stroke-width:7} \ .poloto_line{stroke-width:2} \ .poloto_text{fill: black;} \ .poloto_axis_lines{stroke: black;stroke-width:3;fill:none;stroke-dasharray:none} \ .poloto0stroke{stroke: blue;} \ .poloto1stroke{stroke: red;} \ .poloto2stroke{stroke: green;} \ .poloto3stroke{stroke: gold;} \ .poloto4stroke{stroke: aqua;} \ .poloto5stroke{stroke: lime;} \ .poloto6stroke{stroke: orange;} \ .poloto7stroke{stroke: chocolate;} \ .poloto0fill{fill:blue;} \ .poloto1fill{fill:red;} \ .poloto2fill{fill:green;} \ .poloto3fill{fill:gold;} \ .poloto4fill{fill:aqua;} \ .poloto5fill{fill:lime;} \ .poloto6fill{fill:orange;} \ .poloto7fill{fill:chocolate;}"; /// Default dark theme pub const STYLE_CONFIG_DARK_DEFAULT: &str = ".poloto { \ stroke-linecap:round; \ stroke-linejoin:round; \ font-family: 'Tahoma', sans-serif; \ background-color: #262626;\ } \ .poloto_scatter{stroke-width:7} \ .poloto_line{stroke-width:2} \ .poloto_text{fill: white;} \ .poloto_axis_lines{stroke: white;stroke-width:3;fill:none;stroke-dasharray:none} \ .poloto0stroke{stroke: blue;} \ .poloto1stroke{stroke: red;} \ .poloto2stroke{stroke: green;} \ .poloto3stroke{stroke: gold;} \ .poloto4stroke{stroke: aqua;} \ .poloto5stroke{stroke: lime;} \ .poloto6stroke{stroke: orange;} \ .poloto7stroke{stroke: chocolate;} \ .poloto0fill{fill:blue;} \ .poloto1fill{fill:red;} \ .poloto2fill{fill:green;} \ .poloto3fill{fill:gold;} \ .poloto4fill{fill:aqua;} \ .poloto5fill{fill:lime;} \ 
.poloto6fill{fill:orange;} \ .poloto7fill{fill:chocolate;}"; /* /// The demsions of the svg graph `[800,500]`. pub const DIMENSIONS: [usize; 2] = [800, 500]; */ /// Iterators that are passed to the [`Plotter`] plot functions must produce /// items that implement this trait. pub trait Plottable<X: PlotNum, Y: PlotNum> { /// Produce one plot fn make_plot(self) -> (X, Y); } impl<T: PlotNum> Plottable<T, T> for [T; 2] { fn make_plot(self) -> (T, T) { let [x, y] = self; (x, y) } } impl<T: PlotNum> Plottable<T, T> for &[T; 2] { fn make_plot(self) -> (T, T) { let [x, y] = *self; (x, y) } } impl<A: PlotNum, B: PlotNum> Plottable<A, B> for (A, B) { fn make_plot(self) -> (A, B) { self } } impl<A: PlotNum, B: PlotNum> Plottable<A, B> for &(A, B) { fn make_plot(self) -> (A, B) { *self } } /// /// Create a Plotter /// pub fn plot<'a, X: PlotNum, Y: PlotNum>( title: impl Display + 'a, xname: impl Display + 'a, yname: impl Display + 'a, ) -> Plotter<'a, X, Y> { Plotter::new(title, xname, yname) } trait MyFmt<X: PlotNum> { fn write( &self, formatter: &mut std::fmt::Formatter, value: X, step: Option<X>, ) -> std::fmt::Result; } struct Foo<A, X>(A, PhantomData<X>); impl<X: PlotNum, A: Fn(&mut std::fmt::Formatter, X, Option<X>) -> std::fmt::Result> Foo<A, X> { fn new(a: A) -> Foo<A, X> { Foo(a, PhantomData) } } impl<X: PlotNum, A: Fn(&mut std::fmt::Formatter, X, Option<X>) -> std::fmt::Result> MyFmt<X> for Foo<A, X> { fn write( &self, formatter: &mut std::fmt::Formatter, value: X, step: Option<X>, ) -> std::fmt::Result { (self.0)(formatter, value, step) } } /// Keeps track of plots. /// User supplies iterators that will be iterated on when /// render is called. /// /// * The svg element belongs to the `poloto` css class. /// * The title,xname,yname,legend text SVG elements belong to the `poloto_text` class. /// * The axis line SVG elements belong to the `poloto_axis_lines` class. /// * The background belongs to the `poloto_background` class. 
/// pub struct Plotter<'a, X: PlotNum + 'a, Y: PlotNum + 'a> { title: Box<dyn fmt::Display + 'a>, xname: Box<dyn fmt::Display + 'a>, yname: Box<dyn fmt::Display + 'a>, plots: Vec<Plot<'a, X, Y>>, xmarkers: Vec<X>, ymarkers: Vec<Y>, num_css_classes: Option<usize>, preserve_aspect: bool, xtick_fmt: Box<dyn MyFmt<X> + 'a>, ytick_fmt: Box<dyn MyFmt<Y> + 'a>, } impl<'a, X: PlotNum, Y: PlotNum> Plotter<'a, X, Y> { /// /// Create a plotter with the specified element. /// /// ``` /// let mut p = poloto::Plotter::new("title", "x", "y"); /// p.line("",[[1,1]]); /// ``` pub fn new( title: impl Display + 'a, xname: impl Display + 'a, yname: impl Display + 'a, ) -> Plotter<'a, X, Y> { Plotter { title: Box::new(title), xname: Box::new(xname), yname: Box::new(yname), plots: Vec::new(), xmarkers: Vec::new(), ymarkers: Vec::new(), num_css_classes: Some(8), preserve_aspect: false, xtick_fmt: Box::new(Foo::<_, X>::new(move |a, b, c| b.fmt_tick(a, c))), ytick_fmt: Box::new(Foo::<_, Y>::new(move |a, b, c| b.fmt_tick(a, c))), } } /// Create a line from plots using a SVG polyline element. /// The element belongs to the `.poloto[N]stroke` css class. /// /// ``` /// let data = [[1.0,4.0], [2.0,5.0], [3.0,6.0]]; /// let mut plotter = poloto::plot("title", "x", "y"); /// plotter.line("", &data); /// ``` pub fn line<I>(&mut self, name: impl Display + 'a, plots: I) -> &mut Self where I: IntoIterator, I::IntoIter: Clone + 'a, I::Item: Plottable<X, Y>, { self.plots.push(Plot { plot_type: PlotType::Line, plots: Box::new(PlotStruct::new( plots.into_iter().map(|x| x.make_plot()), name, )), }); self } /// Create a line from plots that will be filled underneath using a SVG path element. /// The path element belongs to the `.poloto[N]fill` css class. 
/// /// ``` /// let data = [[1.0,4.0], [2.0,5.0], [3.0,6.0]]; /// let mut plotter = poloto::plot("title", "x", "y"); /// plotter.line_fill("", &data); /// ``` pub fn line_fill<I>(&mut self, name: impl Display + 'a, plots: I) -> &mut Self where I: IntoIterator, I::IntoIter: Clone + 'a, I::Item: Plottable<X, Y>, { self.plots.push(Plot { plot_type: PlotType::LineFill, plots: Box::new(PlotStruct::new( plots.into_iter().map(|x| x.make_plot()), name, )), }); self } /// Create a line from plots that will be filled using a SVG path element. /// The first and last points will be connected and then filled in. /// The path element belongs to the `.poloto[N]fill` css class. /// /// ``` /// let data = [[1.0,4.0], [2.0,5.0], [3.0,6.0]]; /// let mut plotter = poloto::plot("title", "x", "y"); /// plotter.line_fill_raw("", &data); /// ``` pub fn line_fill_raw<I>(&mut self, name: impl Display + 'a, plots: I) -> &mut Self where I: IntoIterator, I::IntoIter: Clone + 'a, I::Item: Plottable<X, Y>, { self.plots.push(Plot { plot_type: PlotType::LineFillRaw, plots: Box::new(PlotStruct::new( plots.into_iter().map(|x| x.make_plot()), name, )), }); self } /// Create a scatter plot from plots, using a SVG path with lines with zero length. /// Each point can be sized using the stroke width. /// The path belongs to the CSS classes `poloto_scatter` and `.poloto[N]stroke` css class /// with the latter class overriding the former. /// /// ``` /// let data = [[1.0,4.0], [2.0,5.0], [3.0,6.0]]; /// let mut plotter = poloto::plot("title", "x", "y"); /// plotter.scatter("", &data); /// ``` pub fn scatter<I>(&mut self, name: impl Display + 'a, plots: I) -> &mut Self where I: IntoIterator, I::IntoIter: Clone + 'a, I::Item: Plottable<X, Y>, { self.plots.push(Plot { plot_type: PlotType::Scatter, plots: Box::new(PlotStruct::new( plots.into_iter().map(|x| x.make_plot()), name, )), }); self } /// Create a histogram from plots using SVG rect elements. /// Each bar's left side will line up with a point. 
/// Each rect element belongs to the `.poloto[N]fill` css class. /// /// ``` /// let data = [[1.0,4.0], [2.0,5.0], [3.0,6.0]]; /// let mut plotter = poloto::plot("title", "x", "y"); /// plotter.histogram("", &data); /// ``` pub fn histogram<I>(&mut self, name: impl Display + 'a, plots: I) -> &mut Self where I: IntoIterator, I::IntoIter: Clone + 'a, I::Item: Plottable<X, Y>, { self.plots.push(Plot { plot_type: PlotType::Histo, plots: Box::new(PlotStruct::new( plots.into_iter().map(|x| x.make_plot()), name, )), }); self } /// Add x values that the scaled graph must fit. /// /// ``` /// let data = [[1.0,4.0], [2.0,5.0], [3.0,6.0]]; /// let mut plotter = poloto::plot("title", "x", "y"); /// plotter.line("", &data); /// /// // Include origin in the graph. /// plotter.xmarker(0.0).ymarker(0.0); /// ``` pub fn xmarker(&mut self, marker: X) -> &mut Self { self.xmarkers.push(marker); self } /// Add y values that the scaled graph must fit. /// /// ``` /// let data = [[1.0,4.0], [2.0,5.0], [3.0,6.0]]; /// let mut plotter = poloto::plot("title", "x", "y"); /// plotter.line("", &data); /// /// // Include origin in the graph. /// plotter.xmarker(0.0).ymarker(0.0); /// ``` pub fn ymarker(&mut self, marker: Y) -> &mut Self { self.ymarkers.push(marker); self } /// /// Preserve the aspect ratio by drawing a smaller graph in the same area. /// pub fn preserve_aspect(&mut self) -> &mut Self { self.preserve_aspect = true; self } /// /// The number of distinct css classes. If there are more plots than /// classes, then they will wrap around. The default value is 8. /// /// A value of None, means it will never wrap around. 
/// /// ``` /// let data = [[1.0,4.0], [2.0,5.0], [3.0,6.0]]; /// let mut plotter = poloto::plot("title", "x", "y"); /// plotter.line("", &data); /// plotter.num_css_class(Some(30)); /// ``` /// pub fn num_css_class(&mut self, a: Option<usize>) -> &mut Self { self.num_css_classes = a; self } /// /// Move a plotter out from behind a mutable reference leaving /// an empty plotter. /// pub fn move_into(&mut self) -> Plotter<'a, X, Y> { let mut empty = crate::Plotter::new("", "", ""); core::mem::swap(&mut empty, self); empty } /// /// Overrides the [`PlotNum::fmt_tick`] function. /// The callback function provided will get called on each /// interval tick to be drawn. The callback function is passed /// the value of the interval, as well as the step-size. /// This function is also called to display `k` and `j` values /// for when the magnitudes of of the plots are extreme. /// In those cases, the step-size is not provided and `None` is passed. /// pub fn xinterval_fmt( &mut self, a: impl Fn(&mut std::fmt::Formatter, X, Option<X>) -> std::fmt::Result + 'a, ) -> &mut Self { self.xtick_fmt = Box::new(Foo::new(a)); self } /// /// Overrides the [`PlotNum::fmt_tick`] function. /// The callback function provided will get called on each /// interval tick to be drawn. The callback function is passed /// the value of the interval, as well as the step-size. /// This function is also called to display `k` and `j` values /// for when the magnitudes of of the plots are extreme. /// In those cases, the step-size is not provided and `None` is passed. /// pub fn yinterval_fmt( &mut self, a: impl Fn(&mut std::fmt::Formatter, Y, Option<Y>) -> std::fmt::Result + 'a, ) -> &mut Self { self.ytick_fmt = Box::new(Foo::new(a)); self } /// /// Use the plot iterators to write out the graph elements. /// Does not add a svg tag, or any styling elements. /// Use this if you want to embed a svg into your html. /// You will just have to add your own svg sag and then supply styling. 
/// /// Panics if the render fails. /// /// In order to meet a more flexible builder pattern, instead of consuming the Plotter, /// this function will mutable borrow the Plotter and leave it with empty data. /// /// ``` /// let data = [[1.0,4.0], [2.0,5.0], [3.0,6.0]]; /// let mut plotter = poloto::plot("title", "x", "y"); /// plotter.line("", &data); /// let mut k=String::new(); /// plotter.render(&mut k); /// ``` pub fn render<T: std::fmt::Write>(&mut self, a: T) -> fmt::Result { render::render(self, a) } } /// /// Make a graph with a svg tag and a simple css theme. /// /// ``` /// let data = [[1.0,4.0], [2.0,5.0], [3.0,6.0]]; /// let mut plotter = poloto::plot("title", "x", "y"); /// plotter.line("", &data); /// let mut k=String::new(); /// poloto::simple_theme(&mut k,plotter); /// ``` pub fn simple_theme<T: std::fmt::Write, X: PlotNum, Y: PlotNum>( mut a: T, mut p: Plotter<X, Y>, ) -> std::fmt::Result { write!( &mut a, "{}<style>{}</style>{}{}", SVG_HEADER, STYLE_CONFIG_LIGHT_DEFAULT, disp(|a| p.render(a)), SVG_END ) } /// /// Make a graph with a svg tag and a simple dark css theme. /// /// ``` /// let data = [[1.0,4.0], [2.0,5.0], [3.0,6.0]]; /// let mut plotter = poloto::plot("title", "x", "y"); /// plotter.line("", &data); /// let mut k=String::new(); /// poloto::simple_theme_dark(&mut k,plotter); /// ``` pub fn simple_theme_dark<T: std::fmt::Write, X: PlotNum, Y: PlotNum>( mut a: T, mut p: Plotter<X, Y>, ) -> std::fmt::Result { write!( &mut a, "{}<style>{}</style>{}{}", SVG_HEADER, STYLE_CONFIG_DARK_DEFAULT, disp(|a| p.render(a)), SVG_END ) } /// Shorthand for `moveable_format(move |w|write!(w,...))` /// Similar to `format_args!()` except has a more flexible lifetime. #[macro_export] macro_rules! formatm { ($($arg:tt)*) => { $crate::DisplayableClosure::new(move |w| write!(w,$($arg)*)) } } /// Convert a moved closure into a impl fmt::Display. /// This is useful because std's `format_args!()` macro /// has a shorter lifetime. 
pub struct DisplayableClosure<F>(pub F); impl<F: Fn(&mut fmt::Formatter) -> fmt::Result> DisplayableClosure<F> { pub fn new(a: F) -> Self { DisplayableClosure(a) } } impl<F: Fn(&mut fmt::Formatter) -> fmt::Result> fmt::Display for DisplayableClosure<F> { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { (self.0)(formatter) } } /// /// Leverage rust's display format system using `RefCell` under the hood. /// pub fn disp<F: FnOnce(&mut fmt::Formatter) -> fmt::Result>(a: F) -> DisplayableClosureOnce<F> { DisplayableClosureOnce::new(a) } use std::cell::RefCell; /// /// Wrap a mutable closure in a `RefCell` to allow it to be called inside of `fmt::Display::fmt` /// pub struct DisplayableClosureOnce<F>(pub RefCell<Option<F>>); impl<F: FnOnce(&mut fmt::Formatter) -> fmt::Result> DisplayableClosureOnce<F> { pub fn new(a: F) -> Self { DisplayableClosureOnce(RefCell::new(Some(a))) } } impl<F: FnOnce(&mut fmt::Formatter) -> fmt::Result> fmt::Display for DisplayableClosureOnce<F> { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { if let Some(f) = (self.0.borrow_mut()).take() { (f)(formatter) } else { Ok(()) } } } /// /// A disconnectable number. A number that can me marked as a hole to signify that there is a disconnect in plots. /// See [`Croppable`] /// pub trait DiscNum: PlotNum { /// Create a hole value. fn hole() -> Self; } /// /// A plottable number. In order to be able to plot a number, we need information on how /// to display it as well as the interval ticks. /// pub trait PlotNum: PartialOrd + Copy + std::fmt::Display { /// Is this a hole value to inject discontinuty? fn is_hole(&self) -> bool { false } /// /// Given an ideal number of intervals across the min and max values, /// Calculate information related to where the interval ticks should go. /// fn compute_ticks(ideal_num_steps: u32, range: [Self; 2]) -> TickInfo<Self>; /// If there is only one point in a graph, or no point at all, /// the range to display in the graph. 
fn unit_range(offset: Option<Self>) -> [Self; 2]; /// Provided a min and max range, scale the current value against max. fn scale(&self, val: [Self; 2], max: f64) -> f64; /// Used to display a tick /// Before overriding this, consider using [`crate::Plotter::xinterval_fmt`] and [`crate::Plotter::yinterval_fmt`]. fn fmt_tick( &self, formatter: &mut std::fmt::Formatter, _step: Option<Self>, ) -> std::fmt::Result { write!(formatter, "{}", self) } /// Compute the exact size of the ticks. /// This can be overridden such that it always returns `None`. /// This way there will be no dashes, and it will just be a solid line. fn dash_size( ideal_dash_size: f64, tick_info: &TickInfo<Self>, range: [Self; 2], max: f64, ) -> Option<f64>; } /// /// One interval tick /// #[derive(Debug, Clone)] pub struct Tick<I> { pub position: I, /// If [`TickInfo::display_relative`] is `None`, then this has the same value as [`Tick::position`] pub value: I, } /// /// Information on the properties of all the interval ticks for one dimension. /// #[derive(Debug, Clone)] pub struct TickInfo<I> { /// List of the position of each tick to be displayed. pub ticks: Vec<Tick<I>>, /// The difference between two adjacent ticks pub step: I, /// The starting tick position pub start_step: I, /// The number of dashes between two ticks must be a multiple of this number. pub dash_multiple: u32, /// If we want to display the tick values relatively, this will /// have the base start to start with. pub display_relative: Option<I>, } impl<I> TickInfo<I> { pub fn map<J>(self, func: impl Fn(I) -> J) -> TickInfo<J> { TickInfo { ticks: self .ticks .into_iter() .map(|x| Tick { position: func(x.position), value: func(x.value), }) .collect(), step: func(self.step), start_step: func(self.start_step), dash_multiple: self.dash_multiple, display_relative: self.display_relative.map(|x| func(x)), } } }
29.943243
139
0.561784
cc2364a56bc554a5aebf66fc7daa0ca06ff4236b
5,042
/*! PE headers. */ use std::slice; use std::ops::Range; use super::Pe; use super::image::*; /// Describes the PE headers. #[derive(Copy, Clone)] pub struct Headers<P> { pe: P, } impl<'a, P: Pe<'a>> Headers<P> { pub(crate) fn new(pe: P) -> Headers<P> { Headers { pe } } /// Gets the PE instance. pub fn pe(&self) -> P { self.pe } /// Gets the PE headers as a byte slice. pub fn image(&self) -> &'a [u8] { unsafe { self.pe.image().get_unchecked(..self.pe.optional_header().SizeOfHeaders as usize) } } /// Calculates the optional header's CheckSum. pub fn check_sum(&self) -> u32 { let image = self.pe.image(); let check_sum_position = (self.pe.dos_header().e_lfanew as usize + offset_of!(IMAGE_NT_HEADERS, OptionalHeader.CheckSum)) / 4; let dwords = unsafe { slice::from_raw_parts(image.as_ptr() as *const u32, image.len() / 4) }; let mut check_sum = 0u64; for i in 0..dwords.len() { if i == check_sum_position { continue; } let dw = dwords[i]; check_sum = (check_sum & 0xffffffff) + dw as u64 + (check_sum >> 32); if check_sum > 0xffffffff { check_sum = (check_sum & 0xffffffff) + (check_sum >> 32); } } check_sum = (check_sum & 0xffff) + (check_sum >> 16); check_sum = check_sum + (check_sum >> 16); check_sum = check_sum & 0xffff; check_sum += image.len() as u64; check_sum as u32 } /// Gets the code range from the optional header. pub fn code_range(&self) -> Range<Rva> { let optional_header = self.pe.optional_header(); optional_header.BaseOfCode..u32::wrapping_add(optional_header.BaseOfCode, optional_header.SizeOfCode) } /// Gets the full image range excluding the PE headers. pub fn image_range(&self) -> Range<Rva> { let optional_header = self.pe.optional_header(); optional_header.SizeOfHeaders..optional_header.SizeOfImage } } /* "headers": { "DosHeader": { .. } "NtHeaders": { "Signature": .., "FileHeader": { .. } "OptionalHeader": { .. } } "DataDirectory": [ .. ] "SectionHeaders": [ .. 
] "details": { "DosHeader.e_magic": "MZ", "NtHeaders.Signature": "PE", "FileHeader.Machine": "AMD", "FileHeader.Characteristics": [], "OptionalHeader.Magic": "PE32+", "DataDirectory.Names": ["Exports", "Imports", ..], "DataDirectory.Sections": [1, 1, ..], "SectionHeaders.Characteristics": [["executable", "read", "write"], ["read"], ["read", "write"]], } } */ #[cfg(feature = "serde")] mod serde { use crate::util::serde_helper::*; use crate::stringify; use super::{Pe, Headers}; impl<'a, P: Pe<'a>> Serialize for Headers<P> { fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { let mut state = serializer.serialize_struct("Headers", 5)?; state.serialize_field("DosHeader", self.pe.dos_header())?; state.serialize_field("NtHeaders", self.pe.nt_headers())?; state.serialize_field("DataDirectory", self.pe.data_directory())?; state.serialize_field("SectionHeaders", self.pe.section_headers())?; state.serialize_field("details", &Details { pe: self.pe })?; state.end() } } struct Details<P> { pe: P } impl<'a, P: Pe<'a>> Serialize for Details<P> { fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { let mut state = serializer.serialize_struct("Details", 11)?; state.serialize_field("DosHeader.e_magic", "MZ")?; state.serialize_field("NtHeaders.Signature", "PE")?; let file_header = self.pe.file_header(); state.serialize_field("FileHeader.Machine", &stringify::Machine(file_header.Machine).to_str())?; state.serialize_field("FileHeader.Characteristics", &SerdeIter(stringify::FileChars(file_header.Characteristics).to_strs()))?; let optional_header = self.pe.optional_header(); state.serialize_field("OptionalHeader.Magic", &stringify::OptionalMagic(optional_header.Magic).to_str())?; state.serialize_field("OptionalHeader.CheckSum", &Headers { pe: self.pe }.check_sum())?; state.serialize_field("OptionalHeader.Subsystem", &stringify::Subsystem(optional_header.Subsystem).to_str())?; state.serialize_field("OptionalHeader.DllCharacteristics", 
&SerdeIter(stringify::DllChars(optional_header.DllCharacteristics).to_strs()))?; let data_directory_names = (0..self.pe.data_directory().len()).map(stringify::DirectoryEntry).map(stringify::DirectoryEntry::to_str); state.serialize_field("DataDirectory.Names", &SerdeIter(data_directory_names))?; let data_directory_sects = self.pe.data_directory().iter().map(|dd| { self.pe.section_headers().iter().position(|&sect| dd.VirtualAddress >= sect.VirtualAddress && dd.VirtualAddress < sect.VirtualAddress + sect.VirtualSize) }); state.serialize_field("DataDirectory.Sections", &SerdeIter(data_directory_sects))?; let sections_chars = self.pe.section_headers().iter().map(|sect| { let section_chars = sect.Characteristics; SerdeIter(stringify::SectionChars(section_chars).to_strs()) }); state.serialize_field("SectionHeaders.Characteristics", &SerdeIter(sections_chars))?; state.end() } } }
35.258741
157
0.684054
72519f09f3f932c6474c38ffc063d7ead21262f8
93,469
// Copyright 2016 Mozilla Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::compiler::{Cacheable, ColorMode, Compiler, CompilerArguments, CompileCommand, CompilerHasher, CompilerKind, Compilation, HashResult}; #[cfg(feature = "dist-client")] use crate::compiler::OutputsRewriter; use crate::compiler::args::*; use crate::dist; #[cfg(feature = "dist-client")] use crate::dist::pkg; use futures::Future; use futures_cpupool::CpuPool; use log::Level::Trace; #[cfg(feature = "dist-client")] use lru_disk_cache::{LruCache,Meter}; use crate::mock_command::{CommandCreatorSync, RunCommand}; #[cfg(feature = "dist-client")] #[cfg(feature = "dist-client")] use std::borrow::Borrow; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; #[cfg(feature = "dist-client")] use std::collections::hash_map::RandomState; #[cfg(feature = "dist-client")] use std::env::consts::{DLL_PREFIX, EXE_EXTENSION}; use std::env::consts::DLL_EXTENSION; use std::ffi::OsString; use std::fmt; use std::fs; use std::hash::Hash; #[cfg(feature = "dist-client")] use std::io; use std::io::Read; use std::iter; use std::path::{Path, PathBuf}; use std::process; #[cfg(feature = "dist-client")] use std::sync::{Arc, Mutex}; use std::time; use tempdir::TempDir; use crate::util::{fmt_duration_as_secs, run_input_output, Digest, hash_all}; use crate::util::{HashToDigest, OsStrExt, ref_env}; use crate::errors::*; /// Can dylibs (like proc macros) be distributed on this platform? 
// Dylib distribution is only supported on x86_64 Linux builds of the
// dist client; everywhere else it is compiled out to `false`.
#[cfg(all(feature = "dist-client", target_os = "linux", target_arch = "x86_64"))]
const CAN_DIST_DYLIBS: bool = true;
#[cfg(all(feature = "dist-client", not(all(target_os = "linux", target_arch = "x86_64"))))]
const CAN_DIST_DYLIBS: bool = false;

// Filename shape of rlib crate outputs: `lib<name>.rlib`.
#[cfg(feature = "dist-client")]
const RLIB_PREFIX: &str = "lib";
#[cfg(feature = "dist-client")]
const RLIB_EXTENSION: &str = "rlib";

/// Directory in the sysroot containing binary to which rustc is linked.
#[cfg(feature = "dist-client")]
const BINS_DIR: &str = "bin";

/// Directory in the sysroot containing shared libraries to which rustc is linked.
#[cfg(not(windows))]
const LIBS_DIR: &str = "lib";

/// Directory in the sysroot containing shared libraries to which rustc is linked.
/// On Windows the DLLs live next to the binaries.
#[cfg(windows)]
const LIBS_DIR: &str = "bin";

/// A struct on which to hang a `Compiler` impl.
#[derive(Debug, Clone)]
pub struct Rust {
    /// The path to the rustc executable.
    executable: PathBuf,
    /// The host triple for this rustc.
    host: String,
    /// The path to the rustc sysroot.
    sysroot: PathBuf,
    /// The SHA-1 digests of all the shared libraries in rustc's $sysroot/lib (or /bin on Windows).
    compiler_shlibs_digests: Vec<String>,
    /// A shared, caching reader for rlib dependencies
    #[cfg(feature = "dist-client")]
    rlib_dep_reader: Option<Arc<RlibDepReader>>,
}

/// A struct on which to hang a `CompilerHasher` impl.
/// Carries everything from `Rust` plus the arguments parsed from one
/// specific rustc invocation.
#[derive(Debug, Clone)]
pub struct RustHasher {
    /// The path to the rustc executable.
    executable: PathBuf,
    /// The host triple for this rustc.
    host: String,
    /// The path to the rustc sysroot.
    sysroot: PathBuf,
    /// The SHA-1 digests of all the shared libraries in rustc's $sysroot/lib (or /bin on Windows).
    compiler_shlibs_digests: Vec<String>,
    /// A shared, caching reader for rlib dependencies
    #[cfg(feature = "dist-client")]
    rlib_dep_reader: Option<Arc<RlibDepReader>>,
    /// Parsed arguments from the rustc invocation
    parsed_args: ParsedArguments,
}

#[derive(Debug, Clone, PartialEq)]
pub struct ParsedArguments {
    /// The full commandline, with all parsed arguments
    arguments: Vec<Argument<ArgData>>,
    /// The location of compiler outputs.
    output_dir: PathBuf,
    /// Paths to extern crates used in the compile.
    externs: Vec<PathBuf>,
    /// The directories searched for rlibs
    crate_link_paths: Vec<PathBuf>,
    /// Static libraries linked to in the compile.
    staticlibs: Vec<PathBuf>,
    /// The crate name passed to --crate-name.
    crate_name: String,
    /// The crate types that will be generated
    crate_types: CrateTypes,
    /// If dependency info is being emitted, the name of the dep info file.
    dep_info: Option<PathBuf>,
    /// rustc says that emits .rlib for --emit=metadata
    /// https://github.com/rust-lang/rust/issues/54852
    emit: HashSet<String>,
    /// The value of any `--color` option passed on the commandline.
    color_mode: ColorMode,
    /// Whether `--json` was passed to this invocation.
    has_json: bool,
}

/// A struct on which to hang a `Compilation` impl.
#[derive(Debug, Clone)]
pub struct RustCompilation {
    /// The path to the rustc executable.
    executable: PathBuf,
    /// The host triple for this rustc.
    host: String,
    /// The sysroot for this rustc
    sysroot: PathBuf,
    /// A shared, caching reader for rlib dependencies
    #[cfg(feature = "dist-client")]
    rlib_dep_reader: Option<Arc<RlibDepReader>>,
    /// All arguments passed to rustc
    arguments: Vec<Argument<ArgData>>,
    /// The compiler inputs.
    inputs: Vec<PathBuf>,
    /// The compiler outputs.
    outputs: HashMap<String, PathBuf>,
    /// The directories searched for rlibs
    crate_link_paths: Vec<PathBuf>,
    /// The crate name being compiled.
    crate_name: String,
    /// The crate types that will be generated
    crate_types: CrateTypes,
    /// If dependency info is being emitted, the name of the dep info file.
    dep_info: Option<PathBuf>,
    /// The current working directory
    cwd: PathBuf,
    /// The environment variables
    env_vars: Vec<(OsString, OsString)>,
}

// The selection of crate types for this compilation
#[derive(Debug, Clone, PartialEq)]
pub struct CrateTypes {
    rlib: bool,
    staticlib: bool,
}

lazy_static! {
    /// Emit types that we will cache.
    static ref ALLOWED_EMIT: HashSet<&'static str> = [
        "link",
        "metadata",
        "dep-info",
    ].iter().map(|s| *s).collect();
}

/// Version number for cache key.
/// Bump this whenever any input to the hash computed in
/// `generate_hash_key` changes.
const CACHE_VERSION: &[u8] = b"4";

/// Get absolute paths for all source files listed in rustc's dep-info output.
///
/// Runs rustc with `--emit dep-info` into a temp dir, then parses the
/// resulting `.d` file on the thread pool. Paths are resolved relative
/// to `cwd`. The returned future fails if rustc or the parse fails.
fn get_source_files<T>(creator: &T,
                       crate_name: &str,
                       executable: &Path,
                       arguments: &[OsString],
                       cwd: &Path,
                       env_vars: &[(OsString, OsString)],
                       pool: &CpuPool)
                       -> SFuture<Vec<PathBuf>>
    where T: CommandCreatorSync,
{
    let start = time::Instant::now();
    // Get the full list of source files from rustc's dep-info.
    let temp_dir = ftry!(TempDir::new("sccache").chain_err(|| "Failed to create temp dir"));
    let dep_file = temp_dir.path().join("deps.d");
    let mut cmd = creator.clone().new_command_sync(executable);
    // Run with a cleared environment plus only the vars we were given.
    cmd.args(&arguments)
        .args(&["--emit", "dep-info"])
        .arg("-o")
        .arg(&dep_file)
        .env_clear()
        .envs(ref_env(env_vars))
        .current_dir(cwd);
    trace!("[{}]: get dep-info: {:?}", crate_name, cmd);
    let dep_info = run_input_output(cmd, None);
    // Parse the dep-info file, then hash the contents of those files.
    let pool = pool.clone();
    let cwd = cwd.to_owned();
    let crate_name = crate_name.to_owned();
    Box::new(dep_info.and_then(move |_| -> SFuture<_> {
        let name2 = crate_name.clone();
        let parsed = pool.spawn_fn(move || {
            parse_dep_file(&dep_file, &cwd).chain_err(|| {
                format!("Failed to parse dep info for {}", name2)
            })
        });
        Box::new(parsed.map(move |files| {
            trace!("[{}]: got {} source files from dep-info in {}", crate_name, files.len(),
                   fmt_duration_as_secs(&start.elapsed()));
            // Just to make sure we capture temp_dir.
            drop(temp_dir);
            files
        }))
    }))
}

/// Parse dependency info from `file` and return a Vec of files mentioned.
/// Treat paths as relative to `cwd`.
fn parse_dep_file<T, U>(file: T, cwd: U) -> Result<Vec<PathBuf>>
    where T: AsRef<Path>,
          U: AsRef<Path>,
{
    let mut f = fs::File::open(file)?;
    let mut deps = String::new();
    f.read_to_string(&mut deps)?;
    Ok(parse_dep_info(&deps, cwd))
}

/// Parse the contents of a Makefile-style dep-info file and return the
/// dependency paths from its first rule, joined onto `cwd` and sorted.
/// Returns an empty Vec when there is no first line or no `": "` separator.
fn parse_dep_info<T>(dep_info: &str, cwd: T) -> Vec<PathBuf>
    where T: AsRef<Path>
{
    let cwd = cwd.as_ref();
    // Just parse the first line, which should have the dep-info file and all
    // source files.
    let line = match dep_info.lines().next() {
        None => return vec![],
        Some(l) => l,
    };
    let pos = match line.find(": ") {
        None => return vec![],
        Some(p) => p,
    };
    let mut deps = Vec::new();
    let mut current_dep = String::new();
    // Dependencies after the target are space-separated; a backslash
    // followed by a space is an escaped space inside a single path, any
    // other backslash is kept literally (e.g. Windows path separators).
    let mut iter = line[pos + 2..].chars().peekable();
    loop {
        match iter.next() {
            Some('\\') => {
                if iter.peek() == Some(&' ') {
                    // "\ " -> literal space within one path.
                    current_dep.push(' ');
                    iter.next();
                } else {
                    current_dep.push('\\');
                }
            },
            Some(' ') => {
                // Unescaped space terminates the current path.
                deps.push(current_dep);
                current_dep = String::new();
            },
            Some(c) => current_dep.push(c),
            None => {
                if !current_dep.is_empty() {
                    deps.push(current_dep);
                }
                break
            },
        }
    }
    // Resolve relative paths against cwd and sort for determinism.
    let mut deps = deps.iter().map(|s| cwd.join(s)).collect::<Vec<_>>();
    deps.sort();
    deps
}

/// Run `rustc --print file-names` to get the outputs of compilation.
fn get_compiler_outputs<T>(creator: &T, executable: &Path, arguments: &[OsString], cwd: &Path, env_vars: &[(OsString, OsString)]) -> SFuture<Vec<String>> where T: CommandCreatorSync, { let mut cmd = creator.clone().new_command_sync(executable); cmd.args(&arguments) .args(&["--print", "file-names"]) .env_clear() .envs(ref_env(env_vars)) .current_dir(cwd); if log_enabled!(Trace) { trace!("get_compiler_outputs: {:?}", cmd); } let outputs = run_input_output(cmd, None); Box::new(outputs.and_then(move |output| -> Result<_> { let outstr = String::from_utf8(output.stdout).chain_err(|| "Error parsing rustc output")?; if log_enabled!(Trace) { trace!("get_compiler_outputs: {:?}", outstr); } Ok(outstr.lines().map(|l| l.to_owned()).collect()) })) } impl Rust { /// Create a new Rust compiler instance, calculating the hashes of /// all the shared libraries in its sysroot. pub fn new<T>(mut creator: T, executable: PathBuf, env_vars: &[(OsString, OsString)], rustc_verbose_version: &str, pool: CpuPool) -> SFuture<Rust> where T: CommandCreatorSync, { // Taken from Cargo let host = ftry!(rustc_verbose_version .lines() .find(|l| l.starts_with("host: ")) .map(|l| &l[6..]) .ok_or_else(|| Error::from("rustc verbose version didn't have a line for `host:`"))) .to_string(); let mut cmd = creator.new_command_sync(&executable); cmd.stdout(process::Stdio::piped()) .stderr(process::Stdio::null()) .arg("--print=sysroot") .env_clear() .envs(ref_env(env_vars)); let output = run_input_output(cmd, None); let sysroot_and_libs = output.and_then(move |output| -> Result<_> { //debug!("output.and_then: {}", output); let outstr = String::from_utf8(output.stdout).chain_err(|| "Error parsing sysroot")?; let sysroot = PathBuf::from(outstr.trim_end()); let libs_path = sysroot.join(LIBS_DIR); let mut libs = fs::read_dir(&libs_path).chain_err(|| format!("Failed to list rustc sysroot: `{:?}`", libs_path))?.filter_map(|e| { e.ok().and_then(|e| { e.file_type().ok().and_then(|t| { let p = e.path(); if t.is_file() && 
p.extension().map(|e| e == DLL_EXTENSION).unwrap_or(false) { Some(p) } else { None } }) }) }).collect::<Vec<_>>(); libs.sort(); Ok((sysroot, libs)) }); #[cfg(feature = "dist-client")] let rlib_dep_reader = { let executable = executable.clone(); let env_vars = env_vars.to_owned(); pool.spawn_fn(move || Ok(RlibDepReader::new_with_check(executable, &env_vars))) }; #[cfg(feature = "dist-client")] return Box::new(sysroot_and_libs.join(rlib_dep_reader).and_then(move |((sysroot, libs), rlib_dep_reader)| { let rlib_dep_reader = match rlib_dep_reader { Ok(r) => Some(Arc::new(r)), Err(e) => { warn!("Failed to initialise RlibDepDecoder, distributed compiles will be inefficient: {}", e); None }, }; hash_all(&libs, &pool).map(move |digests| { Rust { executable: executable, host, sysroot, compiler_shlibs_digests: digests, rlib_dep_reader, } }) })); #[cfg(not(feature = "dist-client"))] return Box::new(sysroot_and_libs.and_then(move |(sysroot, libs)| { hash_all(&libs, &pool).map(move |digests| { Rust { executable: executable, host, sysroot, compiler_shlibs_digests: digests, } }) })); } } impl<T> Compiler<T> for Rust where T: CommandCreatorSync, { fn kind(&self) -> CompilerKind { CompilerKind::Rust } #[cfg(feature = "dist-client")] fn get_toolchain_packager(&self) -> Box<dyn pkg::ToolchainPackager> { Box::new(RustToolchainPackager { sysroot: self.sysroot.clone() }) } /// Parse `arguments` as rustc command-line arguments, determine if /// we can cache the result of compilation. This is only intended to /// cover a subset of rustc invocations, primarily focused on those /// that will occur when cargo invokes rustc. /// /// Caveats: /// * We don't support compilation from stdin. /// * We require --emit. /// * We only support `link` and `dep-info` in --emit (and don't support *just* 'dep-info') /// * We require `--out-dir`. /// * We don't support `-o file`. 
fn parse_arguments(&self, arguments: &[OsString], cwd: &Path) -> CompilerArguments<Box<dyn CompilerHasher<T> + 'static>> { match parse_arguments(arguments, cwd) { CompilerArguments::Ok(args) => { CompilerArguments::Ok(Box::new(RustHasher { executable: self.executable.clone(), host: self.host.clone(), sysroot: self.sysroot.clone(), compiler_shlibs_digests: self.compiler_shlibs_digests.clone(), #[cfg(feature = "dist-client")] rlib_dep_reader: self.rlib_dep_reader.clone(), parsed_args: args, })) } CompilerArguments::NotCompilation => CompilerArguments::NotCompilation, CompilerArguments::CannotCache(why, extra_info) => CompilerArguments::CannotCache(why, extra_info), } } fn box_clone(&self) -> Box<dyn Compiler<T>> { Box::new((*self).clone()) } } macro_rules! make_os_string { ($( $v:expr ),*) => {{ let mut s = OsString::new(); $( s.push($v); )* s }}; } #[derive(Clone, Debug, PartialEq)] struct ArgCrateTypes { rlib: bool, staticlib: bool, others: HashSet<String>, } impl FromArg for ArgCrateTypes { fn process(arg: OsString) -> ArgParseResult<Self> { let arg = String::process(arg)?; let mut crate_types = ArgCrateTypes { rlib: false, staticlib: false, others: HashSet::new(), }; for ty in arg.split(",") { match ty { // It is assumed that "lib" always refers to "rlib", which // is true right now but may not be in the future "lib" | "rlib" => crate_types.rlib = true, "staticlib" => crate_types.staticlib = true, other => { crate_types.others.insert(other.to_owned()); }, } } Ok(crate_types) } } impl IntoArg for ArgCrateTypes { fn into_arg_os_string(self) -> OsString { let ArgCrateTypes { rlib, staticlib, others } = self; let mut types: Vec<_> = others.iter().map(String::as_str) .chain(if rlib { Some("rlib") } else { None }) .chain(if staticlib { Some("staticlib") } else { None }).collect(); types.sort(); let types_string = types.join(","); types_string.into() } fn into_arg_string(self, _transformer: PathTransformerFn<'_>) -> ArgToStringResult { let ArgCrateTypes { rlib, 
staticlib, others } = self; let mut types: Vec<_> = others.iter().map(String::as_str) .chain(if rlib { Some("rlib") } else { None }) .chain(if staticlib { Some("staticlib") } else { None }).collect(); types.sort(); let types_string = types.join(","); Ok(types_string) } } #[derive(Clone, Debug, PartialEq)] struct ArgLinkLibrary { kind: String, name: String, } impl FromArg for ArgLinkLibrary { fn process(arg: OsString) -> ArgParseResult<Self> { let (kind, name) = match split_os_string_arg(arg, "=")? { (kind, Some(name)) => (kind, name), // If no kind is specified, the default is dylib. (name, None) => ("dylib".to_owned(), name), }; Ok(ArgLinkLibrary { kind, name }) } } impl IntoArg for ArgLinkLibrary { fn into_arg_os_string(self) -> OsString { let ArgLinkLibrary { kind, name } = self; make_os_string!(kind, "=", name) } fn into_arg_string(self, _transformer: PathTransformerFn<'_>) -> ArgToStringResult { let ArgLinkLibrary { kind, name } = self; Ok(format!("{}={}", kind, name)) } } #[derive(Clone, Debug, PartialEq)] struct ArgLinkPath { kind: String, path: PathBuf, } impl FromArg for ArgLinkPath { fn process(arg: OsString) -> ArgParseResult<Self> { let (kind, path) = match split_os_string_arg(arg, "=")? 
{ (kind, Some(path)) => (kind, path), // If no kind is specified, the path is used to search for all kinds (path, None) => ("all".to_owned(), path), }; Ok(ArgLinkPath { kind, path: path.into() }) } } impl IntoArg for ArgLinkPath { fn into_arg_os_string(self) -> OsString { let ArgLinkPath { kind, path } = self; make_os_string!(kind, "=", path) } fn into_arg_string(self, transformer: PathTransformerFn<'_>) -> ArgToStringResult { let ArgLinkPath { kind, path } = self; Ok(format!("{}={}", kind, path.into_arg_string(transformer)?)) } } #[derive(Clone, Debug, PartialEq)] struct ArgCodegen { opt: String, value: Option<String>, } impl FromArg for ArgCodegen { fn process(arg: OsString) -> ArgParseResult<Self> { let (opt, value) = split_os_string_arg(arg, "=")?; Ok(ArgCodegen { opt, value }) } } impl IntoArg for ArgCodegen { fn into_arg_os_string(self) -> OsString { let ArgCodegen { opt, value } = self; if let Some(value) = value { make_os_string!(opt, "=", value) } else { make_os_string!(opt) } } fn into_arg_string(self, transformer: PathTransformerFn<'_>) -> ArgToStringResult { let ArgCodegen { opt, value } = self; Ok(if let Some(value) = value { format!("{}={}", opt, value.into_arg_string(transformer)?) } else { opt }) } } #[derive(Clone, Debug, PartialEq)] struct ArgExtern { name: String, path: PathBuf, } impl FromArg for ArgExtern { fn process(arg: OsString) -> ArgParseResult<Self> { if let (name, Some(path)) = split_os_string_arg(arg, "=")? 
{ Ok(ArgExtern { name, path: path.into() }) } else { Err(ArgParseError::Other("no path for extern")) } } } impl IntoArg for ArgExtern { fn into_arg_os_string(self) -> OsString { let ArgExtern { name, path } = self; make_os_string!(name, "=", path) } fn into_arg_string(self, transformer: PathTransformerFn<'_>) -> ArgToStringResult { let ArgExtern { name, path } = self; Ok(format!("{}={}", name, path.into_arg_string(transformer)?)) } } #[derive(Clone, Debug, PartialEq)] enum ArgTarget { Name(String), Path(PathBuf), Unsure(OsString), } impl FromArg for ArgTarget { fn process(arg: OsString) -> ArgParseResult<Self> { // Is it obviously a json file path? if Path::new(&arg).extension().map(|ext| ext == "json").unwrap_or(false) { return Ok(ArgTarget::Path(arg.into())) } // Time for clever detection - if we append .json (even if it's clearly // a directory, i.e. resulting in /my/dir/.json), does the path exist? let mut path = arg.clone(); path.push(".json"); if Path::new(&path).is_file() { // Unfortunately, we're now not sure what will happen without having // a list of all the built-in targets handy, as they don't get .json // auto-added for target json discovery return Ok(ArgTarget::Unsure(arg)) } // The file doesn't exist so it can't be a path, safe to assume it's a name Ok(ArgTarget::Name(arg.into_string().map_err(ArgParseError::InvalidUnicode)?)) } } impl IntoArg for ArgTarget { fn into_arg_os_string(self) -> OsString { match self { ArgTarget::Name(s) => s.into(), ArgTarget::Path(p) => p.into(), ArgTarget::Unsure(s) => s.into(), } } fn into_arg_string(self, transformer: PathTransformerFn<'_>) -> ArgToStringResult { Ok(match self { ArgTarget::Name(s) => s, ArgTarget::Path(p) => p.into_arg_string(transformer)?, ArgTarget::Unsure(s) => s.into_arg_string(transformer)?, }) } } ArgData!{ TooHardFlag, TooHard(OsString), TooHardPath(PathBuf), NotCompilationFlag, NotCompilation(OsString), LinkLibrary(ArgLinkLibrary), LinkPath(ArgLinkPath), Emit(String), Extern(ArgExtern), 
Color(String), Json(String), CrateName(String), CrateType(ArgCrateTypes), OutDir(PathBuf), CodeGen(ArgCodegen), PassThrough(OsString), Target(ArgTarget), } use self::ArgData::*; // These are taken from https://github.com/rust-lang/rust/blob/b671c32ddc8c36d50866428d83b7716233356721/src/librustc/session/config.rs#L1186 counted_array!(static ARGS: [ArgInfo<ArgData>; _] = [ flag!("-", TooHardFlag), take_arg!("--allow", OsString, CanBeSeparated('='), PassThrough), take_arg!("--cap-lints", OsString, CanBeSeparated('='), PassThrough), take_arg!("--cfg", OsString, CanBeSeparated('='), PassThrough), take_arg!("--codegen", ArgCodegen, CanBeSeparated('='), CodeGen), take_arg!("--color", String, CanBeSeparated('='), Color), take_arg!("--crate-name", String, CanBeSeparated('='), CrateName), take_arg!("--crate-type", ArgCrateTypes, CanBeSeparated('='), CrateType), take_arg!("--deny", OsString, CanBeSeparated('='), PassThrough), take_arg!("--emit", String, CanBeSeparated('='), Emit), take_arg!("--error-format", OsString, CanBeSeparated('='), PassThrough), take_arg!("--explain", OsString, CanBeSeparated('='), NotCompilation), take_arg!("--extern", ArgExtern, CanBeSeparated('='), Extern), take_arg!("--forbid", OsString, CanBeSeparated('='), PassThrough), flag!("--help", NotCompilationFlag), take_arg!("--json", String, CanBeSeparated('='), Json), take_arg!("--out-dir", PathBuf, CanBeSeparated('='), OutDir), take_arg!("--pretty", OsString, CanBeSeparated('='), NotCompilation), take_arg!("--print", OsString, CanBeSeparated('='), NotCompilation), take_arg!("--remap-path-prefix", OsString, CanBeSeparated('='), TooHard), take_arg!("--sysroot", PathBuf, CanBeSeparated('='), TooHardPath), take_arg!("--target", ArgTarget, CanBeSeparated('='), Target), take_arg!("--unpretty", OsString, CanBeSeparated('='), NotCompilation), flag!("--version", NotCompilationFlag), take_arg!("--warn", OsString, CanBeSeparated('='), PassThrough), take_arg!("-A", OsString, CanBeSeparated, PassThrough), 
take_arg!("-C", ArgCodegen, CanBeSeparated, CodeGen), take_arg!("-D", OsString, CanBeSeparated, PassThrough), take_arg!("-F", OsString, CanBeSeparated, PassThrough), take_arg!("-L", ArgLinkPath, CanBeSeparated, LinkPath), flag!("-V", NotCompilationFlag), take_arg!("-W", OsString, CanBeSeparated, PassThrough), take_arg!("-Z", OsString, CanBeSeparated, PassThrough), take_arg!("-l", ArgLinkLibrary, CanBeSeparated, LinkLibrary), take_arg!("-o", PathBuf, CanBeSeparated, TooHardPath), ]); fn parse_arguments(arguments: &[OsString], cwd: &Path) -> CompilerArguments<ParsedArguments> { let mut args = vec![]; let mut emit: Option<HashSet<String>> = None; let mut input = None; let mut output_dir = None; let mut crate_name = None; let mut crate_types = CrateTypes { rlib: false, staticlib: false }; let mut extra_filename = None; let mut externs = vec![]; let mut crate_link_paths = vec![]; let mut static_lib_names = vec![]; let mut static_link_paths: Vec<PathBuf> = vec![]; let mut color_mode = ColorMode::Auto; let mut has_json = false; for arg in ArgsIter::new(arguments.iter().map(|s| s.clone()), &ARGS[..]) { let arg = try_or_cannot_cache!(arg, "argument parse"); match arg.get_data() { Some(TooHardFlag) | Some(TooHard(_)) | Some(TooHardPath(_)) => { cannot_cache!(arg.flag_str().expect( "Can't be Argument::Raw/UnknownFlag", )) } Some(NotCompilationFlag) | Some(NotCompilation(_)) => return CompilerArguments::NotCompilation, Some(LinkLibrary(ArgLinkLibrary { kind, name })) => { if kind == "static" { static_lib_names.push(name.to_owned()) } }, Some(LinkPath(ArgLinkPath { kind, path })) => { // "crate" is not typically necessary as cargo will normally // emit explicit --extern arguments if kind == "crate" || kind == "dependency" || kind == "all" { crate_link_paths.push(cwd.join(path)) } if kind == "native" || kind == "all" { static_link_paths.push(cwd.join(path)) } }, Some(Emit(value)) => { if emit.is_some() { // We don't support passing --emit more than once. 
cannot_cache!("more than one --emit"); } emit = Some(value.split(",").map(str::to_owned).collect()) } Some(CrateType(ArgCrateTypes { rlib, staticlib, others })) => { // We can't cache non-rlib/staticlib crates, because rustc invokes the // system linker to link them, and we don't know about all the linker inputs. if !others.is_empty() { let others: Vec<&str> = others.iter().map(String::as_str).collect(); let others_string = others.join(","); cannot_cache!("crate-type", others_string) } crate_types.rlib |= rlib; crate_types.staticlib |= staticlib; } Some(CrateName(value)) => crate_name = Some(value.clone()), Some(OutDir(value)) => output_dir = Some(value.clone()), Some(Extern(ArgExtern { name: _, path })) => externs.push(path.clone()), Some(CodeGen(ArgCodegen { opt, value })) => { match (opt.as_ref(), value) { ("extra-filename", Some(value)) => extra_filename = Some(value.to_owned()), ("extra-filename", None) => cannot_cache!("extra-filename"), // Incremental compilation makes a mess of sccache's entire world // view. It produces additional compiler outputs that we don't cache, // and just letting rustc do its work in incremental mode is likely // to be faster than trying to fetch a result from cache anyway, so // don't bother caching compiles where it's enabled currently. // Longer-term we would like to figure out better integration between // sccache and rustc in the incremental scenario: // https://github.com/mozilla/sccache/issues/236 ("incremental", _) => cannot_cache!("incremental"), (_, _) => (), } } Some(Color(value)) => { // We'll just assume the last specified value wins. 
color_mode = match value.as_ref() { "always" => ColorMode::On, "never" => ColorMode::Off, _ => ColorMode::Auto, }; } Some(Json(_)) => { has_json = true; } Some(PassThrough(_)) => (), Some(Target(target)) => { match target { ArgTarget::Path(_) | ArgTarget::Unsure(_) => cannot_cache!("target"), ArgTarget::Name(_) => (), } } None => { match arg { Argument::Raw(ref val) => { if input.is_some() { // Can't cache compilations with multiple inputs. cannot_cache!("multiple input files"); } input = Some(val.clone()); } Argument::UnknownFlag(_) => {} _ => unreachable!(), } } } // We'll drop --color arguments, we're going to pass --color=always and the client will // strip colors if necessary. match arg.get_data() { Some(Color(_)) => {} _ => args.push(arg.normalize(NormalizedDisposition::Separated)), } } // Unwrap required values. macro_rules! req { ($x:ident) => { let $x = if let Some($x) = $x { $x } else { debug!("Can't cache compilation, missing `{}`", stringify!($x)); cannot_cache!(concat!("missing ", stringify!($x))); }; } }; // We don't actually save the input value, but there needs to be one. req!(input); drop(input); req!(output_dir); req!(emit); req!(crate_name); // We won't cache invocations that are not producing // binary output. if !emit.is_empty() && !emit.contains("link") && !emit.contains("metadata") { return CompilerArguments::NotCompilation; } // If it's not an rlib and not a staticlib then crate-type wasn't passed, // so it will usually be inferred as a binary, though the `#![crate_type` // annotation may dictate otherwise - either way, we don't know what to do. if let CrateTypes { rlib: false, staticlib: false } = crate_types { cannot_cache!("crate-type", "No crate-type passed".to_owned()) } // We won't cache invocations that are outputting anything but // linker output and dep-info. if emit.iter().any(|e| !ALLOWED_EMIT.contains(e.as_str())) { cannot_cache!("unsupported --emit"); } // Figure out the dep-info filename, if emitting dep-info. 
let dep_info = if emit.contains("dep-info") { let mut dep_info = crate_name.clone(); if let Some(extra_filename) = extra_filename { dep_info.push_str(&extra_filename[..]); } dep_info.push_str(".d"); Some(dep_info) } else { None }; // Locate all static libs specified on the commandline. let staticlibs = static_lib_names.into_iter().filter_map(|name| { for path in static_link_paths.iter() { for f in &[format_args!("lib{}.a", name), format_args!("{}.lib", name), format_args!("{}.a", name)] { let lib_path = path.join(fmt::format(*f)); if lib_path.exists() { return Some(lib_path); } } } // rustc will just error if there's a missing static library, so don't worry about // it too much. None }).collect(); // We'll figure out the source files and outputs later in // `generate_hash_key` where we can run rustc. // Cargo doesn't deterministically order --externs, and we need the hash inputs in a // deterministic order. externs.sort(); CompilerArguments::Ok(ParsedArguments { arguments: args, output_dir: output_dir.into(), crate_types, externs: externs, crate_link_paths, staticlibs: staticlibs, crate_name: crate_name.to_string(), dep_info: dep_info.map(|s| s.into()), emit, color_mode, has_json, }) } impl<T> CompilerHasher<T> for RustHasher where T: CommandCreatorSync, { fn generate_hash_key(self: Box<Self>, creator: &T, cwd: PathBuf, env_vars: Vec<(OsString, OsString)>, _may_dist: bool, pool: &CpuPool) -> SFuture<HashResult> { let me = *self; let RustHasher { executable, host, sysroot, compiler_shlibs_digests, #[cfg(feature = "dist-client")] rlib_dep_reader, parsed_args: ParsedArguments { arguments, output_dir, externs, crate_link_paths, staticlibs, crate_name, crate_types, dep_info, emit, color_mode: _, has_json, }, } = me; trace!("[{}]: generate_hash_key", crate_name); // TODO: this doesn't produce correct arguments if they should be concatenated - should use iter_os_strings let os_string_arguments: Vec<(OsString, Option<OsString>)> = arguments.iter() .map(|arg| 
(arg.to_os_string(), arg.get_data().cloned().map(IntoArg::into_arg_os_string))).collect(); // `filtered_arguments` omits --emit and --out-dir arguments. // It's used for invoking rustc with `--emit=dep-info` to get the list of // source files for this crate. let filtered_arguments = os_string_arguments.iter() .filter_map(|&(ref arg, ref val)| { if arg == "--emit" || arg == "--out-dir" { None } else { Some((arg, val)) } }) .flat_map(|(arg, val)| Some(arg).into_iter().chain(val)) .map(|a| a.clone()) .collect::<Vec<_>>(); // Find all the source files and hash them let source_hashes_pool = pool.clone(); let source_files = get_source_files(creator, &crate_name, &executable, &filtered_arguments, &cwd, &env_vars, pool); let source_files_and_hashes = source_files .and_then(move |source_files| { hash_all(&source_files, &source_hashes_pool).map(|source_hashes| (source_files, source_hashes)) }); // Hash the contents of the externs listed on the commandline. trace!("[{}]: hashing {} externs", crate_name, externs.len()); let abs_externs = externs.iter().map(|e| cwd.join(e)).collect::<Vec<_>>(); let extern_hashes = hash_all(&abs_externs, pool); // Hash the contents of the staticlibs listed on the commandline. trace!("[{}]: hashing {} staticlibs", crate_name, staticlibs.len()); let abs_staticlibs = staticlibs.iter().map(|s| cwd.join(s)).collect::<Vec<_>>(); let staticlib_hashes = hash_all(&abs_staticlibs, pool); let creator = creator.clone(); let hashes = source_files_and_hashes.join3(extern_hashes, staticlib_hashes); Box::new(hashes.and_then(move |((source_files, source_hashes), extern_hashes, staticlib_hashes)| -> SFuture<_> { // If you change any of the inputs to the hash, you should change `CACHE_VERSION`. let mut m = Digest::new(); // Hash inputs: // 1. A version m.update(CACHE_VERSION); // 2. compiler_shlibs_digests for d in compiler_shlibs_digests { m.update(d.as_bytes()); } let weak_toolchain_key = m.clone().finish(); // 3. 
The full commandline (self.arguments) // TODO: there will be full paths here, it would be nice to // normalize them so we can get cross-machine cache hits. // A few argument types are not passed in a deterministic order // by cargo: --extern, -L, --cfg. We'll filter those out, sort them, // and append them to the rest of the arguments. let args = { let (mut sortables, rest): (Vec<_>, Vec<_>) = os_string_arguments.iter() // We exclude a few arguments from the hash: // -L, --extern, --out-dir // These contain paths which aren't relevant to the output, and the compiler inputs // in those paths (rlibs and static libs used in the compilation) are used as hash // inputs below. .filter(|&&(ref arg, _)| { !(arg == "--extern" || arg == "-L" || arg == "--out-dir") }) // A few argument types were not passed in a deterministic order // by older versions of cargo: --extern, -L, --cfg. We'll filter the rest of those // out, sort them, and append them to the rest of the arguments. .partition(|&&(ref arg, _)| arg == "--cfg"); sortables.sort(); rest.into_iter() .chain(sortables) .flat_map(|&(ref arg, ref val)| { iter::once(arg).chain(val.as_ref()) }) .fold(OsString::new(), |mut a, b| { a.push(b); a }) }; args.hash(&mut HashToDigest { digest: &mut m }); // 4. The digest of all source files (this includes src file from cmdline). // 5. The digest of all files listed on the commandline (self.externs). // 6. The digest of all static libraries listed on the commandline (self.staticlibs). for h in source_hashes.into_iter().chain(extern_hashes).chain(staticlib_hashes) { m.update(h.as_bytes()); } // 7. Environment variables. Ideally we'd use anything referenced // via env! in the program, but we don't have a way to determine that // currently, and hashing all environment variables is too much, so // we'll just hash the CARGO_ env vars and hope that's sufficient. // Upstream Rust issue tracking getting information about env! 
usage: // https://github.com/rust-lang/rust/issues/40364 let mut env_vars: Vec<_> = env_vars.iter() // Filter out RUSTC_COLOR since we control color usage with command line flags. // rustc reports an error when both are present. .filter(|(ref k, _)| k != "RUSTC_COLOR") .cloned() .collect(); env_vars.sort(); for &(ref var, ref val) in env_vars.iter() { // CARGO_MAKEFLAGS will have jobserver info which is extremely non-cacheable. if var.starts_with("CARGO_") && var != "CARGO_MAKEFLAGS" { var.hash(&mut HashToDigest { digest: &mut m }); m.update(b"="); val.hash(&mut HashToDigest { digest: &mut m }); } } // 8. The cwd of the compile. This will wind up in the rlib. cwd.hash(&mut HashToDigest { digest: &mut m }); // Turn arguments into a simple Vec<OsString> to calculate outputs. let flat_os_string_arguments: Vec<OsString> = os_string_arguments.into_iter() .flat_map(|(arg, val)| iter::once(arg).into_iter().chain(val)) .collect(); Box::new(get_compiler_outputs(&creator, &executable, &flat_os_string_arguments, &cwd, &env_vars).map(move |mut outputs| { if emit.contains("metadata") { // rustc currently does not report rmeta outputs with --print file-names // --emit metadata the rlib is printed, and with --emit metadata,link // only the rlib is printed. let rlibs: HashSet<_> = outputs.iter().cloned().filter(|p| { p.ends_with(".rlib") }).collect(); for lib in rlibs { let rmeta = lib.replacen(".rlib", ".rmeta", 1); // Do this defensively for future versions of rustc that may // be fixed. if !outputs.contains(&rmeta) { outputs.push(rmeta); } if !emit.contains("link") { outputs.retain(|p| *p != lib); } } } let output_dir = PathBuf::from(output_dir); // Convert output files into a map of basename -> full path. 
let mut outputs = outputs.into_iter() .map(|o| { let p = output_dir.join(&o); (o, p) }) .collect::<HashMap<_, _>>(); let dep_info = if let Some(dep_info) = dep_info { let p = output_dir.join(&dep_info); outputs.insert(dep_info.to_string_lossy().into_owned(), p.clone()); Some(p) } else { None }; let mut arguments = arguments; // Request color output unless json was requested. The client will strip colors if needed. if !has_json { arguments.push(Argument::WithValue("--color", ArgData::Color("always".into()), ArgDisposition::Separated)); } let inputs = source_files.into_iter().chain(abs_externs).chain(abs_staticlibs).collect(); HashResult { key: m.finish(), compilation: Box::new(RustCompilation { executable: executable, host, sysroot: sysroot, arguments: arguments, inputs: inputs, outputs: outputs, crate_link_paths, crate_name, crate_types, dep_info, cwd, env_vars, #[cfg(feature = "dist-client")] rlib_dep_reader, }), weak_toolchain_key, } })) })) } fn color_mode(&self) -> ColorMode { self.parsed_args.color_mode } fn output_pretty(&self) -> Cow<'_, str> { Cow::Borrowed(&self.parsed_args.crate_name) } fn box_clone(&self) -> Box<dyn CompilerHasher<T>> { Box::new((*self).clone()) } } impl Compilation for RustCompilation { fn generate_compile_commands(&self, path_transformer: &mut dist::PathTransformer) -> Result<(CompileCommand, Option<dist::CompileCommand>, Cacheable)> { let RustCompilation { ref executable, ref arguments, ref crate_name, ref cwd, ref env_vars, #[cfg(feature = "dist-client")] ref host, #[cfg(feature = "dist-client")] ref sysroot, .. 
} = *self; #[cfg(not(feature = "dist-client"))] let _ = path_transformer; trace!("[{}]: compile", crate_name); let command = CompileCommand { executable: executable.to_owned(), arguments: arguments.iter().flat_map(|arg| arg.iter_os_strings()).collect(), env_vars: env_vars.to_owned(), cwd: cwd.to_owned(), }; #[cfg(not(feature = "dist-client"))] let dist_command = None; #[cfg(feature = "dist-client")] let dist_command = (|| { macro_rules! try_string_arg { ($e:expr) => { match $e { Ok(s) => s, Err(e) => { debug!("Conversion failed for distributed compile argument: {}", e); return None }, } }; } let mut dist_arguments = vec![]; let mut saw_target = false; // flat_map would be nice but the lifetimes don't work out for argument in arguments.iter() { let path_transformer_fn = &mut |p: &Path| path_transformer.to_dist(p); if let Argument::Raw(input_path) = argument { // Need to explicitly handle the input argument as it's not parsed as a path let input_path = Path::new(input_path).to_owned(); dist_arguments.push(try_string_arg!(input_path.into_arg_string(path_transformer_fn))) } else { if let Some(Target(_)) = argument.get_data() { saw_target = true } for string_arg in argument.iter_strings(path_transformer_fn) { dist_arguments.push(try_string_arg!(string_arg)) } } } // We can't rely on the packaged toolchain necessarily having the same default target triple // as us (typically host triple), so make sure to always explicitly specify a target. 
if !saw_target { dist_arguments.push(format!("--target={}", host)) } // Convert the paths of some important environment variables let mut env_vars = dist::osstring_tuples_to_strings(env_vars)?; let mut changed_out_dir: Option<PathBuf> = None; for (k, v) in env_vars.iter_mut() { match k.as_str() { // We round-tripped from path to string and back to path, but it should be lossless "OUT_DIR" => { let dist_out_dir = path_transformer.to_dist(Path::new(v))?; if dist_out_dir != *v { changed_out_dir = Some(v.to_owned().into()); } *v = dist_out_dir } "TMPDIR" => { // The server will need to find its own tempdir. *v = "".to_string(); } "CARGO" | "CARGO_MANIFEST_DIR" => { *v = path_transformer.to_dist(Path::new(v))? }, _ => (), } } // OUT_DIR was changed during transformation, check if this compilation is relying on anything // inside it - if so, disallow distributed compilation (there are sometimes hardcoded paths present) if let Some(out_dir) = changed_out_dir { if self.inputs.iter().any(|input| input.starts_with(&out_dir)) { return None } } // Add any necessary path transforms - although we haven't packaged up inputs yet, we've // probably seen all drives (e.g. on Windows), so let's just transform those rather than // trying to do every single path. let mut remapped_disks = HashSet::new(); for (local_path, dist_path) in get_path_mappings(&path_transformer) { let local_path = local_path.to_str()?; // "The from=to parameter is scanned from right to left, so from may contain '=', but to may not." 
if local_path.contains('=') { return None } if remapped_disks.contains(&dist_path) { continue } dist_arguments.push(format!("--remap-path-prefix={}={}", &dist_path, local_path)); remapped_disks.insert(dist_path); } let sysroot_executable = sysroot.join(BINS_DIR).join("rustc").with_extension(EXE_EXTENSION); Some(dist::CompileCommand { executable: path_transformer.to_dist(&sysroot_executable)?, arguments: dist_arguments, env_vars, cwd: path_transformer.to_dist_abs(cwd)?, }) })(); Ok((command, dist_command, Cacheable::Yes)) } #[cfg(feature = "dist-client")] fn into_dist_packagers(self: Box<Self>, path_transformer: dist::PathTransformer) -> Result<(Box<dyn pkg::InputsPackager>, Box<dyn pkg::ToolchainPackager>, Box<dyn OutputsRewriter>)> { let RustCompilation { inputs, crate_link_paths, sysroot, crate_types, dep_info, rlib_dep_reader, env_vars, .. } = *{self}; trace!("Dist inputs: inputs={:?} crate_link_paths={:?}", inputs, crate_link_paths); let inputs_packager = Box::new(RustInputsPackager { env_vars, crate_link_paths, crate_types, inputs, path_transformer, rlib_dep_reader }); let toolchain_packager = Box::new(RustToolchainPackager { sysroot }); let outputs_rewriter = Box::new(RustOutputsRewriter { dep_info }); Ok((inputs_packager, toolchain_packager, outputs_rewriter)) } fn outputs<'a>(&'a self) -> Box<dyn Iterator<Item=(&'a str, &'a Path)> + 'a> { Box::new(self.outputs.iter().map(|(k, v)| (k.as_str(), &**v))) } } // TODO: we do end up with slashes facing the wrong way, but Windows is agnostic so it's // mostly ok. We currently don't get mappings for every single path because it means we need to // figure out all prefixes and send them over the wire. 
#[cfg(feature = "dist-client")]
/// Enumerate the (local path, distributed path) disk mappings known to the
/// path transformer.
fn get_path_mappings(path_transformer: &dist::PathTransformer) -> impl Iterator<Item=(PathBuf, String)> {
    path_transformer.disk_mappings()
}

#[cfg(feature = "dist-client")]
/// Everything needed to package the inputs of a Rust compilation so they can
/// be sent to a remote build server.
struct RustInputsPackager {
    // Environment of the local compile (used to detect cargo builds and to
    // invoke the rlib dep reader).
    env_vars: Vec<(OsString, OsString)>,
    // -L search paths that may contain dependency libraries.
    crate_link_paths: Vec<PathBuf>,
    // Which crate types this compilation emits.
    crate_types: CrateTypes,
    // Explicit input files (sources, externs, static libs).
    inputs: Vec<PathBuf>,
    // Local <-> distributed path mapping, threaded through packaging.
    path_transformer: dist::PathTransformer,
    // Optional helper to discover crate names an rlib depends on.
    rlib_dep_reader: Option<Arc<RlibDepReader>>,
}

#[cfg(feature = "dist-client")]
impl pkg::InputsPackager for RustInputsPackager {
    /// Write a tar of all compile inputs (and candidate dependency libs from
    /// the link paths) to `wtr`, returning the updated path transformer.
    fn write_inputs(self: Box<Self>, wtr: &mut dyn io::Write) -> Result<dist::PathTransformer> {
        debug!("Packaging compile inputs for compile");
        let RustInputsPackager { crate_link_paths, crate_types, inputs, mut path_transformer, rlib_dep_reader, env_vars } = *{self};

        // If this is a cargo build, we can assume all immediate `extern crate` dependencies
        // have been passed on the command line, allowing us to scan them all and find the
        // complete list of crates we might need.
        // If it's not a cargo build, we can't extract the `extern crate` statements and
        // so have no way to build a list of necessary crates - send all rlibs.
        let is_cargo = env_vars.iter().any(|(k, _)| k == "CARGO_PKG_NAME");
        let mut rlib_dep_reader_and_names = if is_cargo {
            rlib_dep_reader.map(|r| (r, HashSet::new()))
        } else {
            None
        };

        let mut tar_inputs = vec![];
        for input_path in inputs.into_iter() {
            let input_path = pkg::simplify_path(&input_path)?;
            if let Some(ext) = input_path.extension() {
                if !CAN_DIST_DYLIBS && ext == DLL_EXTENSION {
                    bail!("Cannot distribute dylib input {} on this platform", input_path.display())
                } else if ext == RLIB_EXTENSION {
                    // Record the crate names this rlib depends on so we can later
                    // filter the libs found in the link paths down to candidates.
                    if let Some((ref rlib_dep_reader, ref mut dep_crate_names)) = rlib_dep_reader_and_names {
                        dep_crate_names.extend(rlib_dep_reader.discover_rlib_deps(&env_vars, &input_path)
                            .chain_err(|| format!("Failed to read deps of {}", input_path.display()))?)
} } } let dist_input_path = path_transformer.to_dist(&input_path) .chain_err(|| format!("unable to transform input path {}", input_path.display()))?; tar_inputs.push((input_path, dist_input_path)) } if log_enabled!(Trace) { if let Some((_, ref dep_crate_names)) = rlib_dep_reader_and_names { trace!("Identified dependency crate names: {:?}", dep_crate_names) } } // Given the link paths, find the things we need to send over the wire to the remote machine. If // we've been able to use a dependency searcher then we can filter down just candidates for that // crate, otherwise we need to send everything. let mut tar_crate_libs = vec![]; for crate_link_path in crate_link_paths.into_iter() { let crate_link_path = pkg::simplify_path(&crate_link_path)?; let dir_entries = match fs::read_dir(crate_link_path) { Ok(iter) => iter, Err(ref e) if e.kind() == io::ErrorKind::NotFound => continue, Err(e) => return Err(Error::from(e).chain_err(|| "Failed to read dir entries in crate link path")), }; for entry in dir_entries { let entry = match entry { Ok(entry) => entry, Err(e) => return Err(Error::from(e).chain_err(|| "Error during iteration over crate link path")), }; let path = entry.path(); { // Take a look at the path and see if it's something we care about let libname: &str = match path.file_name().and_then(|s| s.to_str()) { Some(name) => { let mut rev_name_split = name.rsplitn(2, '-'); let _extra_filename_and_ext = rev_name_split.next(); let libname = if let Some(libname) = rev_name_split.next() { libname } else { continue }; assert!(rev_name_split.next().is_none()); libname }, None => continue, }; let (crate_name, ext): (&str, _) = match path.extension() { Some(ext) if libname.starts_with(DLL_PREFIX) && ext == DLL_EXTENSION => (&libname[DLL_PREFIX.len()..], ext), Some(ext) if libname.starts_with(RLIB_PREFIX) && ext == RLIB_EXTENSION => (&libname[RLIB_PREFIX.len()..], ext), _ => continue, }; if let Some((_, ref dep_crate_names)) = rlib_dep_reader_and_names { // We have a list of 
crate names we care about, see if this lib is a candidate if !dep_crate_names.contains(crate_name) { continue } } if !path.is_file() { continue } else if !CAN_DIST_DYLIBS && ext == DLL_EXTENSION { bail!("Cannot distribute dylib input {} on this platform", path.display()) } } // This is a lib that may be of interest during compilation let dist_path = path_transformer.to_dist(&path) .chain_err(|| format!("unable to transform lib path {}", path.display()))?; tar_crate_libs.push((path, dist_path)) } } let mut all_tar_inputs: Vec<_> = tar_inputs.into_iter().chain(tar_crate_libs).collect(); all_tar_inputs.sort(); // There are almost certainly duplicates from explicit externs also within the lib search paths all_tar_inputs.dedup(); // If we're just creating an rlib then the only thing inspected inside dependency rlibs is the // metadata, in which case we can create a trimmed rlib (which is actually a .a) with the metadata let can_trim_rlibs = if let CrateTypes { rlib: true, staticlib: false } = crate_types { true } else { false }; let mut builder = tar::Builder::new(wtr); for (input_path, dist_input_path) in all_tar_inputs.iter() { let mut file_header = pkg::make_tar_header(input_path, dist_input_path)?; let file = fs::File::open(input_path)?; if can_trim_rlibs && input_path.extension().map(|e| e == RLIB_EXTENSION).unwrap_or(false) { let mut archive = ar::Archive::new(file); while let Some(entry_result) = archive.next_entry() { let mut entry = entry_result?; if entry.header().identifier() != b"rust.metadata.bin" { continue } let mut metadata = vec![]; io::copy(&mut entry, &mut metadata)?; let mut metadata_ar = vec![]; { let mut ar_builder = ar::Builder::new(&mut metadata_ar); ar_builder.append(entry.header(), metadata.as_slice())? } file_header.set_size(metadata_ar.len() as u64); file_header.set_cksum(); builder.append(&file_header, metadata_ar.as_slice())?; break } } else { file_header.set_cksum(); builder.append(&file_header, file)? 
}
        }

        // Finish archive
        let _ = builder.into_inner()?;
        Ok(path_transformer)
    }
}

#[cfg(feature = "dist-client")]
#[allow(unused)]
/// Packages the Rust toolchain found at `sysroot` (the rustc binary plus its
/// bin/lib directories) so a build server can run the same compiler.
struct RustToolchainPackager {
    sysroot: PathBuf,
}

#[cfg(feature = "dist-client")]
#[cfg(all(target_os = "linux", target_arch = "x86_64"))]
impl pkg::ToolchainPackager for RustToolchainPackager {
    /// Write a compressed tar of the toolchain to `f`: common base files, the
    /// rustc executable with its shared-library deps, and the contents of the
    /// sysroot bin and lib directories.
    fn write_pkg(self: Box<Self>, f: fs::File) -> Result<()> {
        info!("Packaging Rust compiler for sysroot {}", self.sysroot.display());
        let RustToolchainPackager { sysroot } = *self;

        let mut package_builder = pkg::ToolchainPackageBuilder::new();
        package_builder.add_common()?;
        let bins_path = sysroot.join(BINS_DIR);
        let sysroot_executable = bins_path.join("rustc").with_extension(EXE_EXTENSION);
        package_builder.add_executable_and_deps(sysroot_executable)?;
        package_builder.add_dir_contents(&bins_path)?;
        // bin and lib may be the same directory; avoid adding it twice.
        if BINS_DIR != LIBS_DIR {
            let libs_path = sysroot.join(LIBS_DIR);
            package_builder.add_dir_contents(&libs_path)?
        }
        package_builder.into_compressed_tar(f)
    }
}

#[cfg(feature = "dist-client")]
/// Rewrites compiler outputs after a distributed compile. Currently only the
/// dep-info file needs fixing up (distributed paths mapped back to local ones).
struct RustOutputsRewriter {
    dep_info: Option<PathBuf>,
}

#[cfg(feature = "dist-client")]
impl OutputsRewriter for RustOutputsRewriter {
    fn handle_outputs(self: Box<Self>, path_transformer: &dist::PathTransformer, output_paths: &[PathBuf]) -> Result<()> {
        use std::io::Write;
        // Outputs in dep files (the files at the beginning of lines) are untransformed at this point -
        // remap-path-prefix is documented to only apply to 'inputs'.
trace!("Pondering on rewriting dep file {:?}", self.dep_info); if let Some(dep_info) = self.dep_info { for dep_info_local_path in output_paths { trace!("Comparing with {}", dep_info_local_path.display()); if dep_info == *dep_info_local_path { info!("Replacing using the transformer {:?}", path_transformer); // Found the dep info file, read it in let f = fs::File::open(&dep_info).chain_err(|| "Failed to open dep info file")?; let mut deps = String::new(); {f}.read_to_string(&mut deps)?; // Replace all the output paths, at the beginning of lines for (local_path, dist_path) in get_path_mappings(path_transformer) { let re_str = format!("(?m)^{}", regex::escape(&dist_path)); let local_path_str = local_path.to_str() .chain_err(|| format!("could not convert {} to string for RE replacement", local_path.display()))?; error!("RE replacing {} with {} in {}", re_str, local_path_str, deps); let re = regex::Regex::new(&re_str).expect("Invalid regex"); deps = re.replace_all(&deps, local_path_str).into_owned(); } // Write the depinfo file let f = fs::File::create(&dep_info).chain_err(|| "Failed to recreate dep info file")?; {f}.write_all(deps.as_bytes())?; return Ok(()) } } // We expected there to be dep info, but none of the outputs matched bail!("No outputs matched dep info file {}", dep_info.display()); } Ok(()) } } #[test] #[cfg(all(feature = "dist-client", target_os = "windows"))] fn test_rust_outputs_rewriter() { use crate::compiler::compiler::OutputsRewriter; use std::io::Write; use crate::test::utils::create_file; let mut pt = dist::PathTransformer::new(); pt.to_dist(Path::new("c:\\")).unwrap(); let mappings: Vec<_> = pt.disk_mappings().collect(); assert!(mappings.len() == 1); let linux_prefix = &mappings[0].1; let depinfo_data = format!("{prefix}/sccache/target/x86_64-unknown-linux-gnu/debug/deps/sccache_dist-c6f3229b9ef0a5c3.rmeta: src/bin/sccache-dist/main.rs src/bin/sccache-dist/build.rs src/bin/sccache-dist/token_check.rs 
{prefix}/sccache/target/x86_64-unknown-linux-gnu/debug/deps/sccache_dist-c6f3229b9ef0a5c3.d: src/bin/sccache-dist/main.rs src/bin/sccache-dist/build.rs src/bin/sccache-dist/token_check.rs src/bin/sccache-dist/main.rs: src/bin/sccache-dist/build.rs: src/bin/sccache-dist/token_check.rs: ", prefix=linux_prefix); let depinfo_resulting_data = format!("{prefix}/sccache/target/x86_64-unknown-linux-gnu/debug/deps/sccache_dist-c6f3229b9ef0a5c3.rmeta: src/bin/sccache-dist/main.rs src/bin/sccache-dist/build.rs src/bin/sccache-dist/token_check.rs {prefix}/sccache/target/x86_64-unknown-linux-gnu/debug/deps/sccache_dist-c6f3229b9ef0a5c3.d: src/bin/sccache-dist/main.rs src/bin/sccache-dist/build.rs src/bin/sccache-dist/token_check.rs src/bin/sccache-dist/main.rs: src/bin/sccache-dist/build.rs: src/bin/sccache-dist/token_check.rs: ", prefix="c:"); let tempdir = TempDir::new("sccache_test").unwrap(); let tempdir = tempdir.path(); let depinfo_file = create_file(tempdir, "depinfo.d", |mut f| { f.write_all(depinfo_data.as_bytes()) }).unwrap(); let ror = Box::new(RustOutputsRewriter { dep_info: Some(depinfo_file.clone()), }); let () = ror.handle_outputs(&pt, &[depinfo_file.clone()]).unwrap(); let mut s = String::new(); fs::File::open(depinfo_file).unwrap().read_to_string(&mut s).unwrap(); assert_eq!(s, depinfo_resulting_data) } #[cfg(feature = "dist-client")] #[derive(Debug)] struct RlibDepsDetail { deps: Vec<String>, mtime: time::SystemTime, } #[cfg(feature = "dist-client")] struct DepsSize; #[cfg(feature = "dist-client")] impl Meter<PathBuf, RlibDepsDetail> for DepsSize { type Measure = usize; fn measure<Q: ?Sized>(&self, _k: &Q, v: &RlibDepsDetail) -> usize where PathBuf: Borrow<Q> { use std::mem; // TODO: unfortunately there is exactly nothing you can do with the k given the // current trait bounds. 
Just use some kind of sane value; //let k_size = mem::size_of::<PathBuf>() + k.capacity(); let k_size = 3*8 + 100; let crate_names_size: usize = v.deps.iter().map(|s| s.capacity()).sum(); let v_size: usize = mem::size_of::<RlibDepsDetail>() + // Systemtime and vec itself v.deps.capacity() * mem::size_of::<String>() + // Each string in the vec crate_names_size; // Contents of all strings k_size + v_size } } #[cfg(feature = "dist-client")] #[derive(Debug)] struct RlibDepReader { cache: Mutex<LruCache<PathBuf, RlibDepsDetail, RandomState, DepsSize>>, executable: PathBuf, } #[cfg(feature = "dist-client")] impl RlibDepReader { fn new_with_check(executable: PathBuf, env_vars: &[(OsString, OsString)]) -> Result<Self> { let temp_dir = TempDir::new("sccache-rlibreader") .chain_err(|| "Could not create temporary directory for rlib output")?; let temp_rlib = temp_dir.path().join("x.rlib"); let mut cmd = process::Command::new(&executable); cmd .arg("--crate-type=rlib") .arg("-o").arg(&temp_rlib) .arg("-") .env_clear() .envs(ref_env(env_vars)); let process::Output { status, stdout, stderr } = cmd.output()?; if !status.success() { bail!("Failed to compile a minimal rlib with {}", executable.display()) } if !stdout.is_empty() { bail!("rustc stdout non-empty when compiling a minimal rlib: {:?}", String::from_utf8_lossy(&stdout)) } if !stderr.is_empty() { bail!("rustc stderr non-empty when compiling a minimal rlib: {:?}", String::from_utf8_lossy(&stderr)) } // The goal of this cache is to avoid repeated lookups when building a single project. Let's budget 3MB. 
// Allowing for a 100 byte path, 50 dependecies per rlib and 20 characters per crate name, this roughly // approximates to `path_size + path + vec_size + num_deps * (systemtime_size + string_size + crate_name_len)` // ` 3*8 + 100 + 3*8 + 50 * ( 8 + 3*8 + 20 )` // `2748` bytes per crate // Allowing for possible overhead of up to double (for unused space in allocated memory), this means we // can cache information from about 570 rlibs - easily enough for a single project. const CACHE_SIZE: u64 = 3*1024*1024; let cache = LruCache::with_meter(CACHE_SIZE, DepsSize); let rlib_dep_reader = RlibDepReader { cache: Mutex::new(cache), executable }; if let Err(e) = rlib_dep_reader.discover_rlib_deps(env_vars, &temp_rlib) { bail!("Failed to read deps from minimal rlib: {}", e) } Ok(rlib_dep_reader) } fn discover_rlib_deps(&self, env_vars: &[(OsString, OsString)], rlib: &Path) -> Result<Vec<String>> { let rlib_mtime = fs::metadata(&rlib).and_then(|m| m.modified()).chain_err(|| "Unable to get rlib modified time")?; { let mut cache = self.cache.lock().unwrap(); if let Some(deps_detail) = cache.get(rlib) { if rlib_mtime == deps_detail.mtime { return Ok(deps_detail.deps.clone()) } } } trace!("Discovering dependencies of {}", rlib.display()); let mut cmd = process::Command::new(&self.executable); cmd.args(&["-Z", "ls"]).arg(&rlib) .env_clear() .envs(ref_env(env_vars)) .env("RUSTC_BOOTSTRAP", "1"); // TODO: this is fairly naughty let process::Output { status, stdout, stderr } = cmd.output()?; if !status.success() { bail!(format!("Failed to list deps of {}", rlib.display())) } if !stderr.is_empty() { bail!("rustc -Z ls stderr non-empty: {:?}", String::from_utf8_lossy(&stderr)) } let stdout = String::from_utf8(stdout).chain_err(|| "Error parsing rustc -Z ls output")?; let deps: Vec<_> = parse_rustc_z_ls(&stdout) .map(|deps| deps.into_iter().map(|dep| dep.to_owned()).collect())?; { // This will behave poorly if the rlib is changing under our feet, but in that case rustc // will also do 
// the wrong thing, so the user has bigger issues to deal with.
            let mut cache = self.cache.lock().unwrap();
            cache.insert(rlib.to_owned(), RlibDepsDetail { deps: deps.clone(), mtime: rlib_mtime });
        }
        Ok(deps)
    }
}

// Parse output like the following:
//
// ```
// =External Dependencies=
// 1 std-08a5bd1ca58a28ee
// 2 core-ed31c38c1a60e6f9
// 3 compiler_builtins-6bd92a903b271497
// 4 alloc-5184f4fa2c87f835
// 5 alloc_system-7a70df28ae5ce6c3
// 6 libc-fb97b8e8c331f065
// 7 unwind-3fec89e45492b583
// 8 alloc_jemalloc-3e9fce05c4bf31e5
// 9 panic_unwind-376f1801255ba526
// 10 bitflags-f482823cbc05f4d7
// 11 cfg_if-cf72e166fff77ced
// ```
#[cfg(feature = "dist-client")]
/// Extract the dependency crate names (without hash suffixes) from the output
/// of `rustc -Z ls`, validating the expected header, numbering and trailer.
fn parse_rustc_z_ls(stdout: &str) -> Result<Vec<&str>> {
    let mut lines = stdout.lines();
    // The listing must start with this exact header line.
    match lines.next() {
        Some("=External Dependencies=") => {},
        Some(s) => bail!("Unknown first line from rustc -Z ls: {}", s),
        None => bail!("No output from rustc -Z ls"),
    }

    let mut dep_names = vec![];
    while let Some(line) = lines.next() {
        // A blank line terminates the dependency list.
        if line == "" {
            break
        }
        // Each entry is "<number> <libname>-<hash>".
        let mut line_splits = line.splitn(2, ' ');
        let num: usize = line_splits.next().expect("Zero strings from line split").parse()
            .chain_err(|| "Could not parse number from rustc -Z ls")?;
        let libstring = line_splits.next().ok_or_else(|| "No lib string on line from rustc -Z ls")?;
        // Entries are expected to be numbered sequentially from 1.
        if num != dep_names.len() + 1 {
            bail!("Unexpected numbering of {} in rustc -Z ls output", libstring)
        }
        assert!(line_splits.next().is_none());

        let mut libstring_splits = libstring.rsplitn(2, '-');
        // Rustc prints strict hash value (rather than extra filename as it likely should be)
        // https://github.com/rust-lang/rust/pull/55555
        let _svh = libstring_splits.next().ok_or_else(|| "No hash in lib string from rustc -Z ls")?;
        let libname = libstring_splits.next().expect("Zero strings from libstring split");
        assert!(libstring_splits.next().is_none());

        dep_names.push(libname);
    }

    // Anything after the blank separator must itself be blank.
    while let Some(line) = lines.next() {
        if line != "" {
            bail!("Trailing non-blank lines in rustc -Z ls output")
        }
    }

    Ok(dep_names)
} #[cfg(test)] mod test { use super::*; use crate::compiler::*; use itertools::Itertools; use crate::mock_command::*; use std::ffi::OsStr; use std::fs::File; use std::io::Write; use std::sync::{Arc,Mutex}; use crate::test::utils::*; fn _parse_arguments(arguments: &[String]) -> CompilerArguments<ParsedArguments> { let arguments = arguments.iter().map(OsString::from).collect::<Vec<_>>(); parse_arguments(&arguments, ".".as_ref()) } macro_rules! parses { ( $( $s:expr ),* ) => { match _parse_arguments(&[ $( $s.to_string(), )* ]) { CompilerArguments::Ok(a) => a, o @ _ => panic!("Got unexpected parse result: {:?}", o), } } } macro_rules! fails { ( $( $s:expr ),* ) => { match _parse_arguments(&[ $( $s.to_string(), )* ]) { CompilerArguments::Ok(_) => panic!("Should not have parsed ok: `{}`", stringify!($( $s, )*)), o @ _ => o, } } } #[test] fn test_parse_arguments_simple() { let h = parses!("--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib"); assert_eq!(h.output_dir.to_str(), Some("out")); assert!(h.dep_info.is_none()); assert!(h.externs.is_empty()); let h = parses!("--emit=link", "foo.rs", "--out-dir", "out", "--crate-name=foo", "--crate-type=lib"); assert_eq!(h.output_dir.to_str(), Some("out")); assert!(h.dep_info.is_none()); let h = parses!("--emit", "link", "foo.rs", "--out-dir=out", "--crate-name=foo", "--crate-type=lib"); assert_eq!(h.output_dir.to_str(), Some("out")); assert_eq!(parses!("--emit", "link", "-C", "opt-level=1", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib"), parses!("--emit=link", "-Copt-level=1", "foo.rs", "--out-dir=out", "--crate-name=foo", "--crate-type=lib")); let h = parses!("--emit", "link,dep-info", "foo.rs", "--out-dir", "out", "--crate-name", "my_crate", "--crate-type", "lib", "-C", "extra-filename=-abcxyz"); assert_eq!(h.output_dir.to_str(), Some("out")); assert_eq!(h.dep_info.unwrap().to_str().unwrap(), "my_crate-abcxyz.d"); fails!("--emit", "link", "--out-dir", "out", 
"--crate-name=foo", "--crate-type=lib"); fails!("--emit", "link", "foo.rs", "--crate-name=foo", "--crate-type=lib"); fails!("--emit", "asm", "foo.rs", "--out-dir", "out", "--crate-name=foo", "--crate-type=lib"); fails!("--emit", "asm,link", "foo.rs", "--out-dir", "out", "--crate-name=foo", "--crate-type=lib"); fails!("--emit", "asm,link,dep-info", "foo.rs", "--out-dir", "out", "--crate-name=foo", "--crate-type=lib"); fails!("--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name=foo"); fails!("--emit", "link", "foo.rs", "--out-dir", "out", "--crate-type=lib"); // From an actual cargo compilation, with some args shortened: let h = parses!("--crate-name", "foo", "src/lib.rs", "--crate-type", "lib", "--emit=dep-info,link", "-C", "debuginfo=2", "-C", "metadata=d6ae26f5bcfb7733", "-C", "extra-filename=-d6ae26f5bcfb7733", "--out-dir", "/foo/target/debug/deps", "-L", "dependency=/foo/target/debug/deps", "--extern", "libc=/foo/target/debug/deps/liblibc-89a24418d48d484a.rlib", "--extern", "log=/foo/target/debug/deps/liblog-2f7366be74992849.rlib"); assert_eq!(h.output_dir.to_str(), Some("/foo/target/debug/deps")); assert_eq!(h.crate_name, "foo"); assert_eq!(h.dep_info.unwrap().to_str().unwrap(), "foo-d6ae26f5bcfb7733.d"); assert_eq!(h.externs, ovec!["/foo/target/debug/deps/liblibc-89a24418d48d484a.rlib", "/foo/target/debug/deps/liblog-2f7366be74992849.rlib"]); } #[test] fn test_parse_arguments_incremental() { parses!("--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib"); let r = fails!("--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib", "-C", "incremental=/foo"); assert_eq!(r, CompilerArguments::CannotCache("incremental", None)) } #[test] fn test_parse_arguments_dep_info_no_extra_filename() { let h = parses!("--crate-name", "foo", "--crate-type", "lib", "src/lib.rs", "--emit=dep-info,link", "--out-dir", "/out"); assert_eq!(h.dep_info, Some("foo.d".into())); } #[test] fn 
test_parse_arguments_native_libs() { parses!("--crate-name", "foo", "--crate-type", "lib,staticlib", "--emit", "link", "-l", "bar", "foo.rs", "--out-dir", "out"); parses!("--crate-name", "foo", "--crate-type", "lib,staticlib", "--emit", "link", "-l", "static=bar", "foo.rs", "--out-dir", "out"); parses!("--crate-name", "foo", "--crate-type", "lib,staticlib", "--emit", "link", "-l", "dylib=bar", "foo.rs", "--out-dir", "out"); } #[test] fn test_parse_arguments_non_rlib_crate() { parses!("--crate-type", "rlib", "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo"); parses!("--crate-type", "lib", "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo"); parses!("--crate-type", "staticlib", "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo"); parses!("--crate-type", "rlib,staticlib", "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo"); fails!("--crate-type", "bin", "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo"); fails!("--crate-type", "rlib,dylib", "--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo"); } #[test] fn test_parse_arguments_color() { let h = parses!("--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib"); assert_eq!(h.color_mode, ColorMode::Auto); let h = parses!("--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib", "--color=always"); assert_eq!(h.color_mode, ColorMode::On); let h = parses!("--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib", "--color=never"); assert_eq!(h.color_mode, ColorMode::Off); let h = parses!("--emit", "link", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib", "--color=auto"); assert_eq!(h.color_mode, ColorMode::Auto); } #[test] fn test_get_compiler_outputs() { let creator = new_creator(); next_command(&creator, Ok(MockChild::new(exit_status(0), "foo\nbar\nbaz", ""))); let outputs = 
get_compiler_outputs(&creator, "rustc".as_ref(), &ovec!("a", "b"), "cwd".as_ref(), &[]).wait().unwrap(); assert_eq!(outputs, &["foo", "bar", "baz"]); } #[test] fn test_get_compiler_outputs_fail() { let creator = new_creator(); next_command(&creator, Ok(MockChild::new(exit_status(1), "", "error"))); assert!(get_compiler_outputs(&creator, "rustc".as_ref(), &ovec!("a", "b"), "cwd".as_ref(), &[]).wait().is_err()); } #[test] fn test_parse_dep_info() { let deps = "foo: baz.rs abc.rs bar.rs baz.rs: abc.rs: bar.rs: "; assert_eq!(pathvec!["abc.rs", "bar.rs", "baz.rs"], parse_dep_info(&deps, "")); } #[test] fn test_parse_dep_info_with_escaped_spaces() { let deps = r#"foo: baz.rs abc\ def.rs baz.rs: abc def.rs: "#; assert_eq!(pathvec!["abc def.rs", "baz.rs"], parse_dep_info(&deps, "")); } #[cfg(not(windows))] #[test] fn test_parse_dep_info_cwd() { let deps = "foo: baz.rs abc.rs bar.rs baz.rs: abc.rs: bar.rs: "; assert_eq!(pathvec!["foo/abc.rs", "foo/bar.rs", "foo/baz.rs"], parse_dep_info(&deps, "foo/")); assert_eq!(pathvec!["/foo/bar/abc.rs", "/foo/bar/bar.rs", "/foo/bar/baz.rs"], parse_dep_info(&deps, "/foo/bar/")); } #[cfg(not(windows))] #[test] fn test_parse_dep_info_abs_paths() { let deps = "/foo/foo: /foo/baz.rs /foo/abc.rs /foo/bar.rs /foo/baz.rs: /foo/abc.rs: /foo/bar.rs: "; assert_eq!(pathvec!["/foo/abc.rs", "/foo/bar.rs", "/foo/baz.rs"], parse_dep_info(&deps, "/bar/")); } #[cfg(windows)] #[test] fn test_parse_dep_info_cwd() { let deps = "foo: baz.rs abc.rs bar.rs baz.rs: abc.rs: bar.rs: "; assert_eq!(pathvec!["foo/abc.rs", "foo/bar.rs", "foo/baz.rs"], parse_dep_info(&deps, "foo/")); assert_eq!(pathvec!["c:/foo/bar/abc.rs", "c:/foo/bar/bar.rs", "c:/foo/bar/baz.rs"], parse_dep_info(&deps, "c:/foo/bar/")); } #[cfg(windows)] #[test] fn test_parse_dep_info_abs_paths() { let deps = "c:/foo/foo: c:/foo/baz.rs c:/foo/abc.rs c:/foo/bar.rs c:/foo/baz.rs: c:/foo/bar.rs c:/foo/abc.rs: c:/foo/bar.rs: "; assert_eq!(pathvec!["c:/foo/abc.rs", "c:/foo/bar.rs", "c:/foo/baz.rs"], 
parse_dep_info(&deps, "c:/bar/")); } fn mock_dep_info(creator: &Arc<Mutex<MockCommandCreator>>, dep_srcs: &[&str]) { // Mock the `rustc --emit=dep-info` process by writing // a dep-info file. let mut sorted_deps = dep_srcs.iter().map(|s| s.to_string()).collect::<Vec<String>>(); sorted_deps.sort(); next_command_calls(creator, move |args| { let mut dep_info_path = None; let mut it = args.iter(); while let Some(a) = it.next() { if a == "-o" { dep_info_path = it.next(); break; } } let dep_info_path = dep_info_path.unwrap(); let mut f = File::create(dep_info_path)?; writeln!(f, "blah: {}", sorted_deps.iter().join(" "))?; for d in sorted_deps.iter() { writeln!(f, "{}:", d)?; } Ok(MockChild::new(exit_status(0), "", "")) }); } fn mock_file_names(creator: &Arc<Mutex<MockCommandCreator>>, filenames: &[&str]) { // Mock the `rustc --print=file-names` process output. next_command(&creator, Ok(MockChild::new(exit_status(0), filenames.iter().join("\n"), ""))); } #[test] fn test_generate_hash_key() { drop(env_logger::try_init()); let f = TestFixture::new(); const FAKE_DIGEST: &'static str = "abcd1234"; // We'll just use empty files for each of these. 
for s in ["foo.rs", "bar.rs", "bar.rlib", "libbaz.a"].iter() { f.touch(s).unwrap(); } let mut emit = HashSet::new(); emit.insert("link".to_string()); emit.insert("metadata".to_string()); let hasher = Box::new(RustHasher { executable: "rustc".into(), host: "x86-64-unknown-unknown-unknown".to_owned(), sysroot: f.tempdir.path().join("sysroot"), compiler_shlibs_digests: vec![FAKE_DIGEST.to_owned()], #[cfg(feature = "dist-client")] rlib_dep_reader: None, parsed_args: ParsedArguments { arguments: vec![ Argument::Raw("a".into()), Argument::WithValue("--cfg".into(), ArgData::PassThrough("xyz".into()), ArgDisposition::Separated), Argument::Raw("b".into()), Argument::WithValue("--cfg".into(), ArgData::PassThrough("abc".into()), ArgDisposition::Separated), ], output_dir: "foo/".into(), externs: vec!["bar.rlib".into()], crate_link_paths: vec![], staticlibs: vec![f.tempdir.path().join("libbaz.a")], crate_name: "foo".into(), crate_types: CrateTypes { rlib: true, staticlib: false }, dep_info: None, emit: emit, color_mode: ColorMode::Auto, has_json: false, } }); let creator = new_creator(); mock_dep_info(&creator, &["foo.rs", "bar.rs"]); mock_file_names(&creator, &["foo.rlib", "foo.a"]); let pool = CpuPool::new(1); let res = hasher.generate_hash_key(&creator, f.tempdir.path().to_owned(), [(OsString::from("CARGO_PKG_NAME"), OsString::from("foo")), (OsString::from("FOO"), OsString::from("bar")), (OsString::from("CARGO_BLAH"), OsString::from("abc"))].to_vec(), false, &pool).wait().unwrap(); let m = Digest::new(); let empty_digest = m.finish(); let mut m = Digest::new(); // Version. m.update(CACHE_VERSION); // sysroot shlibs digests. m.update(FAKE_DIGEST.as_bytes()); // Arguments, with cfgs sorted at the end. 
OsStr::new("ab--cfgabc--cfgxyz").hash(&mut HashToDigest { digest: &mut m }); // bar.rs (source file, from dep-info) m.update(empty_digest.as_bytes()); // foo.rs (source file, from dep-info) m.update(empty_digest.as_bytes()); // bar.rlib (extern crate, from externs) m.update(empty_digest.as_bytes()); // libbaz.a (static library, from staticlibs) m.update(empty_digest.as_bytes()); // Env vars OsStr::new("CARGO_BLAH").hash(&mut HashToDigest { digest: &mut m }); m.update(b"="); OsStr::new("abc").hash(&mut HashToDigest { digest: &mut m }); OsStr::new("CARGO_PKG_NAME").hash(&mut HashToDigest { digest: &mut m }); m.update(b"="); OsStr::new("foo").hash(&mut HashToDigest { digest: &mut m }); f.tempdir.path().hash(&mut HashToDigest { digest: &mut m }); let digest = m.finish(); assert_eq!(res.key, digest); let mut out = res.compilation.outputs().map(|(k, _)| k.to_owned()).collect::<Vec<_>>(); out.sort(); assert_eq!(out, vec!["foo.a", "foo.rlib", "foo.rmeta"]); } fn hash_key<'a, F>(f: &TestFixture, args: &[OsString], env_vars: &[(OsString, OsString)], pre_func: F) -> String where F: Fn(&Path) -> Result<()> { let parsed_args = match parse_arguments(args, &f.tempdir.path()) { CompilerArguments::Ok(parsed_args) => parsed_args, o @ _ => panic!("Got unexpected parse result: {:?}", o), }; // Just use empty files for sources. 
for src in ["foo.rs"].iter() { let s = format!("Failed to create {}", src); f.touch(src).expect(&s); } // as well as externs for e in parsed_args.externs.iter() { let s = format!("Failed to create {:?}", e); f.touch(e.to_str().unwrap()).expect(&s); } pre_func(&f.tempdir.path()).expect("Failed to execute pre_func"); let hasher = Box::new(RustHasher { executable: "rustc".into(), host: "x86-64-unknown-unknown-unknown".to_owned(), sysroot: f.tempdir.path().join("sysroot"), compiler_shlibs_digests: vec![], #[cfg(feature = "dist-client")] rlib_dep_reader: None, parsed_args: parsed_args, }); let creator = new_creator(); let pool = CpuPool::new(1); mock_dep_info(&creator, &["foo.rs"]); mock_file_names(&creator, &["foo.rlib"]); hasher.generate_hash_key(&creator, f.tempdir.path().to_owned(), env_vars.to_owned(), false, &pool) .wait().unwrap().key } fn nothing(_path: &Path) -> Result<()> { Ok(()) } #[test] fn test_equal_hashes_externs() { // Put some content in the extern rlibs so we can verify that the content hashes are // used in the right order. 
fn mk_files(tempdir: &Path) -> Result<()> { create_file(tempdir, "a.rlib", |mut f| f.write_all(b"this is a.rlib"))?; create_file(tempdir, "b.rlib", |mut f| f.write_all(b"this is b.rlib"))?; Ok(()) } let f = TestFixture::new(); assert_eq!(hash_key(&f, &ovec!["--emit", "link", "foo.rs", "--extern", "a=a.rlib", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib", "--extern", "b=b.rlib"], &vec![], &mk_files), hash_key(&f, &ovec!["--extern", "b=b.rlib", "--emit", "link", "--extern", "a=a.rlib", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib"], &vec![], &mk_files)); } #[test] fn test_equal_hashes_link_paths() { let f = TestFixture::new(); assert_eq!(hash_key(&f, &ovec!["--emit", "link", "-L", "x=x", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib", "-L", "y=y"], &vec![], nothing), hash_key(&f, &ovec!["-L", "y=y", "--emit", "link", "-L", "x=x", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib"], &vec![], nothing)); } #[test] fn test_equal_hashes_ignored_args() { let f = TestFixture::new(); assert_eq!(hash_key(&f, &ovec!["--emit", "link", "-L", "x=x", "foo.rs", "--out-dir", "out", "--extern", "a=1", "--crate-name", "foo", "--crate-type", "lib", "-L", "y=y"], &vec![], nothing), hash_key(&f, &ovec!["-L", "y=a", "--emit", "link", "-L", "x=b", "foo.rs", "--extern", "a=2", "--out-dir", "out2", "--crate-name", "foo", "--crate-type", "lib"], &vec![], nothing)); } #[test] fn test_equal_hashes_cfg_features() { let f = TestFixture::new(); assert_eq!(hash_key(&f, &ovec!["--emit", "link", "--cfg", "feature=a", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib", "--cfg", "feature=b"], &vec![], nothing), hash_key(&f, &ovec!["--cfg", "feature=b", "--emit", "link", "--cfg", "feature=a", "foo.rs", "--out-dir", "out", "--crate-name", "foo", "--crate-type", "lib"], &vec![], nothing)); } }
41.764522
233
0.541602
39913a37e2c14e2b5e1a43af81cac1fbe1124d3f
2,440
use std::fs; use std::path::{Path, PathBuf}; use walkdir::WalkDir; use guarding_core::domain::code_file::CodeFile; use crate::identify::code_ident::CodeIdent; use crate::identify::java_ident::JavaIdent; use crate::identify::js_ident::JsIdent; use crate::identify::rust_ident::RustIdent; pub struct ModelBuilder {} impl ModelBuilder { pub fn build_models_by_dir(code_dir: PathBuf) -> Vec<CodeFile> { let mut models = vec![]; for entry in WalkDir::new(code_dir) { let entry = entry.unwrap(); if !entry.file_type().is_file() { continue; } let path = entry.path(); if let None = path.extension() { continue; } ModelBuilder::build_model_by_file(&mut models, path) } models } pub fn build_model_by_file(models: &mut Vec<CodeFile>, path: &Path) { let ext = path.extension().unwrap().to_str().unwrap(); let file_name = path.file_name().unwrap().to_str().unwrap(); match ext { "java" => { let mut file = JavaIdent::parse(ModelBuilder::read_content(path).as_str()); file.path = ModelBuilder::format_path(path); file.file_name = file_name.to_string(); models.push(file); } "js" => { let mut file = JsIdent::parse(ModelBuilder::read_content(path).as_str()); file.path = format!("{}", path.display()); file.file_name = file_name.to_string(); models.push(file); } "rs" => { let mut file = RustIdent::parse(ModelBuilder::read_content(path).as_str()); file.path = format!("{}", path.display()); file.file_name = file_name.to_string(); models.push(file); } &_ => {} } } fn read_content(path: &Path) -> String { fs::read_to_string(path).expect("not such file") } fn format_path(path: &Path) -> String { format!("{}", path.display()) } } #[cfg(test)] mod tests { use std::env; use crate::ModelBuilder; #[test] fn should_parse_current_dir() { let dir = env::current_dir().unwrap(); let models = ModelBuilder::build_models_by_dir(dir); println!("{:?}", models); assert!(models.len() > 0); } }
29.756098
91
0.540574
eba43a2e1b29a78b9a48ea94b83b0803bfdd75da
14,716
use std::{ collections::HashMap, fs::File, io::{LineWriter, Write}, path::Path, }; use std::ops::AddAssign; use num_traits::{Bounded, Num, Zero}; use crate::{ph::HeapElmt, PairingHeap}; /// A simple and undirected graph. /// /// A simple graph assumes that the node indexing starts from ```0``` and is not equipped with a hash map /// for a mapping from external complex objects to internal graph indices. As a result, [`SimpleGraph`] /// doesn't have no runtime overhead for such object storage and mapping. /// /// # Examples /// The following example shows how to construct a graph and find the shortest path between node 1 and 5. /// The data is taken from the illustration in Wikipedia's page for [Dijkstra's algorithm](https://en.wikipedia.org/wiki/Dijkstra's_algorithm). /// /// Here, the numbering is adjusted so that the node indexing starts from ```0```. /// ``` /// use pheap::graph::SimpleGraph; /// /// let mut g = SimpleGraph::<u32>::with_capacity(6); /// /// g.add_weighted_edges(0, 1, 7); /// g.add_weighted_edges(0, 2, 9); /// g.add_weighted_edges(0, 5, 14); /// g.add_weighted_edges(1, 2, 10); /// g.add_weighted_edges(1, 3, 15); /// g.add_weighted_edges(2, 5, 2); /// g.add_weighted_edges(2, 3, 11); /// g.add_weighted_edges(3, 4, 6); /// g.add_weighted_edges(4, 5, 9); /// /// // Finds an SSSP from 0 to 4. /// let mut sp = g.sssp_dijkstra(0, &[4]); /// assert_eq!(1, sp.len()); /// /// let sp = sp.pop().unwrap(); /// assert_eq!(20, sp.dist()); /// assert_eq!(&[0, 2, 5, 4], sp.path().as_slice()); /// /// // Adds a disconnected component to the graph. /// g.add_weighted_edges(6, 7, 2); /// g.add_weighted_edges(6, 8, 3); /// /// // Finds an SSSP starting from 0. The result can be used for later query. 
/// let lsp = g.sssp_dijkstra_lazy(0); /// let lsp = g.sssp_dijkstra_lazy(0); /// let sp = lsp.get(7); /// assert_eq!(false, sp.is_feasible()); /// /// let sp = lsp.get(4); /// assert_eq!(true, sp.is_feasible()); /// assert_eq!(20, sp.dist()); /// assert_eq!(&[0, 2, 5, 4], sp.path().as_slice()); /// /// ``` /// #[derive(Debug, Default)] pub struct SimpleGraph<W> { n_edges: usize, weights: HashMap<usize, Vec<(usize, W)>>, } impl<W> SimpleGraph<W> { /// Creates an empty graph. pub fn new() -> Self { Self { n_edges: 0, weights: HashMap::new(), } } /// Creates an empty graph with the given capacitiy of nodes. pub fn with_capacity(n_nodes: usize) -> Self { Self { n_edges: 0, weights: HashMap::with_capacity(n_nodes), } } /// Returns the number of nodes in the graph. pub fn n_nodes(&self) -> usize { self.weights.len() } /// Returns the number of edges in the graph. pub fn n_edges(&self) -> usize { self.n_edges } /// Adds a weighted edge to the graph. /// /// If the edge already exists in the graph, the weight will be updated. pub fn add_weighted_edges(&mut self, node1: usize, node2: usize, weight: W) where W: Clone + Copy, { if node1 != node2 { self.insert_weight(node1, node2, weight); self.insert_weight(node2, node1, weight); } self.n_edges += 2; } /// Returns the neighbours of a node. #[inline] pub(crate) fn neighbours(&self, node: &usize) -> Option<&Vec<(usize, W)>> { self.weights.get(&node) } /// Finds the shortest paths from a source node to destination nodes. /// /// If you want to keep the result for later usage and/or want to save memory, consider using /// the lazy version [`SimpleGraph::sssp_dijkstra_lazy`], which returns the intermediate result /// from Dijkstra's algorithm. 
pub fn sssp_dijkstra(&self, src: usize, dest: &[usize]) -> Vec<ShortestPath<W>> where W: Bounded + Num + Zero + PartialOrd + Copy, { let nodes = self.dijkstra(src); let mut result = Vec::with_capacity(dest.len()); for ii in dest { result.push(traverse_path(src, *ii, &nodes)); } result } /// Finds the shortest paths from a source node to all nodes and returns the intermediate result /// for later usage. pub fn sssp_dijkstra_lazy(&self, src: usize) -> LazyShortestPaths<W> where W: Bounded + Num + Zero + PartialOrd + Copy, { LazyShortestPaths { src, paths: self.dijkstra(src), } } #[inline] fn dijkstra(&self, src: usize) -> Vec<DijNode<W>> where W: Bounded + Num + Zero + PartialOrd + Copy, { let mut pq = PairingHeap::<usize, W>::new(); pq.insert(src, W::zero()); let mut nodes = vec![DijNode::<W>::new(); self.weights.len()]; nodes[src].dist = W::zero(); let mut len = pq.len(); while len != 0 { let (node, prio) = pq.delete_min().unwrap(); let count = nodes[node].len + 1; if let Some(nb) = self.neighbours(&node) { for (u, dist) in nb { let dijnode = &mut nodes[*u]; let alt = prio + *dist; if !dijnode.visited && alt < dijnode.dist { dijnode.dist = alt; dijnode.pred = node; dijnode.len = count; dijnode.feasible = true; pq.insert(*u, alt); } } } let dijnode = nodes.get_mut(node).unwrap(); dijnode.visited = true; len = pq.len(); } nodes } fn insert_weight(&mut self, node1: usize, node2: usize, weight: W) { match self.weights.get_mut(&node1) { Some(v) => { v.push((node2, weight)); } None => { let v = vec![(node2, weight)]; self.weights.insert(node1, v); } } } /// Write graph as a list of edges. /// /// Each line contains one edge, following [networkx](https://networkx.org/)'s format: /// ```index 1 index 2 {'weight': {}}```. 
pub fn write_edgelist<P>(&self, filepath: P) -> std::io::Result<()> where P: AsRef<Path>, W: std::fmt::Display, { let file = File::create(filepath)?; let mut file = LineWriter::new(file); for (node_idx, nb) in &self.weights { for (vtx_idx, w) in nb { file.write_all( format!("{} {} {{'weight': {}}}\n", node_idx, vtx_idx, w).as_bytes(), )?; } } file.flush()?; Ok(()) } } /// The shortest path from a source node to a destination node. #[derive(Debug)] pub struct ShortestPath<W> { src: usize, dest: usize, feasible: bool, dist: W, path: Vec<usize>, } impl<W> ShortestPath<W> { /// Returns the index of the source node in the shortest path. pub fn src(&self) -> usize { self.src } /// Returns the index of the destination node in the shortest path. pub fn dest(&self) -> usize { self.dest } /// Returns the shortest path's distance pub fn dist(&self) -> W where W: Copy, { self.dist } /// Returns whether a path from the source node to the destination node is feasible. pub fn is_feasible(&self) -> bool { self.feasible } /// Returns the path from the source node to destination node. /// /// The first element of the vector is the source node, the last the destination node. pub fn path(&self) -> &Vec<usize> { &self.path } } /// A struct representing the intermediate output of Dijkstra's algorithm. #[derive(Debug)] pub struct LazyShortestPaths<W> { src: usize, paths: Vec<DijNode<W>>, } impl<W> LazyShortestPaths<W> { /// Returns the shortest path for a given node. pub fn get(&self, node_index: usize) -> ShortestPath<W> where W: Zero + Copy, { traverse_path(self.src, node_index, &self.paths) } /// Returns the shortest paths for all nodes. 
pub fn get_all(&self) -> Vec<ShortestPath<W>> where W: Zero + Copy, { let n_nodes = self.paths.len(); let mut result: Vec<ShortestPath<W>> = Vec::with_capacity(n_nodes); for ii in 0..n_nodes { let end_node = &self.paths[ii]; let expected = end_node.len + 1; let sp = if end_node.feasible { let mut len = 0; let mut chain = Vec::with_capacity(expected); let mut next = end_node.pred; while len < expected { if next < ii { let mut sp = result[next].path.clone(); if ii < self.src { sp.reverse(); } sp.append(&mut chain); chain = sp; break; } chain.insert(0, next); next = self.paths[next].pred; len = chain.len(); } ShortestPath { src: self.src, dest: ii, dist: end_node.dist, path: chain, feasible: true, } } else { ShortestPath { src: self.src, dest: ii, dist: <W as Zero>::zero(), path: Vec::with_capacity(0), feasible: false, } }; result.push(sp); } result } /// Returns the shortest paths for a given list of node indices. pub fn get_list(&self, node_indices: &[usize]) -> Vec<ShortestPath<W>> where W: Zero + Copy, { let mut result = Vec::with_capacity(node_indices.len()); for ii in node_indices { result.push(traverse_path(self.src, *ii, &self.paths)); } result } } #[derive(Clone, Debug)] struct DijNode<W> { /// Id of the predecessor's node in SSSP solution from Dijkstra's algorithm. pred: usize, /// Flag whether a node is visisted or not. len: usize, /// Flag indicating whether the node is already visisted or not. visited: bool, /// Flag indicating whether a path from source node is feasible. feasible: bool, /// Distance to the predecessor. 
dist: W, } impl<W> DijNode<W> { pub fn new() -> Self where W: Bounded, { Self { pred: 0, dist: <W as Bounded>::max_value(), visited: false, len: 0, feasible: false, } } } #[inline(always)] fn traverse_path<W>(src: usize, dest: usize, paths: &[DijNode<W>]) -> ShortestPath<W> where W: Zero + Copy, { let end_node = &paths[dest]; if end_node.feasible { let expected = end_node.len + 1; let mut len = 0; let mut path = Vec::with_capacity(expected); path.push(dest); let mut next = end_node.pred; while len < expected { path.insert(0, next); next = paths[next].pred; len = path.len(); } ShortestPath { src, dest, dist: end_node.dist, path, feasible: true, } } else { ShortestPath { src, dest, dist: <W as Zero>::zero(), path: Vec::with_capacity(0), feasible: false, } } } /// Find the minimum spanning tree (MST) in a graph using Prim's algorithm. /// /// The function returns a tuple of the total weight of the MST and a simple graph, whose edges are the MST's edges. /// /// # Examples /// ```rust /// use pheap::graph::{mst_prim, SimpleGraph}; /// /// let mut g = SimpleGraph::<u32>::new(); /// /// g.add_weighted_edges(0, 1, 4); /// g.add_weighted_edges(0, 7, 8); /// g.add_weighted_edges(1, 2, 8); /// g.add_weighted_edges(1, 7, 11); /// g.add_weighted_edges(2, 3, 7); /// g.add_weighted_edges(2, 5, 4); /// g.add_weighted_edges(2, 8, 2); /// g.add_weighted_edges(3, 4, 9); /// g.add_weighted_edges(3, 5, 14); /// g.add_weighted_edges(4, 5, 10); /// g.add_weighted_edges(5, 6, 2); /// g.add_weighted_edges(6, 7, 1); /// g.add_weighted_edges(6, 8, 6); /// g.add_weighted_edges(7, 8, 7); /// /// // gx is the new graph containing the MST's edges and dx is the total weight. 
/// let (g0, d0) = mst_prim(&g, 0); /// let (g4, d4) = mst_prim(&g, 4); /// /// assert_eq!(d0, d4); /// assert_eq!(g0.n_nodes(), g4.n_nodes()); /// assert_eq!(g0.n_edges(), g4.n_edges()); /// ``` pub fn mst_prim<W>(graph: &SimpleGraph<W>, src: usize) -> (SimpleGraph<W>, W) where W: Copy + PartialOrd + Bounded + Zero + AddAssign, { let mut pq = PairingHeap::<usize, W>::new(); let mut nodes: Vec<_> = (0..graph.n_nodes()) .map(|ii| { let mut node = PrimNode::<W>::new(); node.dist = if ii == src { <W as Zero>::zero() } else { <W as Bounded>::max_value() }; node.idx = ii; node.heap = pq.insert2(ii, node.dist); node }) .collect(); let mut len = pq.len(); while len != 0 { let (node, _) = pq.delete_min().unwrap(); nodes[node].heap.none(); if let Some(nb) = graph.neighbours(&node) { for (u, dist) in nb { let primnode = &mut nodes[*u]; if !primnode.heap.is_none() && *dist < primnode.dist { primnode.dist = *dist; primnode.parent = Some(node); pq.update_prio(&primnode.heap, primnode.dist); } } } len = pq.len(); } let mut rg = SimpleGraph::<W>::with_capacity(graph.n_nodes()); let mut dist = <W as Zero>::zero(); for node in nodes { if let Some(p) = node.parent { rg.add_weighted_edges(p, node.idx, node.dist); dist += node.dist; } } (rg, dist) } #[derive(Clone, Debug)] struct PrimNode<W> { idx: usize, parent: Option<usize>, heap: HeapElmt<usize, W>, dist: W, } impl<W> PrimNode<W> { pub fn new() -> Self where W: Bounded, { Self { idx: 0, parent: None, heap: HeapElmt::<usize, W>::default(), dist: <W as Bounded>::max_value(), } } }
27.871212
143
0.524259
61e52a3abd19cc0e11e2ad50d321c1ad5cc63593
11,572
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The implementation of `rtio` for libuv use std::c_str::CString; use std::mem; use libc::c_int; use libc::{O_CREAT, O_APPEND, O_TRUNC, O_RDWR, O_RDONLY, O_WRONLY, S_IRUSR, S_IWUSR}; use libc; use std::rt::rtio; use std::rt::rtio::{ProcessConfig, IoFactory, EventLoop, IoResult}; #[cfg(test)] use std::rt::thread::Thread; use super::{uv_error_to_io_error, Loop}; use addrinfo::GetAddrInfoRequest; use async::AsyncWatcher; use file::{FsRequest, FileWatcher}; use queue::QueuePool; use homing::HomeHandle; use idle::IdleWatcher; use net::{TcpWatcher, TcpListener, UdpWatcher}; use pipe::{PipeWatcher, PipeListener}; use process::Process; use signal::SignalWatcher; use timer::TimerWatcher; use tty::TtyWatcher; use uvll; // Obviously an Event Loop is always home. pub struct UvEventLoop { uvio: UvIoFactory } impl UvEventLoop { pub fn new() -> UvEventLoop { let mut loop_ = Loop::new(); let handle_pool = QueuePool::new(&mut loop_); UvEventLoop { uvio: UvIoFactory { loop_: loop_, handle_pool: Some(handle_pool), } } } } impl Drop for UvEventLoop { fn drop(&mut self) { // Must first destroy the pool of handles before we destroy the loop // because otherwise the contained async handle will be destroyed after // the loop is free'd (use-after-free). We also must free the uv handle // after the loop has been closed because during the closing of the loop // the handle is required to be used apparently. 
// // Lastly, after we've closed the pool of handles we pump the event loop // one last time to run any closing callbacks to make sure the loop // shuts down cleanly. let handle = self.uvio.handle_pool.get_ref().handle(); drop(self.uvio.handle_pool.take()); self.run(); self.uvio.loop_.close(); unsafe { uvll::free_handle(handle) } } } impl EventLoop for UvEventLoop { fn run(&mut self) { self.uvio.loop_.run(); } fn callback(&mut self, f: proc()) { IdleWatcher::onetime(&mut self.uvio.loop_, f); } fn pausable_idle_callback(&mut self, cb: Box<rtio::Callback + Send>) -> Box<rtio::PausableIdleCallback + Send> { IdleWatcher::new(&mut self.uvio.loop_, cb) as Box<rtio::PausableIdleCallback + Send> } fn remote_callback(&mut self, f: Box<rtio::Callback + Send>) -> Box<rtio::RemoteCallback + Send> { box AsyncWatcher::new(&mut self.uvio.loop_, f) as Box<rtio::RemoteCallback + Send> } fn io<'a>(&'a mut self) -> Option<&'a mut rtio::IoFactory> { let factory = &mut self.uvio as &mut rtio::IoFactory; Some(factory) } fn has_active_io(&self) -> bool { self.uvio.loop_.get_blockers() > 0 } } #[test] fn test_callback_run_once() { Thread::start(proc() { let mut event_loop = UvEventLoop::new(); let mut count = 0; let count_ptr: *mut int = &mut count; event_loop.callback(proc() { unsafe { *count_ptr += 1 } }); event_loop.run(); assert_eq!(count, 1); }).join(); } pub struct UvIoFactory { pub loop_: Loop, handle_pool: Option<Box<QueuePool>>, } impl UvIoFactory { pub fn uv_loop<'a>(&mut self) -> *mut uvll::uv_loop_t { self.loop_.handle } pub fn make_handle(&mut self) -> HomeHandle { // It's understood by the homing code that the "local id" is just the // pointer of the local I/O factory cast to a uint. let id: uint = unsafe { mem::transmute_copy(&self) }; HomeHandle::new(id, &mut **self.handle_pool.get_mut_ref()) } } impl IoFactory for UvIoFactory { // Connect to an address and return a new stream // NB: This blocks the task waiting on the connection. 
// It would probably be better to return a future fn tcp_connect(&mut self, addr: rtio::SocketAddr, timeout: Option<u64>) -> IoResult<Box<rtio::RtioTcpStream + Send>> { match TcpWatcher::connect(self, addr, timeout) { Ok(t) => Ok(box t as Box<rtio::RtioTcpStream + Send>), Err(e) => Err(uv_error_to_io_error(e)), } } fn tcp_bind(&mut self, addr: rtio::SocketAddr) -> IoResult<Box<rtio::RtioTcpListener + Send>> { match TcpListener::bind(self, addr) { Ok(t) => Ok(t as Box<rtio::RtioTcpListener + Send>), Err(e) => Err(uv_error_to_io_error(e)), } } fn udp_bind(&mut self, addr: rtio::SocketAddr) -> IoResult<Box<rtio::RtioUdpSocket + Send>> { match UdpWatcher::bind(self, addr) { Ok(u) => Ok(box u as Box<rtio::RtioUdpSocket + Send>), Err(e) => Err(uv_error_to_io_error(e)), } } fn timer_init(&mut self) -> IoResult<Box<rtio::RtioTimer + Send>> { Ok(TimerWatcher::new(self) as Box<rtio::RtioTimer + Send>) } fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>, hint: Option<rtio::AddrinfoHint>) -> IoResult<Vec<rtio::AddrinfoInfo>> { let r = GetAddrInfoRequest::run(&self.loop_, host, servname, hint); r.map_err(uv_error_to_io_error) } fn fs_from_raw_fd(&mut self, fd: c_int, close: rtio::CloseBehavior) -> Box<rtio::RtioFileStream + Send> { box FileWatcher::new(self, fd, close) as Box<rtio::RtioFileStream + Send> } fn fs_open(&mut self, path: &CString, fm: rtio::FileMode, fa: rtio::FileAccess) -> IoResult<Box<rtio::RtioFileStream + Send>> { let flags = match fm { rtio::Open => 0, rtio::Append => libc::O_APPEND, rtio::Truncate => libc::O_TRUNC, }; // Opening with a write permission must silently create the file. 
let (flags, mode) = match fa { rtio::Read => (flags | libc::O_RDONLY, 0), rtio::Write => (flags | libc::O_WRONLY | libc::O_CREAT, libc::S_IRUSR | libc::S_IWUSR), rtio::ReadWrite => (flags | libc::O_RDWR | libc::O_CREAT, libc::S_IRUSR | libc::S_IWUSR), }; match FsRequest::open(self, path, flags as int, mode as int) { Ok(fs) => Ok(box fs as Box<rtio::RtioFileStream + Send>), Err(e) => Err(uv_error_to_io_error(e)) } } fn fs_unlink(&mut self, path: &CString) -> IoResult<()> { let r = FsRequest::unlink(&self.loop_, path); r.map_err(uv_error_to_io_error) } fn fs_lstat(&mut self, path: &CString) -> IoResult<rtio::FileStat> { let r = FsRequest::lstat(&self.loop_, path); r.map_err(uv_error_to_io_error) } fn fs_stat(&mut self, path: &CString) -> IoResult<rtio::FileStat> { let r = FsRequest::stat(&self.loop_, path); r.map_err(uv_error_to_io_error) } fn fs_mkdir(&mut self, path: &CString, perm: uint) -> IoResult<()> { let r = FsRequest::mkdir(&self.loop_, path, perm as c_int); r.map_err(uv_error_to_io_error) } fn fs_rmdir(&mut self, path: &CString) -> IoResult<()> { let r = FsRequest::rmdir(&self.loop_, path); r.map_err(uv_error_to_io_error) } fn fs_rename(&mut self, path: &CString, to: &CString) -> IoResult<()> { let r = FsRequest::rename(&self.loop_, path, to); r.map_err(uv_error_to_io_error) } fn fs_chmod(&mut self, path: &CString, perm: uint) -> IoResult<()> { let r = FsRequest::chmod(&self.loop_, path, perm as c_int); r.map_err(uv_error_to_io_error) } fn fs_readdir(&mut self, path: &CString, flags: c_int) -> IoResult<Vec<CString>> { let r = FsRequest::readdir(&self.loop_, path, flags); r.map_err(uv_error_to_io_error) } fn fs_link(&mut self, src: &CString, dst: &CString) -> IoResult<()> { let r = FsRequest::link(&self.loop_, src, dst); r.map_err(uv_error_to_io_error) } fn fs_symlink(&mut self, src: &CString, dst: &CString) -> IoResult<()> { let r = FsRequest::symlink(&self.loop_, src, dst); r.map_err(uv_error_to_io_error) } fn fs_chown(&mut self, path: &CString, uid: int, 
gid: int) -> IoResult<()> { let r = FsRequest::chown(&self.loop_, path, uid, gid); r.map_err(uv_error_to_io_error) } fn fs_readlink(&mut self, path: &CString) -> IoResult<CString> { let r = FsRequest::readlink(&self.loop_, path); r.map_err(uv_error_to_io_error) } fn fs_utime(&mut self, path: &CString, atime: u64, mtime: u64) -> IoResult<()> { let r = FsRequest::utime(&self.loop_, path, atime, mtime); r.map_err(uv_error_to_io_error) } fn spawn(&mut self, cfg: ProcessConfig) -> IoResult<(Box<rtio::RtioProcess + Send>, Vec<Option<Box<rtio::RtioPipe + Send>>>)> { match Process::spawn(self, cfg) { Ok((p, io)) => { Ok((p as Box<rtio::RtioProcess + Send>, io.move_iter().map(|i| i.map(|p| { box p as Box<rtio::RtioPipe + Send> })).collect())) } Err(e) => Err(uv_error_to_io_error(e)), } } fn kill(&mut self, pid: libc::pid_t, signum: int) -> IoResult<()> { Process::kill(pid, signum).map_err(uv_error_to_io_error) } fn unix_bind(&mut self, path: &CString) -> IoResult<Box<rtio::RtioUnixListener + Send>> { match PipeListener::bind(self, path) { Ok(p) => Ok(p as Box<rtio::RtioUnixListener + Send>), Err(e) => Err(uv_error_to_io_error(e)), } } fn unix_connect(&mut self, path: &CString, timeout: Option<u64>) -> IoResult<Box<rtio::RtioPipe + Send>> { match PipeWatcher::connect(self, path, timeout) { Ok(p) => Ok(box p as Box<rtio::RtioPipe + Send>), Err(e) => Err(uv_error_to_io_error(e)), } } fn tty_open(&mut self, fd: c_int, readable: bool) -> IoResult<Box<rtio::RtioTTY + Send>> { match TtyWatcher::new(self, fd, readable) { Ok(tty) => Ok(box tty as Box<rtio::RtioTTY + Send>), Err(e) => Err(uv_error_to_io_error(e)) } } fn pipe_open(&mut self, fd: c_int) -> IoResult<Box<rtio::RtioPipe + Send>> { match PipeWatcher::open(self, fd) { Ok(s) => Ok(box s as Box<rtio::RtioPipe + Send>), Err(e) => Err(uv_error_to_io_error(e)) } } fn signal(&mut self, signum: int, cb: Box<rtio::Callback + Send>) -> IoResult<Box<rtio::RtioSignal + Send>> { match SignalWatcher::new(self, signum, cb) { Ok(s) => 
Ok(s as Box<rtio::RtioSignal + Send>), Err(e) => Err(uv_error_to_io_error(e)), } } }
35.388379
80
0.581922
efb862e799498cd78b8e918ff97329f985a42238
5,242
// Copyright 2020 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::sync::Arc; use async_trait::async_trait; use clap::App; use clap::Arg; use clap::ArgMatches; use crate::cmds::command::Command; use crate::cmds::completions::completion::CompletionCommand; use crate::cmds::config; use crate::cmds::config::Config; use crate::cmds::loads::load::LoadCommand; use crate::cmds::queries::query::QueryCommand; use crate::cmds::ups::up::UpCommand; use crate::cmds::ClusterCommand; use crate::cmds::PackageCommand; use crate::cmds::VersionCommand; use crate::cmds::Writer; use crate::error::Result; #[derive(Clone)] pub struct RootCommand {} impl RootCommand { pub fn create() -> Self { RootCommand {} } } #[async_trait] impl Command for RootCommand { fn name(&self) -> &str { "bendctl" } fn clap(&self) -> App<'static> { App::new("bendctl") .arg( Arg::new("group") .long("group") .about("Sets the group name for configuration") .default_value("local") .env("DATABEND_GROUP") .global(true) .takes_value(true), ) .arg( Arg::new("databend_dir") .long("databend_dir") .about("Sets the directory to store databend binaries(query and store)") .default_value("~/.databend") .env("databend_dir") .global(true) .takes_value(true) .value_hint(clap::ValueHint::DirPath), ) .arg( Arg::new("download_url") .long("download_url") .about("Sets the url to download databend binaries") .default_value(config::REPO_DATABEND_URL) .env("DOWNLOAD_URL") .global(true) .takes_value(true), ) .arg( 
Arg::new("tag_url") .long("tag_url") .about("Sets the url to for databend tags") .default_value(config::REPO_DATABEND_TAG_URL) .env("DOWNLOAD_URL") .global(true) .takes_value(true), ) .arg( Arg::new("validation_url") .long("validation_url") .about("Sets the url to validate on custom download network connection") .env("DOWNLOAD_VALIDATION_URL") .default_value(config::REPO_DATABEND_TAG_URL) .global(true) .takes_value(true), ) .arg( Arg::new("playground_url") .long("playground_url") .about("Sets the url to download databend playground") .env("DOWNLOAD_PLAYGROUND_URL") .default_value(config::REPO_PLAYGROUND_URL) .global(true) .takes_value(true), ) .arg( Arg::new("log-level") .long("log-level") .about("Sets the log-level for current settings") .env("BEND_LOG_LEVEL") .default_value("info") .global(true) .takes_value(true), ) .subcommand(CompletionCommand::default().clap()) .subcommand(PackageCommand::default().clap()) .subcommand(VersionCommand::default().clap()) .subcommand(ClusterCommand::default().clap()) .subcommand(QueryCommand::default().clap()) .subcommand(UpCommand::default().clap()) .subcommand(LoadCommand::default().clap()) } fn about(&self) -> &'static str { "Databend CLI" } fn subcommands(&self) -> Vec<Arc<dyn Command>> { let config = Config::create(self.clap()); vec![ Arc::new(PackageCommand::create(config.clone())), Arc::new(VersionCommand::create()), Arc::new(ClusterCommand::create(config.clone())), Arc::new(QueryCommand::create(config.clone())), Arc::new(LoadCommand::create(config.clone())), Arc::new(UpCommand::create(config)), Arc::new(CompletionCommand::create()), ] } fn is(&self, s: &str) -> bool { self.name() == s } async fn exec_matches(&self, writer: &mut Writer, args: Option<&ArgMatches>) -> Result<()> { self.exec_subcommand(writer, args).await } }
34.946667
96
0.528806
d98419d3130de872a7ad9f71fb5166e5e249e7cb
4,132
#![cfg(test)]

// Number-formatting tests for the SCSS compiler.
// Convention (presumably, per the `macros` module — confirm there):
// `test!(name, input)` asserts the input round-trips unchanged,
// `test!(name, input, expected)` asserts the emitted CSS, and
// `error!(name, input, msg)` asserts a parse error with that message.
#[macro_use]
mod macros;

// --- Rounding and precision ---

// this is `1` for node-sass, but .999999etc for web compiler
test!(
    precision_does_not_round_up,
    "a {\n color: 0.99999999991;\n}\n",
    "a {\n color: 0.9999999999;\n}\n"
);
// this is `1` for node-sass, but .999999etc for web compiler
test!(
    precision_does_round_up,
    "a {\n color: 1.00000000009;\n}\n",
    "a {\n color: 1.0000000001;\n}\n"
);
test!(
    many_nines_becomes_one,
    "a {\n color: 0.9999999999999999;\n}\n",
    "a {\n color: 1;\n}\n"
);
test!(
    many_nines_becomes_one_neg,
    "a {\n color: -0.9999999999999999;\n}\n",
    "a {\n color: -1;\n}\n"
);
// Negative zero is normalized to plain zero.
test!(
    negative_zero,
    "a {\n color: -0;\n}\n",
    "a {\n color: 0;\n}\n"
);
// Trailing zero decimals are dropped.
test!(
    decimal_is_zero,
    "a {\n color: 1.0000;\n}\n",
    "a {\n color: 1;\n}\n"
);
test!(many_nines_not_rounded, "a {\n color: 0.999999;\n}\n");
test!(positive_integer, "a {\n color: 1;\n}\n");
test!(negative_integer, "a {\n color: -1;\n}\n");

// --- Leading zeros ---

// A leading zero is inserted before a bare decimal point.
test!(
    positive_float_no_leading_zero,
    "a {\n color: .1;\n}\n",
    "a {\n color: 0.1;\n}\n"
);
test!(
    negative_float_no_leading_zero,
    "a {\n color: -.1;\n}\n",
    "a {\n color: -0.1;\n}\n"
);
test!(positive_float_leading_zero, "a {\n color: 0.1;\n}\n");
test!(negative_float_leading_zero, "a {\n color: -0.1;\n}\n");

// --- Arithmetic and comparisons ---

test!(
    num_plus_div,
    "a {\n color: 1 + 3/4;\n}\n",
    "a {\n color: 1.75;\n}\n"
);
// Values below the output precision collapse to (unsigned) zero.
test!(
    negative_near_zero_no_sign,
    "a {\n color: -0.000000000001;\n}\n",
    "a {\n color: 0;\n}\n"
);
test!(
    equality_unit_conversions,
    "a {\n color: 1in == 96px;\n}\n",
    "a {\n color: true;\n}\n"
);

// --- Scientific notation ---

test!(
    positive_scientific_notation,
    "a {\n color: 1e5;\n}\n",
    "a {\n color: 100000;\n}\n"
);
test!(
    positive_scientific_notation_leading_zeroes,
    "a {\n color: 1e05;\n}\n",
    "a {\n color: 100000;\n}\n"
);
test!(
    positive_scientific_notation_capital,
    "a {\n color: 1E5;\n}\n",
    "a {\n color: 100000;\n}\n"
);
test!(
    negative_scientific_notation,
    "a {\n color: 1e-5;\n}\n",
    "a {\n color: 0.00001;\n}\n"
);
test!(
    negative_scientific_notation_leading_zeroes,
    "a {\n color: 1e-05;\n}\n",
    "a {\n color: 0.00001;\n}\n"
);
test!(
    negative_scientific_notation_capital,
    "a {\n color: 1E-5;\n}\n",
    "a {\n color: 0.00001;\n}\n"
);
test!(
    positive_scientific_notation_decimal,
    "a {\n color: 1.2e5;\n}\n",
    "a {\n color: 120000;\n}\n"
);
test!(
    negative_scientific_notation_decimal,
    "a {\n color: 1.2e-5;\n}\n",
    "a {\n color: 0.000012;\n}\n"
);
// A bare `e` with no exponent digits is treated as a unit, not notation.
test!(unit_e, "a {\n color: 1e;\n}\n");
test!(
    positive_scientific_notation_zero,
    "a {\n color: 1e0;\n}\n",
    "a {\n color: 1;\n}\n"
);
test!(
    negative_scientific_notation_zero,
    "a {\n color: 1e-0;\n}\n",
    "a {\n color: 1;\n}\n"
);
// `1.2e5.5` parses as `1.2e5` followed by the separate number `.5`.
test!(
    scientific_notation_decimal,
    "a {\n color: 1.2e5.5;\n}\n",
    "a {\n color: 120000 0.5;\n}\n"
);
// `1e` as a unit participates in subtraction like any dimensioned value.
test!(
    binary_op_with_e_as_unit,
    "a {\n color: 1e - 2;\n}\n",
    "a {\n color: -1e;\n}\n"
);

// --- Scientific-notation parse errors (missing exponent digits) ---

error!(
    scientific_notation_nothing_after_dash_in_style,
    "a {\n color: 1e-;\n}\n", "Error: Expected digit."
);
error!(
    scientific_notation_nothing_after_dash,
    "a {\n color: 1e-", "Error: Expected digit."
);
error!(
    scientific_notation_whitespace_after_dash,
    "a {\n color: 1e- 2;\n}\n", "Error: Expected digit."
);
error!(
    scientific_notation_ident_char_after_dash,
    "a {\n color: 1e-a;\n}\n", "Error: Expected digit."
);

// --- Overflow behavior ---

test!(
    number_overflow_from_addition,
    "a {\n color: 999999999999999999 + 999999999999999999 + 999999999999999999 + 999999999999999999 + 999999999999999999 + 999999999999999999 + 999999999999999999 + 999999999999999999 + 999999999999999999 + 999999999999999999;\n}\n",
    "a {\n color: 9999999999999999990;\n}\n"
);
test!(
    number_overflow_from_multiplication,
    "a {\n color: 999999999999999999 * 10;\n}\n",
    "a {\n color: 9999999999999999990;\n}\n"
);
25.195122
63
0.570426
ff5b9f80a60c17646d7519c09f5541ede26228f8
3,699
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use std::ffi::CString;
use std::fmt;

use super::JSString;

impl JSString {
    /// Convert this `JSString` to a `String`.
    ///
    /// Copies the contents out of the engine-owned string into an owned
    /// Rust `String` via the UTF-8 C-string FFI.
    pub fn to_string(&self) -> String {
        unsafe {
            // Upper bound, in bytes, of the UTF-8 encoding (incl. the NUL).
            let max_size = ultralight_sys::JSStringGetMaximumUTF8CStringSize(self.raw);
            let mut buffer: Vec<u8> = Vec::with_capacity(max_size as usize);
            // Writes a NUL-terminated UTF-8 copy into `buffer` and returns
            // the number of bytes written, including the terminator.
            let actual_size = ultralight_sys::JSStringGetUTF8CString(
                self.raw,
                buffer.as_mut_ptr() as *mut ::std::os::raw::c_char,
                max_size,
            );
            // SAFETY: the FFI call initialized `actual_size` bytes; we drop
            // the trailing NUL before exposing the buffer as a Vec.
            // NOTE(review): assumes actual_size >= 1; a 0 return from the FFI
            // call would underflow this subtraction — TODO confirm the
            // JSStringGetUTF8CString contract rules that out.
            buffer.set_len((actual_size - 1) as usize);
            String::from_utf8(buffer).unwrap()
        }
    }
}

impl fmt::Debug for JSString {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "JSString {{ \"{}\" }}", self.to_string())
    }
}

impl Drop for JSString {
    // Release the engine's reference when the Rust wrapper is dropped.
    fn drop(&mut self) {
        unsafe { ultralight_sys::JSStringRelease(self.raw) }
    }
}

// Equality is delegated to the engine so it compares string *contents*,
// not raw pointers.
impl PartialEq for JSString {
    fn eq(&self, other: &JSString) -> bool {
        unsafe { ultralight_sys::JSStringIsEqual(self.raw, other.raw) }
    }
}

impl<'s> PartialEq<&'s str> for JSString {
    fn eq(&self, other: &&'s str) -> bool {
        // NOTE: panics if `other` contains an interior NUL byte.
        let utf8 = CString::new(other.as_bytes()).unwrap();
        unsafe { ultralight_sys::JSStringIsEqualToUTF8CString(self.raw, utf8.as_ptr()) }
    }
}

impl PartialEq<String> for JSString {
    fn eq(&self, other: &String) -> bool {
        let utf8 = CString::new(other.as_bytes()).unwrap();
        unsafe { ultralight_sys::JSStringIsEqualToUTF8CString(self.raw, utf8.as_ptr()) }
    }
}

// Mirror impls so comparisons work in both operand orders.
impl<'s> PartialEq<JSString> for &'s str {
    fn eq(&self, other: &JSString) -> bool {
        let utf8 = CString::new(self.as_bytes()).unwrap();
        unsafe { ultralight_sys::JSStringIsEqualToUTF8CString(other.raw, utf8.as_ptr()) }
    }
}

impl PartialEq<JSString> for String {
    fn eq(&self, other: &JSString) -> bool {
        let utf8 = CString::new(self.as_bytes()).unwrap();
        unsafe { ultralight_sys::JSStringIsEqualToUTF8CString(other.raw, utf8.as_ptr()) }
    }
}

impl<'s> From<&'s str> for JSString {
    /// Create an engine-owned string from a Rust `&str`.
    /// NOTE: panics if `s` contains an interior NUL byte.
    fn from(s: &'s str) -> Self {
        let c = CString::new(s.as_bytes()).unwrap();
        JSString {
            raw: unsafe { ultralight_sys::JSStringCreateWithUTF8CString(c.as_ptr()) },
        }
    }
}

impl From<String> for JSString {
    /// Create an engine-owned string from an owned Rust `String`.
    fn from(s: String) -> Self {
        let c = CString::new(s.as_bytes()).unwrap();
        JSString {
            raw: unsafe { ultralight_sys::JSStringCreateWithUTF8CString(c.as_ptr()) },
        }
    }
}

impl<'s> From<&'s JSString> for String {
    fn from(s: &'s JSString) -> Self {
        s.to_string()
    }
}

#[cfg(test)]
mod tests {
    use super::JSString;

    // Round-trip conversions: &str / String -> JSString -> String.
    #[test]
    fn from_conversion() {
        let a: JSString = "abc".into();
        let b: JSString = "abc".to_owned().into();
        assert_eq!(a, a);
        assert_eq!(a, b);
        assert_eq!(b, b);
        let c: JSString = "def".into();
        assert_ne!(a, c);
        let d: JSString = "abcdef".into();
        assert_ne!(a, d);
        let e: String = (&d).into();
        assert_eq!(e, "abcdef");
    }

    // Cross-type equality in both operand orders.
    #[test]
    fn equality() {
        let a: JSString = "abc".into();
        let s: String = "abc".to_owned();
        assert_eq!(a, "abc");
        assert_eq!(a, s);
        assert_eq!("abc", a);
        assert_eq!(s, a);
    }
}
27.81203
89
0.578535
647ab0540bbd08903da820b4837dd1f1c6ddd372
66,929
//! # BTC-Relay Module //! Based on the [specification](https://interlay.gitlab.io/polkabtc-spec/btcrelay-spec/). #![deny(warnings)] #![cfg_attr(test, feature(proc_macro_hygiene))] #![cfg_attr(not(feature = "std"), no_std)] mod ext; mod types; #[cfg(any(feature = "runtime-benchmarks", test))] mod benchmarking; mod default_weights; pub use default_weights::WeightInfo; #[cfg(test)] mod tests; #[cfg(test)] mod mock; #[cfg(test)] extern crate mocktopus; #[cfg(test)] use mocktopus::macros::mockable; use frame_support::{ dispatch::{DispatchError, DispatchResult}, ensure, runtime_print, transactional, }; use frame_system::{ensure_root, ensure_signed}; use sp_core::{H256, U256}; use sp_std::{collections::btree_set::BTreeSet, prelude::*}; // Crates pub use bitcoin::{self, Address as BtcAddress, PublicKey as BtcPublicKey}; use bitcoin::{ merkle::{MerkleProof, ProofResult}, parser::{parse_block_header, parse_transaction}, types::{BlockChain, BlockHeader, H256Le, RawBlockHeader, Transaction, TransactionOutput}, Error as BitcoinError, }; use security::types::ErrorCode; pub use types::RichBlockHeader; pub use pallet::*; #[frame_support::pallet] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; #[pallet::pallet] #[pallet::generate_store(trait Store)] pub struct Pallet<T>(_); #[pallet::config] pub trait Config: frame_system::Config + security::Config { /// The overarching event type. type Event: From<Event<Self>> + Into<<Self as frame_system::Config>::Event> + IsType<<Self as frame_system::Config>::Event>; /// Weight information for the extrinsics in this module. 
type WeightInfo: WeightInfo; } #[pallet::hooks] impl<T: Config> Hooks<T::BlockNumber> for Pallet<T> {} #[pallet::call] impl<T: Config> Pallet<T> { /// Verifies the inclusion of `tx_id` into the relay, and validates the given raw Bitcoin transaction, according /// to the supported transaction format (see <https://interlay.gitlab.io/polkabtc-spec/btcrelay-spec/intro/accepted-format.html>) /// /// # Arguments /// /// * `raw_merkle_proof` - The raw merkle proof as returned by bitcoin `gettxoutproof` /// * `confirmations` - The number of confirmations needed to accept the proof. If `none`, the value stored in /// the StableBitcoinConfirmations storage item is used. /// * `raw_tx` - raw Bitcoin transaction /// * `minimum_btc` - minimum amount of BTC (satoshis) sent to the recipient /// * `recipient_btc_address` - 20 byte Bitcoin address of recipient of the BTC in the 1st / payment UTXO /// * `op_return_id` - 32 byte hash identifier expected in OP_RETURN (replay protection) #[pallet::weight(<T as Config>::WeightInfo::verify_and_validate_transaction())] #[transactional] pub fn verify_and_validate_transaction( origin: OriginFor<T>, raw_merkle_proof: Vec<u8>, confirmations: Option<u32>, raw_tx: Vec<u8>, minimum_btc: i64, recipient_btc_address: BtcAddress, op_return_id: Option<H256>, ) -> DispatchResultWithPostInfo { ext::security::ensure_parachain_status_not_shutdown::<T>()?; let _ = ensure_signed(origin)?; Self::_verify_and_validate_transaction( raw_merkle_proof, raw_tx, recipient_btc_address, Some(minimum_btc), op_return_id, confirmations, )?; Ok(().into()) } /// Verifies the inclusion of `tx_id` /// /// # Arguments /// /// * `tx_id` - The hash of the transaction to check for /// * `raw_merkle_proof` - The raw merkle proof as returned by bitcoin `gettxoutproof` /// * `confirmations` - The number of confirmations needed to accept the proof. If `none`, the value stored in /// the `StableBitcoinConfirmations` storage item is used. 
/// /// # <weight> /// Key: C (len of chains), P (len of positions) /// - Storage Reads: /// - One storage read to check if inclusion check is disabled. O(1) /// - One storage read to retrieve best block height. O(1) /// - One storage read to check if transaction is in active fork. O(1) /// - One storage read to retrieve block header. O(1) /// - One storage read to check that parachain is not shutdown. O(1) /// - One storage read to check stable bitcoin confirmations. O(1) /// - One storage read to check stable parachain confirmations. O(1) /// # </weight> #[pallet::weight(<T as Config>::WeightInfo::verify_transaction_inclusion())] #[transactional] pub fn verify_transaction_inclusion( origin: OriginFor<T>, tx_id: H256Le, raw_merkle_proof: Vec<u8>, confirmations: Option<u32>, ) -> DispatchResultWithPostInfo { ext::security::ensure_parachain_status_not_shutdown::<T>()?; let _ = ensure_signed(origin)?; Self::_verify_transaction_inclusion(tx_id, raw_merkle_proof, confirmations)?; Ok(().into()) } /// Validates a given raw Bitcoin transaction, according to the supported transaction /// format (see <https://interlay.gitlab.io/polkabtc-spec/btcrelay-spec/intro/accepted-format.html>) /// This DOES NOT check if the transaction is included in a block, nor does it guarantee that the /// transaction is fully valid according to the consensus (needs full node). 
/// /// # Arguments /// * `raw_tx` - raw Bitcoin transaction /// * `minimum_btc` - minimum amount of BTC (satoshis) sent to the recipient /// * `recipient_btc_address` - expected Bitcoin address of recipient (p2sh, p2pkh, p2wpkh) /// * `op_return_id` - 32 byte hash identifier expected in OP_RETURN (replay protection) #[pallet::weight(<T as Config>::WeightInfo::validate_transaction())] #[transactional] pub fn validate_transaction( origin: OriginFor<T>, raw_tx: Vec<u8>, minimum_btc: i64, recipient_btc_address: BtcAddress, op_return_id: Option<H256>, ) -> DispatchResultWithPostInfo { ext::security::ensure_parachain_status_not_shutdown::<T>()?; let _ = ensure_signed(origin)?; let transaction = Self::parse_transaction(&raw_tx)?; Self::_validate_transaction( transaction, recipient_btc_address, Some(minimum_btc), op_return_id.map(|x| x.as_bytes().to_vec()), )?; Ok(().into()) } /// Insert an error at the specified block. /// /// # Arguments /// /// * `origin` - the dispatch origin of this call (must be _Root_) /// * `block_hash` - the hash of the bitcoin block /// * `error` - the error code to insert /// /// # Weight: `O(1)` #[pallet::weight(0)] #[transactional] pub fn insert_block_error( origin: OriginFor<T>, block_hash: H256Le, error: ErrorCode, ) -> DispatchResultWithPostInfo { ensure_root(origin)?; Self::flag_block_error(block_hash, error)?; Ok(().into()) } /// Remove an error from the specified block. 
/// /// # Arguments /// /// * `origin` - the dispatch origin of this call (must be _Root_) /// * `block_hash` - the hash of the bitcoin block /// * `error` - the error code to remove /// /// # Weight: `O(1)` #[pallet::weight(0)] #[transactional] pub fn remove_block_error( origin: OriginFor<T>, block_hash: H256Le, error: ErrorCode, ) -> DispatchResultWithPostInfo { ensure_root(origin)?; Self::clear_block_error(block_hash, error)?; Ok(().into()) } } #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] #[pallet::metadata(T::AccountId = "AccountId")] pub enum Event<T: Config> { /// block_height, block_header_hash, relayer_id Initialized(u32, H256Le, T::AccountId), /// new chain height, block_header_hash, relayer_id StoreMainChainHeader(u32, H256Le, T::AccountId), /// chain_id, fork height, block_header_hash, relayer_id StoreForkHeader(u32, u32, H256Le, T::AccountId), /// new_chain_tip, chain height, fork_depth ChainReorg(H256Le, u32, u32), /// main chain height, fork height, fork id ForkAheadOfMainChain(u32, u32, u32), /// block_hash, chain_id, error FlagBlockError(H256Le, u32, ErrorCode), /// block_hash, chain_id, error ClearBlockError(H256Le, u32, ErrorCode), } #[pallet::error] pub enum Error<T> { /// Already initialized AlreadyInitialized, /// Missing the block at this height MissingBlockHeight, /// Invalid block header size InvalidHeaderSize, /// Block already stored DuplicateBlock, /// Block already stored and is not head OutdatedBlock, /// Previous block hash not found PrevBlock, /// Invalid chain ID InvalidChainID, /// PoW hash does not meet difficulty target of header LowDiff, /// Incorrect difficulty target specified in block header DiffTargetHeader, /// Malformed transaction identifier MalformedTxid, /// Transaction has less confirmations of Bitcoin blocks than required BitcoinConfirmations, /// Transaction has less confirmations of Parachain blocks than required ParachainConfirmations, /// Current fork ongoing OngoingFork, /// Merkle 
proof is malformed MalformedMerkleProof, /// Invalid merkle proof InvalidMerkleProof, /// Feature disabled. Reason: a main chain block with a lower height is flagged with NO_DATA. NoData, /// Feature disabled. Reason: a main chain block is flagged as INVALID. Invalid, /// BTC Parachain has shut down Shutdown, /// Transaction hash does not match given txid InvalidTxid, /// Value of payment below requested amount InsufficientValue, /// Transaction has incorrect format MalformedTransaction, /// Incorrect recipient Bitcoin address InvalidPayment, /// Incorrect transaction output format InvalidOutputFormat, /// Incorrect identifier in OP_RETURN field InvalidOpReturn, /// Invalid transaction version InvalidTxVersion, /// Error code not applicable to blocks UnknownErrorcode, /// Blockchain with requested ID not found ForkIdNotFound, /// Block header not found for given hash BlockNotFound, /// Error code already reported AlreadyReported, /// Unauthorized staked relayer UnauthorizedRelayer, /// Overflow of chain counter ChainCounterOverflow, /// Overflow of block height BlockHeightOverflow, /// Underflow of stored blockchains counter ChainsUnderflow, /// EndOfFile reached while parsing EndOfFile, /// Format of the header is invalid MalformedHeader, /// Format of the BIP141 witness transaction output is invalid MalformedWitnessOutput, // Format of the P2PKH transaction output is invalid MalformedP2PKHOutput, // Format of the P2SH transaction output is invalid MalformedP2SHOutput, /// Format of the OP_RETURN transaction output is invalid MalformedOpReturnOutput, // Output does not match format of supported output types (Witness, P2PKH, P2SH) UnsupportedOutputFormat, // Input does not match format of supported input types (Witness, P2PKH, P2SH) UnsupportedInputFormat, /// There are no NO_DATA blocks in this BlockChain NoDataEmpty, /// User supplied an invalid address InvalidBtcHash, /// User supplied an invalid script InvalidScript, /// Specified invalid Bitcoin address 
InvalidBtcAddress, /// Arithmetic overflow ArithmeticOverflow, /// Arithmetic underflow ArithmeticUnderflow, /// Relayer is not registered RelayerNotAuthorized, } /// Store Bitcoin block headers #[pallet::storage] pub(super) type BlockHeaders<T: Config> = StorageMap<_, Blake2_128Concat, H256Le, RichBlockHeader<T::AccountId, T::BlockNumber>, ValueQuery>; /// Priority queue of BlockChain elements, ordered by the maximum height (descending). /// The first index into this mapping (0) is considered to be the longest chain. The value /// of the entry is the index into `ChainsIndex` to retrieve the `BlockChain`. #[pallet::storage] pub(super) type Chains<T: Config> = StorageMap<_, Blake2_128Concat, u32, u32>; /// Auxiliary mapping of chains ids to `BlockChain` entries. The first index into this /// mapping (0) is considered to be the Bitcoin main chain. #[pallet::storage] pub(super) type ChainsIndex<T: Config> = StorageMap<_, Blake2_128Concat, u32, BlockChain>; /// Stores a mapping from (chain_index, block_height) to block hash #[pallet::storage] pub(super) type ChainsHashes<T: Config> = StorageDoubleMap<_, Blake2_128Concat, u32, Blake2_128Concat, u32, H256Le, ValueQuery>; /// Store the current blockchain tip #[pallet::storage] pub(super) type BestBlock<T: Config> = StorageValue<_, H256Le, ValueQuery>; /// Store the height of the best block #[pallet::storage] pub(super) type BestBlockHeight<T: Config> = StorageValue<_, u32, ValueQuery>; /// BTC height when the relay was initialized #[pallet::storage] pub(super) type StartBlockHeight<T: Config> = StorageValue<_, u32, ValueQuery>; /// Increment-only counter used to track new BlockChain entries #[pallet::storage] pub(super) type ChainCounter<T: Config> = StorageValue<_, u32, ValueQuery>; /// Global security parameter k for stable Bitcoin transactions #[pallet::storage] #[pallet::getter(fn bitcoin_confirmations)] pub(super) type StableBitcoinConfirmations<T: Config> = StorageValue<_, u32, ValueQuery>; /// Global security 
parameter k for stable Parachain transactions #[pallet::storage] #[pallet::getter(fn parachain_confirmations)] pub(super) type StableParachainConfirmations<T: Config> = StorageValue<_, T::BlockNumber, ValueQuery>; /// Whether the module should perform difficulty checks. #[pallet::storage] #[pallet::getter(fn disable_difficulty_check)] pub(super) type DisableDifficultyCheck<T: Config> = StorageValue<_, bool, ValueQuery>; /// Whether the module should perform inclusion checks. #[pallet::storage] #[pallet::getter(fn disable_inclusion_check)] pub(super) type DisableInclusionCheck<T: Config> = StorageValue<_, bool, ValueQuery>; /// Whether the module should perform OP_RETURN checks. #[pallet::storage] #[pallet::getter(fn disable_op_return_check)] pub(super) type DisableOpReturnCheck<T: Config> = StorageValue<_, bool, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig<T: Config> { /// Global security parameter k for stable Bitcoin transactions pub bitcoin_confirmations: u32, /// Global security parameter k for stable Parachain transactions pub parachain_confirmations: T::BlockNumber, /// Whether the module should perform difficulty checks. pub disable_difficulty_check: bool, /// Whether the module should perform inclusion checks. pub disable_inclusion_check: bool, /// Whether the module should perform OP_RETURN checks. 
pub disable_op_return_check: bool, } #[cfg(feature = "std")] impl<T: Config> Default for GenesisConfig<T> { fn default() -> Self { Self { bitcoin_confirmations: Default::default(), parachain_confirmations: Default::default(), disable_difficulty_check: Default::default(), disable_inclusion_check: Default::default(), disable_op_return_check: Default::default(), } } } #[pallet::genesis_build] impl<T: Config> GenesisBuild<T> for GenesisConfig<T> { fn build(&self) { StableBitcoinConfirmations::<T>::put(self.bitcoin_confirmations); StableParachainConfirmations::<T>::put(self.parachain_confirmations); DisableDifficultyCheck::<T>::put(self.disable_difficulty_check); DisableInclusionCheck::<T>::put(self.disable_inclusion_check); DisableOpReturnCheck::<T>::put(self.disable_op_return_check); } } } /// Difficulty Adjustment Interval pub const DIFFICULTY_ADJUSTMENT_INTERVAL: u32 = 2016; /// Target Spacing: 10 minutes (600 seconds) // https://github.com/bitcoin/bitcoin/blob/5ba5becbb5d8c794efe579caeea7eea64f895a13/src/chainparams.cpp#L78 pub const TARGET_SPACING: u32 = 10 * 60; /// Target Timespan: 2 weeks (1209600 seconds) // https://github.com/bitcoin/bitcoin/blob/5ba5becbb5d8c794efe579caeea7eea64f895a13/src/chainparams.cpp#L77 pub const TARGET_TIMESPAN: u32 = 14 * 24 * 60 * 60; // Used in Bitcoin's retarget algorithm pub const TARGET_TIMESPAN_DIVISOR: u32 = 4; // Accepted minimum number of transaction outputs for okd validation pub const ACCEPTED_MIN_TRANSACTION_OUTPUTS: u32 = 1; // Accepted minimum number of transaction outputs for op-return validation pub const ACCEPTED_MIN_TRANSACTION_OUTPUTS_WITH_OP_RETURN: u32 = 2; // Accepted maximum number of transaction outputs for validation pub const ACCEPTED_MAX_TRANSACTION_OUTPUTS: u32 = 32; /// Unrounded Maximum Target /// 0x00000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF pub const UNROUNDED_MAX_TARGET: U256 = U256([ <u64>::max_value(), <u64>::max_value(), <u64>::max_value(), 0x0000_0000_ffff_ffffu64, ]); /// 
Main chain id pub const MAIN_CHAIN_ID: u32 = 0; /// Number of outputs expected in the accepted transaction format /// See: <https://interlay.gitlab.io/polkabtc-spec/btcrelay-spec/intro/accepted-format.html> pub const ACCEPTED_NO_TRANSACTION_OUTPUTS: u32 = 2; macro_rules! extract_op_return { ($($tx:expr),*) => { { $( if let Some(Ok(data)) = $tx.map(|tx| tx.script.extract_op_return_data()) { data } else )* { return None; } } }; } fn maybe_get_payment_value(output: &TransactionOutput, recipient_btc_address: &BtcAddress) -> Option<i64> { match output.extract_address() { Ok(extr_recipient_btc_address) => { if *recipient_btc_address == extr_recipient_btc_address { Some(output.value) } else { None } } _ => None, } } #[cfg_attr(test, mockable)] impl<T: Config> Pallet<T> { pub fn initialize(relayer: T::AccountId, raw_block_header: RawBlockHeader, block_height: u32) -> DispatchResult { // Check if BTC-Relay was already initialized ensure!(!Self::best_block_exists(), Error::<T>::AlreadyInitialized); // Parse the block header bytes to extract the required info let basic_block_header = parse_block_header(&raw_block_header).map_err(Error::<T>::from)?; let block_header_hash = raw_block_header.hash(); // register the current height to track stable parachain confirmations let para_height = ext::security::active_block_number::<T>(); // construct the BlockChain struct let blockchain = Self::initialize_blockchain(block_height, block_header_hash); // Create rich block header let block_header = RichBlockHeader::<T::AccountId, T::BlockNumber> { block_hash: block_header_hash, block_header: basic_block_header, block_height, chain_ref: blockchain.chain_id, account_id: relayer.clone(), para_height, }; // Store a new BlockHeader struct in BlockHeaders Self::set_block_header_from_hash(block_header_hash, &block_header); // Store a pointer to BlockChain in ChainsIndex Self::set_block_chain_from_id(MAIN_CHAIN_ID, &blockchain); // Store the reference to the new BlockChain in Chains 
Self::set_chain_from_position_and_id(0, MAIN_CHAIN_ID); // Set BestBlock and BestBlockHeight to the submitted block Self::set_best_block(block_header_hash); Self::set_best_block_height(block_height); StartBlockHeight::<T>::set(block_height); // Emit a Initialized Event Self::deposit_event(<Event<T>>::Initialized(block_height, block_header_hash, relayer)); Ok(()) } /// wraps _store_block_header, but differentiates between DuplicateError and OutdatedError #[transactional] pub fn store_block_header(relayer: &T::AccountId, raw_block_header: RawBlockHeader) -> DispatchResult { let ret = Self::_store_block_header(relayer, raw_block_header); if let Err(err) = ret { if err == DispatchError::from(Error::<T>::DuplicateBlock) { // if this is not the chain head, return OutdatedBlock error let this_header_hash = raw_block_header.hash(); let best_header_hash = Self::get_best_block(); ensure!(this_header_hash == best_header_hash, Error::<T>::OutdatedBlock); } } ret } fn _store_block_header(relayer: &T::AccountId, raw_block_header: RawBlockHeader) -> DispatchResult { // Make sure Parachain is not shutdown ext::security::ensure_parachain_status_not_shutdown::<T>()?; // Parse the block header bytes to extract the required info let basic_block_header = Self::verify_block_header(&raw_block_header)?; let block_header_hash = raw_block_header.hash(); let prev_header = Self::get_block_header_from_hash(basic_block_header.hash_prev_block)?; // get the block chain of the previous header let prev_blockchain = Self::get_block_chain_from_id(prev_header.chain_ref)?; // Update the current block header // check if the prev block is the highest block in the chain // load the previous block header block height let prev_block_height = prev_header.block_height; // update the current block header with height and chain ref // Set the height of the block header let current_block_height = prev_block_height + 1; // Update the blockchain // check if we create a new blockchain or extend the existing one 
runtime_print!("Prev max height: {:?}", prev_blockchain.max_height); runtime_print!("Prev block height: {:?}", prev_block_height); let is_fork = prev_blockchain.max_height != prev_block_height; runtime_print!("Fork detected: {:?}", is_fork); let blockchain = if is_fork { // create new blockchain element Self::create_blockchain(current_block_height, block_header_hash) } else { // extend the current chain Self::extend_blockchain(current_block_height, &block_header_hash, prev_blockchain)? }; // register the current height to track stable parachain confirmations let para_height = ext::security::active_block_number::<T>(); // Create rich block header let block_header = RichBlockHeader::<T::AccountId, T::BlockNumber> { block_hash: block_header_hash, block_header: basic_block_header, block_height: current_block_height, chain_ref: blockchain.chain_id, account_id: relayer.clone(), para_height, }; // Store a new BlockHeader struct in BlockHeaders Self::set_block_header_from_hash(block_header_hash, &block_header); // Storing the blockchain depends if we extend or create a new chain if is_fork { // create a new chain // Store a pointer to BlockChain in ChainsIndex Self::set_block_chain_from_id(blockchain.chain_id, &blockchain); // Store the reference to the blockchain in Chains Self::insert_sorted(&blockchain)?; } else { // extended the chain // Update the pointer to BlockChain in ChainsIndex ChainsIndex::<T>::mutate(blockchain.chain_id, |_b| &blockchain); // check if ordering of Chains needs updating Self::check_and_do_reorg(&blockchain)?; if blockchain.chain_id == MAIN_CHAIN_ID { Self::set_best_block(block_header_hash); Self::set_best_block_height(current_block_height) } }; // Determine if this block extends the main chain or a fork let current_best_block = Self::get_best_block(); if current_best_block == block_header_hash { // extends the main chain Self::deposit_event(<Event<T>>::StoreMainChainHeader( current_block_height, block_header_hash, relayer.clone(), )); } else { 
// created a new fork or updated an existing one Self::deposit_event(<Event<T>>::StoreForkHeader( blockchain.chain_id, current_block_height, block_header_hash, relayer.clone(), )); }; Ok(()) } pub fn _verify_and_validate_transaction( raw_merkle_proof: Vec<u8>, raw_tx: Vec<u8>, recipient_btc_address: BtcAddress, minimum_btc: Option<i64>, op_return_id: Option<H256>, confirmations: Option<u32>, ) -> Result<(BtcAddress, i64), DispatchError> { let transaction = Self::parse_transaction(&raw_tx)?; // Verify that the transaction is indeed included in the main chain Self::_verify_transaction_inclusion(transaction.tx_id(), raw_merkle_proof, confirmations)?; // Parse transaction and check that it matches the given parameters Self::_validate_transaction( transaction, recipient_btc_address, minimum_btc, op_return_id.map(|x| x.as_bytes().to_vec()), ) } pub fn _verify_transaction_inclusion( tx_id: H256Le, raw_merkle_proof: Vec<u8>, confirmations: Option<u32>, ) -> Result<(), DispatchError> { if Self::disable_inclusion_check() { return Ok(()); } let best_block_height = Self::get_best_block_height(); Self::ensure_no_ongoing_fork(best_block_height)?; let merkle_proof = Self::parse_merkle_proof(&raw_merkle_proof)?; let rich_header = Self::get_block_header_from_hash(merkle_proof.block_header.hash().map_err(Error::<T>::from)?)?; ensure!(rich_header.chain_ref == MAIN_CHAIN_ID, Error::<T>::InvalidChainID); let block_height = rich_header.block_height; Self::transaction_verification_allowed(block_height)?; // This call fails if not enough confirmations Self::check_bitcoin_confirmations(best_block_height, confirmations, block_height)?; // This call fails if the block was stored too recently Self::check_parachain_confirmations(rich_header.para_height)?; let proof_result = Self::verify_merkle_proof(&merkle_proof)?; // fail if the transaction hash is invalid ensure!(proof_result.transaction_hash == tx_id, Error::<T>::InvalidTxid); // fail if the merkle root is invalid ensure!( 
            proof_result.extracted_root == rich_header.block_header.merkle_root,
            Error::<T>::InvalidMerkleProof
        );
        Ok(())
    }

    /// Extract all payments and op_return outputs from a transaction.
    /// Rejects transactions with too many outputs.
    ///
    /// # Arguments
    ///
    /// * `transaction` - Bitcoin transaction
    pub fn extract_outputs(
        transaction: Transaction,
    ) -> Result<(Vec<(i64, BtcAddress)>, Vec<(i64, Vec<u8>)>), Error<T>> {
        ensure!(
            transaction.outputs.len() <= ACCEPTED_MAX_TRANSACTION_OUTPUTS as usize,
            Error::<T>::MalformedTransaction
        );

        let mut payments = Vec::new();
        let mut op_returns = Vec::new();
        for tx in transaction.outputs {
            // an output is classified either as an address payment or an OP_RETURN
            // data carrier; outputs that are neither are silently skipped
            if let Ok(address) = tx.extract_address() {
                payments.push((tx.value, address));
            } else if let Ok(data) = tx.script.extract_op_return_data() {
                op_returns.push((tx.value, data));
            }
        }

        Ok((payments, op_returns))
    }

    /// Extract the payment value from the first output with an address
    /// that matches the `recipient_btc_address`.
    ///
    /// # Arguments
    ///
    /// * `transaction` - Bitcoin transaction
    /// * `recipient_btc_address` - expected payment recipient
    fn extract_payment_value(
        transaction: Transaction,
        recipient_btc_address: BtcAddress,
    ) -> Result<i64, DispatchError> {
        ensure!(
            // We would typically expect two outputs here (payment, refund) but
            // the input amount may be exact so we would only require one
            transaction.outputs.len() >= ACCEPTED_MIN_TRANSACTION_OUTPUTS as usize,
            Error::<T>::MalformedTransaction
        );

        // Check if payment is first output
        let output0 = transaction
            .outputs
            .get(0)
            .and_then(|output| maybe_get_payment_value(output, &recipient_btc_address));

        // Check if payment is second output
        let output1 = transaction
            .outputs
            .get(1)
            .and_then(|output| maybe_get_payment_value(output, &recipient_btc_address));

        // Check if payment is third output
        let output2 = transaction
            .outputs
            .get(2)
            .and_then(|output| maybe_get_payment_value(output, &recipient_btc_address));

        // exactly one of the first three outputs may pay the recipient
        match (output0, output1, output2) {
            (Some(o), None, None) | (None, Some(o), None) | (None, None, Some(o)) => Ok(o),
            // Payment UTXO sends to an incorrect address
            // OR contains a duplicate recipient
            _ => Err(Error::<T>::InvalidPayment.into()),
        }
    }

    /// Extract the payment value and `OP_RETURN` payload from the first
    /// output with an address that matches the `recipient_btc_address`.
    ///
    /// # Arguments
    ///
    /// * `transaction` - Bitcoin transaction
    /// * `recipient_btc_address` - expected payment recipient
    fn extract_payment_value_and_op_return(
        transaction: Transaction,
        recipient_btc_address: BtcAddress,
    ) -> Result<(i64, Vec<u8>), DispatchError> {
        ensure!(
            // We would typically expect three outputs (payment, op_return, refund) but
            // exceptionally the input amount may be exact so we would only require two
            transaction.outputs.len() >= ACCEPTED_MIN_TRANSACTION_OUTPUTS_WITH_OP_RETURN as usize,
            Error::<T>::MalformedTransaction
        );

        // Check if payment is first output
        let output0 = transaction
            .outputs
            .get(0)
            .filter(|output| matches!(output.extract_address(), Ok(address) if address == recipient_btc_address))
            .and_then(|output| {
                Some((
                    output.value,
                    // the OP_RETURN payload must live in one of the two other outputs
                    extract_op_return!(transaction.outputs.get(1), transaction.outputs.get(2)),
                ))
            });

        // Check if payment is second output
        let output1 = transaction
            .outputs
            .get(1)
            .filter(|output| matches!(output.extract_address(), Ok(address) if address == recipient_btc_address))
            .and_then(|output| {
                Some((
                    output.value,
                    extract_op_return!(transaction.outputs.get(0), transaction.outputs.get(2)),
                ))
            });

        // Check if payment is third output
        let output2 = transaction
            .outputs
            .get(2)
            .filter(|output| matches!(output.extract_address(), Ok(address) if address == recipient_btc_address))
            .and_then(|output| {
                Some((
                    output.value,
                    extract_op_return!(transaction.outputs.get(0), transaction.outputs.get(1)),
                ))
            });

        match (output0, output1, output2) {
            (Some(o), None, None) | (None, Some(o), None) | (None, None, Some(o)) => Ok(o),
            // Payment UTXO sends to an incorrect address
            // OR contains a duplicate recipient
            // OR does not contain an OP_RETURN output
            _ => Err(Error::<T>::InvalidPayment.into()),
        }
    }

    /// Returns true if the OP_RETURN check is disabled via the storage flag.
    pub fn is_op_return_disabled() -> bool {
        Self::disable_op_return_check()
    }

    /// Checks if transaction is valid. If so, it returns the first origin address, which can be
    /// used as the destination address for a potential refund, and the payment value
    fn _validate_transaction(
        transaction: Transaction,
        recipient_btc_address: BtcAddress,
        minimum_btc: Option<i64>,
        op_return_id: Option<Vec<u8>>,
    ) -> Result<(BtcAddress, i64), DispatchError> {
        // the refund address is taken from the transaction's first input
        let input_address = transaction
            .inputs
            .get(0)
            .ok_or(Error::<T>::MalformedTransaction)?
            .extract_address()
            .map_err(|_| Error::<T>::MalformedTransaction)?;

        let extr_payment_value = if Self::is_op_return_disabled() {
            Self::extract_payment_value(transaction, recipient_btc_address)?
        } else if let Some(op_return_id) = op_return_id {
            // NOTE: op_return UTXO should not contain any value
            let (extr_payment_value, extr_op_return) =
                Self::extract_payment_value_and_op_return(transaction, recipient_btc_address)?;

            // Check if data UTXO has correct OP_RETURN value
            ensure!(extr_op_return == op_return_id, Error::<T>::InvalidOpReturn);

            extr_payment_value
        } else {
            // using the on-chain key derivation scheme we only expect a simple
            // payment to the vault's new deposit address
            Self::extract_payment_value(transaction, recipient_btc_address)?
        };

        // If a minimum was specified, check if the transferred amount is sufficient
        if let Some(minimum) = minimum_btc {
            ensure!(extr_payment_value >= minimum, Error::<T>::InsufficientValue);
        }

        Ok((input_address, extr_payment_value))
    }

    /// Returns true once the relay has been initialized and the first submitted block
    /// has accumulated the required number of stable Bitcoin confirmations.
    pub fn is_fully_initialized() -> Result<bool, DispatchError> {
        if !StartBlockHeight::<T>::exists() {
            return Ok(false);
        }

        let required_height = StartBlockHeight::<T>::get()
            .checked_add(StableBitcoinConfirmations::<T>::get())
            .ok_or(Error::<T>::ArithmeticOverflow)?;
        let best = BestBlockHeight::<T>::get();

        Ok(best >= required_height)
    }

    // ********************************
    // START: Storage getter functions
    // ********************************

    /// Get chain id from position (sorted by max block height)
    fn get_chain_id_from_position(position: u32) -> Result<u32, DispatchError> {
        Chains::<T>::get(position).ok_or(Error::<T>::InvalidChainID.into())
    }

    /// Get the position of the fork in Chains
    // NOTE(review): linear scan over the Chains map; acceptable as long as the number
    // of tracked forks stays small
    fn get_chain_position_from_chain_id(chain_id: u32) -> Result<u32, DispatchError> {
        for (k, v) in Chains::<T>::iter() {
            if v == chain_id {
                return Ok(k);
            }
        }
        Err(Error::<T>::ForkIdNotFound.into())
    }

    /// Get a blockchain from the id
    fn get_block_chain_from_id(chain_id: u32) -> Result<BlockChain, DispatchError> {
        ChainsIndex::<T>::get(chain_id).ok_or(Error::<T>::InvalidChainID.into())
    }

    /// Get the current best block hash
    pub fn get_best_block() -> H256Le {
        BestBlock::<T>::get()
    }

    /// Check if a best block hash is set
    fn best_block_exists() -> bool {
        BestBlock::<T>::exists()
    }

    /// Get the best block height
    pub fn get_best_block_height() -> u32 {
        BestBlockHeight::<T>::get()
    }

    /// Get the current chain counter
    fn get_chain_counter() -> u32 {
        ChainCounter::<T>::get()
    }

    /// Get a block hash from a blockchain
    ///
    /// # Arguments
    ///
    /// * `chain_id`: the id of the blockchain to search in
    /// * `block_height`: the height of the block header
    fn get_block_hash(chain_id: u32, block_height: u32) -> Result<H256Le, DispatchError> {
        if !Self::block_exists(chain_id, block_height) {
            return Err(Error::<T>::MissingBlockHeight.into());
        }
        Ok(ChainsHashes::<T>::get(chain_id, block_height))
    }

    /// Get a block header from its hash
    fn get_block_header_from_hash(
        block_hash: H256Le,
    ) -> Result<RichBlockHeader<T::AccountId, T::BlockNumber>, DispatchError> {
        if BlockHeaders::<T>::contains_key(block_hash) {
            return Ok(BlockHeaders::<T>::get(block_hash));
        }
        Err(Error::<T>::BlockNotFound.into())
    }

    /// Check if a block header exists
    pub fn block_header_exists(block_hash: H256Le) -> bool {
        BlockHeaders::<T>::contains_key(block_hash)
    }

    /// Get a block header from the given blockchain at the given height
    fn get_block_header_from_height(
        blockchain: &BlockChain,
        block_height: u32,
    ) -> Result<RichBlockHeader<T::AccountId, T::BlockNumber>, DispatchError> {
        let block_hash = Self::get_block_hash(blockchain.chain_id, block_height)?;
        Self::get_block_header_from_hash(block_hash)
    }

    /// Storage setter functions

    /// Set a new chain with position and id
    fn set_chain_from_position_and_id(position: u32, id: u32) {
        Chains::<T>::insert(position, id);
    }

    /// Swap chain elements
    fn swap_chain(pos_1: u32, pos_2: u32) {
        // swaps the values of two keys
        Chains::<T>::swap(pos_1, pos_2)
    }

    /// Remove a chain id from chains
    fn remove_blockchain_from_chain(position: u32) -> Result<(), DispatchError> {
        // swap the element with the last element in the mapping
        // collect the unsorted chains iterable as a vector and sort it by index
        let mut chains = Chains::<T>::iter().collect::<Vec<(u32, u32)>>();
        chains.sort_by_key(|k| k.0);

        // get the last position as stored in the list
        let last_pos = match chains.len() {
            0 => return Err(Error::<T>::ForkIdNotFound.into()),
            // chains stores (position, index)
            n => chains[n - 1].0,
        };
        Self::swap_chain(position, last_pos);
        // don't remove main chain id
        if last_pos > 0 {
            // remove the old head (now the value at the initial position)
            Chains::<T>::remove(last_pos);
        }
        Ok(())
    }

    /// Set a new blockchain in ChainsIndex
    fn set_block_chain_from_id(id: u32, chain: &BlockChain) {
        ChainsIndex::<T>::insert(id, &chain);
    }

    /// Update a blockchain in ChainsIndex
    fn mutate_block_chain_from_id(id: u32, chain: BlockChain) {
        ChainsIndex::<T>::mutate(id, |b| *b = Some(chain));
    }

    /// Remove a blockchain element from ChainsIndex
    fn remove_blockchain_from_chain_index(id: u32) {
        ChainsIndex::<T>::remove(id);
    }

    /// Set a new block header
    fn set_block_header_from_hash(hash: H256Le, header: &RichBlockHeader<T::AccountId, T::BlockNumber>) {
        BlockHeaders::<T>::insert(hash, header);
    }

    /// Update the chain_ref of a block header
    fn mutate_block_header_from_chain_id(hash: &H256Le, chain_ref: u32) {
        BlockHeaders::<T>::mutate(&hash, |header| header.chain_ref = chain_ref);
    }

    /// Set a new best block
    fn set_best_block(hash: H256Le) {
        BestBlock::<T>::put(hash);
    }

    /// Set a new best block height
    fn set_best_block_height(height: u32) {
        BestBlockHeight::<T>::put(height);
    }

    /// Increment and persist the chain counter, returning the new value
    // NOTE(review): unchecked `+ 1` — a counter overflow would panic in debug builds
    // and wrap in release; practically unreachable, but confirm this is acceptable
    fn increment_chain_counter() -> u32 {
        let new_counter = Self::get_chain_counter() + 1;
        ChainCounter::<T>::put(new_counter);

        new_counter
    }

    /// Initialize the new main blockchain with a single block
    fn initialize_blockchain(block_height: u32, block_hash: H256Le) -> BlockChain {
        let chain_id = MAIN_CHAIN_ID;

        // generate an empty blockchain
        Self::generate_blockchain(chain_id, block_height, block_hash)
    }

    /// Create a new blockchain element with a new chain id
    fn create_blockchain(block_height: u32, block_hash: H256Le) -> BlockChain {
        // get a new chain id
        let chain_id: u32 = Self::increment_chain_counter();

        // generate an empty blockchain
        Self::generate_blockchain(chain_id, block_height, block_hash)
    }

    /// Generate the raw blockchain from a chain id and with a single block
    fn generate_blockchain(chain_id: u32, block_height: u32, block_hash: H256Le) -> BlockChain {
        // initialize an empty chain
        Self::insert_block_hash(chain_id, block_height, block_hash);

        BlockChain {
            chain_id,
            start_height: block_height,
            max_height: block_height,
            no_data: BTreeSet::new(),
            invalid: BTreeSet::new(),
        }
    }

    /// Store a block hash at the given height of the given chain
    fn insert_block_hash(chain_id: u32, block_height: u32, block_hash: H256Le) {
        ChainsHashes::<T>::insert(chain_id, block_height, block_hash);
    }

    /// Remove the block hash at the given height of the given chain
    fn remove_block_hash(chain_id: u32, block_height: u32) {
        ChainsHashes::<T>::remove(chain_id, block_height);
    }

    /// Check whether a block hash is stored at the given height of the given chain
    fn block_exists(chain_id: u32, block_height: u32) -> bool {
        ChainsHashes::<T>::contains_key(chain_id, block_height)
    }

    /// Count the blocks stored for the given chain
    fn _blocks_count(chain_id: u32) -> usize {
        ChainsHashes::<T>::iter_prefix_values(chain_id).count()
    }

    /// Add a new block header to an existing blockchain
    fn extend_blockchain(
        block_height: u32,
        block_hash: &H256Le,
        prev_blockchain: BlockChain,
    ) -> Result<BlockChain, DispatchError> {
        let mut blockchain = prev_blockchain;

        if Self::block_exists(blockchain.chain_id, block_height) {
            return Err(Error::<T>::DuplicateBlock.into());
        }
        Self::insert_block_hash(blockchain.chain_id, block_height, *block_hash);

        blockchain.max_height = block_height;
        Self::set_block_chain_from_id(blockchain.chain_id, &blockchain);

        Ok(blockchain)
    }

    // Get required confirmations for stable transactions
    fn get_stable_transaction_confirmations() -> u32 {
        Self::bitcoin_confirmations()
    }

    // *********************************
    // END: Storage getter functions
    // *********************************

    // Wrapper functions around bitcoin lib for testing purposes

    /// Parse raw transaction bytes, mapping parser errors to pallet errors
    fn parse_transaction(raw_tx: &[u8]) -> Result<Transaction, DispatchError> {
        Ok(parse_transaction(&raw_tx).map_err(Error::<T>::from)?)
    }

    /// Parse an encoded merkle proof, mapping parser errors to pallet errors
    fn parse_merkle_proof(raw_merkle_proof: &[u8]) -> Result<MerkleProof, DispatchError> {
        MerkleProof::parse(&raw_merkle_proof).map_err(|err| Error::<T>::from(err).into())
    }

    /// Verify a parsed merkle proof, mapping verification errors to pallet errors
    fn verify_merkle_proof(merkle_proof: &MerkleProof) -> Result<ProofResult, DispatchError> {
        merkle_proof.verify_proof().map_err(|err| Error::<T>::from(err).into())
    }

    /// Parses and verifies a raw Bitcoin block header.
    ///
    /// # Arguments
    ///
    /// * `raw_block_header` - 80-byte block header
    ///
    /// # Returns
    ///
    /// * `pure_block_header` - PureBlockHeader representation of the 80-byte block header
    fn verify_block_header(raw_block_header: &RawBlockHeader) -> Result<BlockHeader, DispatchError> {
        let basic_block_header = parse_block_header(&raw_block_header).map_err(Error::<T>::from)?;

        let block_header_hash = raw_block_header.hash();

        // Check that the block header is not yet stored in BTC-Relay
        ensure!(
            !Self::block_header_exists(block_header_hash),
            Error::<T>::DuplicateBlock
        );

        // Check that the referenced previous block header exists in BTC-Relay
        let prev_block_header = Self::get_block_header_from_hash(basic_block_header.hash_prev_block)?;
        // Check that the PoW hash satisfies the target set in the block header
        ensure!(
            block_header_hash.as_u256() < basic_block_header.target,
            Error::<T>::LowDiff
        );

        // Check that the diff. target is indeed correctly set in the block header, i.e., check for re-target.
        let block_height = prev_block_header.block_height + 1;

        if Self::disable_difficulty_check() {
            return Ok(basic_block_header);
        }

        // a retarget happens every DIFFICULTY_ADJUSTMENT_INTERVAL (2016) blocks
        let expected_target = if block_height >= 2016 && block_height % DIFFICULTY_ADJUSTMENT_INTERVAL == 0 {
            Self::compute_new_target(&prev_block_header, block_height)?
        } else {
            prev_block_header.block_header.target
        };

        ensure!(
            basic_block_header.target == expected_target,
            Error::<T>::DiffTargetHeader
        );

        Ok(basic_block_header)
    }

    /// Computes Bitcoin's PoW retarget algorithm for a given block height
    ///
    /// # Arguments
    ///
    /// * `prev_block_header`: previous block header
    /// * `block_height` : block height of new target
    fn compute_new_target(
        prev_block_header: &RichBlockHeader<T::AccountId, T::BlockNumber>,
        block_height: u32,
    ) -> Result<U256, DispatchError> {
        // get time of last retarget
        let last_retarget_time = Self::get_last_retarget_time(prev_block_header.chain_ref, block_height)?;
        // Compute new target
        // NOTE(review): Bitcoin clamps the *measured* timespan into
        // [TARGET_TIMESPAN / TARGET_TIMESPAN_DIVISOR, TARGET_TIMESPAN * TARGET_TIMESPAN_DIVISOR]
        // and otherwise uses the measured value; this branch only ever selects the lower or
        // upper bound, never the measured timespan itself — verify against the Bitcoin
        // retarget specification. Also, the subtraction assumes the previous header's
        // timestamp is not older than the last retarget time (would underflow otherwise) —
        // confirm upstream validation guarantees this.
        let actual_timespan = if ((prev_block_header.block_header.timestamp as u64 - last_retarget_time) as u32)
            < (TARGET_TIMESPAN / TARGET_TIMESPAN_DIVISOR)
        {
            TARGET_TIMESPAN / TARGET_TIMESPAN_DIVISOR
        } else {
            TARGET_TIMESPAN * TARGET_TIMESPAN_DIVISOR
        };

        let new_target = U256::from(actual_timespan)
            .checked_mul(prev_block_header.block_header.target)
            .ok_or(Error::<T>::ArithmeticOverflow)?
            .checked_div(U256::from(TARGET_TIMESPAN))
            .ok_or(Error::<T>::ArithmeticUnderflow)?;

        // ensure target does not exceed max. target
        Ok(if new_target > UNROUNDED_MAX_TARGET {
            UNROUNDED_MAX_TARGET
        } else {
            new_target
        })
    }

    /// Returns the timestamp of the last difficulty retarget on the specified BlockChain, given the current block
    /// height
    ///
    /// # Arguments
    ///
    /// * `chain_ref` - BlockChain identifier
    /// * `block_height` - current block height
    fn get_last_retarget_time(chain_ref: u32, block_height: u32) -> Result<u64, DispatchError> {
        let block_chain = Self::get_block_chain_from_id(chain_ref)?;
        let last_retarget_header =
            Self::get_block_header_from_height(&block_chain, block_height - DIFFICULTY_ADJUSTMENT_INTERVAL)?;
        Ok(last_retarget_header.block_header.timestamp as u64)
    }

    /// Swap the main chain with a fork. This method takes the starting height
    /// of the fork and replaces each block in the main chain with the blocks
    /// in the fork.
    /// It moves the replaced blocks in the main chain to a new
    /// fork.
    /// Last, it replaces the chain_ref of each block header in the new main
    /// chain to the MAIN_CHAIN_ID and each block header in the new fork to the
    /// new chain id.
    ///
    /// # Arguments
    ///
    /// * `fork` - the fork that is going to become the main chain
    fn swap_main_blockchain(fork: &BlockChain) -> Result<(), DispatchError> {
        // load the main chain
        let mut main_chain = Self::get_block_chain_from_id(MAIN_CHAIN_ID)?;

        // the start height of the fork
        let start_height = fork.start_height;

        // create a new blockchain element to store the part of the main chain
        // that is being forked
        // generate a chain id
        let chain_id = Self::increment_chain_counter();

        // maybe split off the no data elements
        // check if there is a no_data block element
        // that is greater than start_height
        // NOTE(review): `position` returns an *ordinal index* into the sorted set, but
        // `split_off` expects a set *value* (a block height); the two only coincide by
        // accident — confirm whether this index-as-height usage is intended
        let index_no_data = main_chain
            .no_data
            .iter()
            .position(|&h| h >= start_height)
            .map(|v| v as u32);
        let no_data = match index_no_data {
            Some(index) => main_chain.no_data.split_off(&index),
            None => BTreeSet::new(),
        };

        // maybe split off the invalid elements
        let index_invalid = main_chain
            .invalid
            .iter()
            .position(|&h| h >= start_height)
            .map(|v| v as u32);
        let invalid = match index_invalid {
            Some(index) => main_chain.invalid.split_off(&index),
            None => BTreeSet::new(),
        };

        // store the main chain part that is going to be replaced by the new fork
        // into the forked_main_chain element
        let forked_main_chain: BlockChain = BlockChain {
            chain_id,
            start_height,
            max_height: main_chain.max_height,
            no_data,
            invalid,
        };

        // the main chain adopts the fork's tip and error flags
        main_chain.max_height = fork.max_height;
        main_chain.no_data.append(&mut fork.no_data.clone());
        main_chain.invalid.append(&mut fork.invalid.clone());

        // get the best block hash
        let best_block = Self::get_block_hash(fork.chain_id, fork.max_height)?;

        // get the position of the fork in Chains
        let position: u32 = Self::get_chain_position_from_chain_id(fork.chain_id)?;

        // Update the stored main chain
        Self::set_block_chain_from_id(MAIN_CHAIN_ID, &main_chain);

        // Set BestBlock and BestBlockHeight to the submitted block
        Self::set_best_block(best_block);
        Self::set_best_block_height(main_chain.max_height);

        // remove the fork from storage
        Self::remove_blockchain_from_chain_index(fork.chain_id);
        Self::remove_blockchain_from_chain(position)?;

        // store the forked main chain
        Self::set_block_chain_from_id(forked_main_chain.chain_id, &forked_main_chain);

        // insert the reference to the forked main chain in Chains
        Self::insert_sorted(&forked_main_chain)?;

        // update all the forked block headers:
        // move the replaced main-chain blocks over to the new fork element
        for height in fork.start_height..=forked_main_chain.max_height {
            let block_hash = Self::get_block_hash(main_chain.chain_id, height)?;
            Self::insert_block_hash(forked_main_chain.chain_id, height, block_hash);
            Self::mutate_block_header_from_chain_id(&block_hash, forked_main_chain.chain_id);
            Self::remove_block_hash(MAIN_CHAIN_ID, height);
        }

        // update all new main chain block headers:
        // re-label the fork's blocks as main-chain blocks
        for height in fork.start_height..=fork.max_height {
            let block = Self::get_block_hash(fork.chain_id, height)?;
            Self::mutate_block_header_from_chain_id(&block, MAIN_CHAIN_ID);
            Self::insert_block_hash(MAIN_CHAIN_ID, height, block);
        }
        ChainsHashes::<T>::remove_prefix(fork.chain_id);
        // if the adopted fork carries no error flags, the relay may leave its error state
        if !fork.is_invalid() && !fork.is_no_data() {
            Self::recover_if_needed()?
        }

        Ok(())
    }

    /// Checks if a newly inserted fork results in an update to the sorted
    /// Chains mapping. This happens when the max height of the fork is greater
    /// than the max height of the previous element in the Chains mapping.
    ///
    /// # Arguments
    ///
    /// * `fork` - the blockchain element that may cause a reorg
    fn check_and_do_reorg(fork: &BlockChain) -> Result<(), DispatchError> {
        // Check if the ordering needs updating
        // if the fork is the main chain, we don't need to update the ordering
        if fork.chain_id == MAIN_CHAIN_ID {
            return Ok(());
        }

        // TODO: remove, fix for rm head_index
        if Chains::<T>::get(0).is_none() {
            Chains::<T>::insert(0, 0);
        }

        // get the position of the fork in Chains
        let fork_position: u32 = Self::get_chain_position_from_chain_id(fork.chain_id)?;
        // check if the previous element in Chains has a lower block_height
        let mut current_position = fork_position;
        let mut current_height = fork.max_height;

        // swap elements as long as previous block height is smaller
        while current_position > 0 {
            // get the previous position
            let prev_position = current_position - 1;
            // get the blockchain id
            let prev_blockchain_id = if let Ok(chain_id) = Self::get_chain_id_from_position(prev_position) {
                chain_id
            } else {
                // swap chain positions if previous doesn't exist and retry
                Self::swap_chain(prev_position, current_position);
                continue;
            };

            // get the previous blockchain height
            let prev_height = Self::get_block_chain_from_id(prev_blockchain_id)?.max_height;
            // swap elements if block height is greater
            if prev_height < current_height {
                // Check if swap occurs on the main chain element
                if prev_blockchain_id == MAIN_CHAIN_ID {
                    // if the previous position is the top element
                    // and the current height is more than the
                    // STABLE_TRANSACTION_CONFIRMATIONS ahead
                    // we are swapping the main chain
                    if prev_height + Self::get_stable_transaction_confirmations() < current_height {
                        Self::swap_main_blockchain(&fork)?;

                        // announce the new main chain
                        let new_chain_tip = BestBlock::<T>::get();
                        let block_height = BestBlockHeight::<T>::get();
                        let fork_depth = fork.max_height - fork.start_height;
                        Self::deposit_event(<Event<T>>::ChainReorg(new_chain_tip, block_height, fork_depth));
                    } else {
                        // fork is ahead of the main chain but not yet past the
                        // stability threshold — only announce it
                        Self::deposit_event(<Event<T>>::ForkAheadOfMainChain(
                            prev_height,     // main chain height
                            fork.max_height, // fork height
                            fork.chain_id,   // fork id
                        ));
                    }
                    // successful reorg
                    break;
                } else {
                    // else, simply swap the chain_id ordering in Chains
                    Self::swap_chain(prev_position, current_position);
                }

                // update the current chain to the previous one
                current_position = prev_position;
                current_height = prev_height;
            } else {
                break;
            }
        }

        Ok(())
    }

    /// Insert a new fork into the Chains mapping sorted by its max height
    ///
    /// # Arguments
    ///
    /// * `blockchain` - new blockchain element
    fn insert_sorted(blockchain: &BlockChain) -> Result<(), DispatchError> {
        // get a sorted vector over the Chains elements
        // NOTE: LinkedStorageMap iterators are not sorted over the keys
        let mut chains = Chains::<T>::iter().collect::<Vec<(u32, u32)>>();
        chains.sort_by_key(|k| k.0);

        let max_chain_element = chains.len() as u32;
        // define the position of the new blockchain
        // by default, we insert it as the last element
        let mut position_blockchain = max_chain_element;

        // Starting from the second highest element, find where to insert the new fork
        // the previous element's block height should be higher or equal
        // the next element's block height should be lower or equal
        // NOTE: we never want to insert a new main chain through this function
        for (curr_position, curr_chain_id) in chains.iter().skip(1) {
            // get the height of the current chain_id
            let curr_height = Self::get_block_chain_from_id(*curr_chain_id)?.max_height;

            // if the height of the current blockchain is lower than
            // the new blockchain, it should be inserted at that position
            if curr_height <= blockchain.max_height {
                position_blockchain = *curr_position;
                break;
            };
        }

        // insert the new fork into the chains element
        Self::set_chain_from_position_and_id(max_chain_element, blockchain.chain_id);
        // starting from the last element swap the positions until
        // the new blockchain is at the position_blockchain
        for curr_position in (position_blockchain + 1..max_chain_element + 1).rev() {
            // stop when the blockchain element is at its designated position
            // NOTE(review): this guard can never fire — the range starts at
            // `position_blockchain + 1` — so it is effectively dead code
            if curr_position < position_blockchain {
                break;
            };
            let prev_position = curr_position - 1;
            // swap the current element with the previous one
            Self::swap_chain(curr_position, prev_position);
        }

        Ok(())
    }

    /// Flag an error in a block header. This function is called by the
    /// security pallet.
    ///
    /// # Arguments
    ///
    /// * `block_hash` - the hash of the block header with the error
    /// * `error` - the error code for the block header
    pub fn flag_block_error(block_hash: H256Le, error: ErrorCode) -> Result<(), DispatchError> {
        // Get the chain id of the block header
        let block_header = Self::get_block_header_from_hash(block_hash)?;
        let chain_id = block_header.chain_ref;

        // Get the blockchain element for the chain id
        let mut blockchain = Self::get_block_chain_from_id(chain_id)?;

        // Flag errors in the blockchain entry
        // Check which error we are dealing with
        let newly_flagged = match error {
            ErrorCode::NoDataBTCRelay => blockchain.no_data.insert(block_header.block_height),
            ErrorCode::InvalidBTCRelay => blockchain.invalid.insert(block_header.block_height),
            _ => return Err(Error::<T>::UnknownErrorcode.into()),
        };

        // If the block was not already flagged, store the updated blockchain entry
        if newly_flagged {
            Self::mutate_block_chain_from_id(chain_id, blockchain);
            Self::deposit_event(<Event<T>>::FlagBlockError(block_hash, chain_id, error));
        }

        Ok(())
    }

    /// Clear an error from a block header. This function is called by the
    /// security pallet.
/// /// # Arguments /// /// * `block_hash` - the hash of the block header being cleared /// * `error` - the error code for the block header pub fn clear_block_error(block_hash: H256Le, error: ErrorCode) -> Result<(), DispatchError> { // Get the chain id of the block header let block_header = Self::get_block_header_from_hash(block_hash)?; let chain_id = block_header.chain_ref; // Get the blockchain element for the chain id let mut blockchain = Self::get_block_chain_from_id(chain_id)?; // Clear errors in the blockchain entry // Check which error we are dealing with let block_exists = match error { ErrorCode::NoDataBTCRelay => blockchain.no_data.remove(&block_header.block_height), ErrorCode::InvalidBTCRelay => blockchain.invalid.remove(&block_header.block_height), _ => return Err(Error::<T>::UnknownErrorcode.into()), }; if block_exists { if !blockchain.is_invalid() && !blockchain.is_no_data() { Self::recover_if_needed()? } // Store the updated blockchain entry Self::mutate_block_chain_from_id(chain_id, blockchain); Self::deposit_event(<Event<T>>::ClearBlockError(block_hash, chain_id, error)); } Ok(()) } /// Checks if the given transaction confirmations are greater/equal to the /// requested confirmations (and/or the global k security parameter) /// /// # Arguments /// /// * `block_height` - current main chain block height /// * `confirmations` - The number of confirmations requested. If `none`, /// the value stored in the StableBitcoinConfirmations storage item is used. /// * `tx_block_height` - block height of checked transaction pub fn check_bitcoin_confirmations( main_chain_height: u32, req_confs: Option<u32>, tx_block_height: u32, ) -> Result<(), DispatchError> { let required_confirmations = req_confs.unwrap_or_else(Self::get_stable_transaction_confirmations); let required_mainchain_height = tx_block_height .checked_add(required_confirmations) .ok_or(Error::<T>::ArithmeticOverflow)? 
.checked_sub(1) .ok_or(Error::<T>::ArithmeticUnderflow)?; if main_chain_height >= required_mainchain_height { Ok(()) } else { Err(Error::<T>::BitcoinConfirmations.into()) } } /// Checks if the given bitcoin block has been stored for a sufficient /// amount of blocks. This should give sufficient time for staked relayers /// to flag potentially invalid blocks. /// /// # Arguments /// * `para_height` - height of the parachain when the block was stored pub fn check_parachain_confirmations(para_height: T::BlockNumber) -> Result<(), DispatchError> { let current_height = ext::security::active_block_number::<T>(); ensure!( para_height + Self::parachain_confirmations() <= current_height, Error::<T>::ParachainConfirmations ); Ok(()) } /// Checks if transaction verification is enabled for this block height /// Returs an error if: /// * Parachain is shutdown /// * the main chain contains an invalid block /// * the main chain contains a "NO_DATA" block at a lower height than `block_height` /// # Arguments /// /// * `block_height` - block height of the to-be-verified transaction fn transaction_verification_allowed(block_height: u32) -> Result<(), DispatchError> { // Make sure Parachain is not shutdown ext::security::ensure_parachain_status_not_shutdown::<T>()?; // Ensure main chain has no invalid block let main_chain = Self::get_block_chain_from_id(MAIN_CHAIN_ID)?; ensure!(!main_chain.is_invalid(), Error::<T>::Invalid); // Check if a NO_DATA block exists at a lower height than block_height if main_chain.is_no_data() { match main_chain.no_data.iter().next_back() { Some(no_data_height) => ensure!(block_height < *no_data_height, Error::<T>::NoData), None => (), } } Ok(()) } fn ensure_no_ongoing_fork(best_block_height: u32) -> Result<(), DispatchError> { // check if there is a next best fork match Self::get_chain_id_from_position(1) { // if yes, check that the main chain is at least Self::confirmations() ahead Ok(id) => { let next_best_fork_height = 
Self::get_block_chain_from_id(id)?.max_height; runtime_print!("Best block height: {}", best_block_height); runtime_print!("Next best fork height: {}", next_best_fork_height); // fail if there is an ongoing fork ensure!( best_block_height >= next_best_fork_height + Self::get_stable_transaction_confirmations(), Error::<T>::OngoingFork ); } // else, do nothing if there is no fork Err(_) => {} } Ok(()) } fn recover_if_needed() -> Result<(), DispatchError> { if ext::security::is_parachain_error_invalid_btcrelay::<T>() || ext::security::is_parachain_error_no_data_btcrelay::<T>() { ext::security::recover_from_btc_relay_failure::<T>(); Ok(()) } else { Ok(()) } } } impl<T: Config> From<BitcoinError> for Error<T> { fn from(err: BitcoinError) -> Self { match err { BitcoinError::MalformedMerkleProof => Self::MalformedMerkleProof, BitcoinError::InvalidMerkleProof => Self::InvalidMerkleProof, BitcoinError::EndOfFile => Self::EndOfFile, BitcoinError::MalformedHeader => Self::MalformedHeader, BitcoinError::MalformedTransaction => Self::MalformedTransaction, BitcoinError::UnsupportedInputFormat => Self::UnsupportedInputFormat, BitcoinError::MalformedWitnessOutput => Self::MalformedWitnessOutput, BitcoinError::MalformedP2PKHOutput => Self::MalformedP2PKHOutput, BitcoinError::MalformedP2SHOutput => Self::MalformedP2SHOutput, BitcoinError::UnsupportedOutputFormat => Self::UnsupportedOutputFormat, BitcoinError::MalformedOpReturnOutput => Self::MalformedOpReturnOutput, BitcoinError::InvalidHeaderSize => Self::InvalidHeaderSize, BitcoinError::InvalidBtcHash => Self::InvalidBtcHash, BitcoinError::InvalidScript => Self::InvalidScript, BitcoinError::InvalidBtcAddress => Self::InvalidBtcAddress, BitcoinError::ArithmeticOverflow => Self::ArithmeticOverflow, BitcoinError::ArithmeticUnderflow => Self::ArithmeticUnderflow, } } }
39.416372
137
0.622645
11c147dcf3b5047299246567cd5ca8b78a3719a0
48
#[test] fn test() { assert_eq!(1 + 1, 2); }
9.6
25
0.458333
ccdb235164cd8a9dd184eb049c94d71413fe0ec0
125
// Export the enum variants, without the enum mod foo { export t1; enum t { t1, } } fn main() { let v = foo::t1; }
13.888889
45
0.568
f5c8b97c00f7e63550c03406eb0647e0e955bd3e
6,568
use std::collections::VecDeque; use std::io::{Read, Seek}; use arrow_format::ipc::BodyCompressionRef; use arrow_format::ipc::MetadataVersion; use crate::array::*; use crate::datatypes::{DataType, Field, PhysicalType}; use crate::error::Result; use crate::io::ipc::IpcField; use super::{array::*, Dictionaries}; use super::{IpcBuffer, Node}; #[allow(clippy::too_many_arguments)] pub fn read<R: Read + Seek>( field_nodes: &mut VecDeque<Node>, field: &Field, ipc_field: &IpcField, buffers: &mut VecDeque<IpcBuffer>, reader: &mut R, dictionaries: &Dictionaries, block_offset: u64, is_little_endian: bool, compression: Option<BodyCompressionRef>, version: MetadataVersion, ) -> Result<Box<dyn Array>> { use PhysicalType::*; let data_type = field.data_type.clone(); match data_type.to_physical_type() { Null => read_null(field_nodes, data_type).map(|x| x.boxed()), Boolean => read_boolean( field_nodes, data_type, buffers, reader, block_offset, is_little_endian, compression, ) .map(|x| x.boxed()), Primitive(primitive) => with_match_primitive_type!(primitive, |$T| { read_primitive::<$T, _>( field_nodes, data_type, buffers, reader, block_offset, is_little_endian, compression, ) .map(|x| x.boxed()) }), Binary => { let array = read_binary::<i32, _>( field_nodes, data_type, buffers, reader, block_offset, is_little_endian, compression, )?; Ok(Box::new(array)) } LargeBinary => { let array = read_binary::<i64, _>( field_nodes, data_type, buffers, reader, block_offset, is_little_endian, compression, )?; Ok(Box::new(array)) } FixedSizeBinary => { let array = read_fixed_size_binary( field_nodes, data_type, buffers, reader, block_offset, is_little_endian, compression, )?; Ok(Box::new(array)) } Utf8 => { let array = read_utf8::<i32, _>( field_nodes, data_type, buffers, reader, block_offset, is_little_endian, compression, )?; Ok(Box::new(array)) } LargeUtf8 => { let array = read_utf8::<i64, _>( field_nodes, data_type, buffers, reader, block_offset, is_little_endian, compression, )?; 
Ok(Box::new(array)) } List => read_list::<i32, _>( field_nodes, data_type, ipc_field, buffers, reader, dictionaries, block_offset, is_little_endian, compression, version, ) .map(|x| x.boxed()), LargeList => read_list::<i64, _>( field_nodes, data_type, ipc_field, buffers, reader, dictionaries, block_offset, is_little_endian, compression, version, ) .map(|x| x.boxed()), FixedSizeList => read_fixed_size_list( field_nodes, data_type, ipc_field, buffers, reader, dictionaries, block_offset, is_little_endian, compression, version, ) .map(|x| x.boxed()), Struct => read_struct( field_nodes, data_type, ipc_field, buffers, reader, dictionaries, block_offset, is_little_endian, compression, version, ) .map(|x| x.boxed()), Dictionary(key_type) => { match_integer_type!(key_type, |$T| { read_dictionary::<$T, _>( field_nodes, ipc_field.dictionary_id, buffers, reader, dictionaries, block_offset, compression, is_little_endian, ) .map(|x| x.boxed()) }) } Union => read_union( field_nodes, data_type, ipc_field, buffers, reader, dictionaries, block_offset, is_little_endian, compression, version, ) .map(|x| x.boxed()), Map => read_map( field_nodes, data_type, ipc_field, buffers, reader, dictionaries, block_offset, is_little_endian, compression, version, ) .map(|x| x.boxed()), } } pub fn skip( field_nodes: &mut VecDeque<Node>, data_type: &DataType, buffers: &mut VecDeque<IpcBuffer>, ) -> Result<()> { use PhysicalType::*; match data_type.to_physical_type() { Null => skip_null(field_nodes), Boolean => skip_boolean(field_nodes, buffers), Primitive(_) => skip_primitive(field_nodes, buffers), LargeBinary | Binary => skip_binary(field_nodes, buffers), LargeUtf8 | Utf8 => skip_utf8(field_nodes, buffers), FixedSizeBinary => skip_fixed_size_binary(field_nodes, buffers), List => skip_list::<i32>(field_nodes, data_type, buffers), LargeList => skip_list::<i64>(field_nodes, data_type, buffers), FixedSizeList => skip_fixed_size_list(field_nodes, data_type, buffers), Struct => skip_struct(field_nodes, 
data_type, buffers), Dictionary(_) => skip_dictionary(field_nodes, buffers), Union => skip_union(field_nodes, data_type, buffers), Map => skip_map(field_nodes, data_type, buffers), } }
28.188841
79
0.474574
9cf2d1d00187c17108cebab0838fff7629bdea56
4,565
// Copyright 2019 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! The proof of work needs to strike a balance between fast header //! verification to avoid DoS attacks and difficulty for block verifiers to //! build new blocks. In addition, mining new blocks should also be as //! difficult on high end custom-made hardware (ASICs) as on commodity hardware //! or smartphones. For this reason we use Cuckoo Cycle (see the cuckoo //! module for more information). //! //! Note that this miner implementation is here mostly for tests and //! reference. It's not optimized for speed. #![deny(non_upper_case_globals)] #![deny(non_camel_case_types)] #![deny(non_snake_case)] #![deny(unused_mut)] #![warn(missing_docs)] pub use self::common::EdgeType; pub use self::types::*; use crate::core::{Block, BlockHeader}; use crate::genesis; use crate::global; use chrono; use num; #[macro_use] mod common; pub mod cuckaroo; pub mod cuckarood; pub mod cuckatoo; mod error; #[allow(dead_code)] pub mod lean; mod siphash; mod types; pub use crate::pow::cuckaroo::{new_cuckaroo_ctx, CuckarooContext}; pub use crate::pow::cuckarood::{new_cuckarood_ctx, CuckaroodContext}; pub use crate::pow::cuckatoo::{new_cuckatoo_ctx, CuckatooContext}; pub use crate::pow::error::Error; use chrono::prelude::{DateTime, NaiveDateTime, Utc}; const MAX_SOLS: u32 = 10; /// Validates the proof of work of a given header, and that the proof of work /// satisfies the requirements of the header. 
pub fn verify_size(bh: &BlockHeader) -> Result<(), Error> { let mut ctx = global::create_pow_context::<u64>( bh.height, bh.pow.edge_bits(), bh.pow.proof.nonces.len(), MAX_SOLS, )?; ctx.set_header_nonce(bh.pre_pow(), None, false)?; ctx.verify(&bh.pow.proof) } /// Mines a genesis block using the internal miner pub fn mine_genesis_block() -> Result<Block, Error> { let mut gen = genesis::genesis_dev(); // total_difficulty on the genesis header *is* the difficulty of that block let genesis_difficulty = gen.header.pow.total_difficulty; let sz = global::min_edge_bits(); let proof_size = global::proofsize(); pow_size(&mut gen.header, genesis_difficulty, proof_size, sz)?; Ok(gen) } /// Runs a proof of work computation over the provided block using the provided /// Mining Worker, until the required difficulty target is reached. May take a /// while for a low target... pub fn pow_size( bh: &mut BlockHeader, diff: Difficulty, proof_size: usize, sz: u8, ) -> Result<(), Error> { let start_nonce = bh.pow.nonce; // try to find a cuckoo cycle on that header hash loop { // if we found a cycle (not guaranteed) and the proof hash is higher that the // diff, we're all good let mut ctx = global::create_pow_context::<u32>(bh.height, sz, proof_size, MAX_SOLS)?; ctx.set_header_nonce(bh.pre_pow(), None, true)?; if let Ok(proofs) = ctx.find_cycles() { bh.pow.proof = proofs[0].clone(); if bh.pow.to_difficulty(bh.height) >= diff { return Ok(()); } } // otherwise increment the nonce let (res, _) = bh.pow.nonce.overflowing_add(1); bh.pow.nonce = res; // and if we're back where we started, update the time (changes the hash as // well) if bh.pow.nonce == start_nonce { bh.timestamp = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(0, 0), Utc); } } } #[cfg(test)] mod test { use super::*; use crate::genesis; use crate::global; use crate::global::ChainTypes; /// We'll be generating genesis blocks differently #[test] fn genesis_pow() { global::set_mining_mode(ChainTypes::UserTesting); let mut b 
= genesis::genesis_dev(); b.header.pow.nonce = 28106; b.header.pow.proof.edge_bits = global::min_edge_bits(); println!("proof {}", global::proofsize()); pow_size( &mut b.header, Difficulty::min(), global::proofsize(), global::min_edge_bits(), ) .unwrap(); println!("nonce {}", b.header.pow.nonce); assert_ne!(b.header.pow.nonce, 310); assert!(b.header.pow.to_difficulty(0) >= Difficulty::min()); assert!(verify_size(&b.header).is_ok()); } }
30.433333
88
0.710186
2978a4708461a30980fadf1d369b983d520787ac
848
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 /// 'Native' functions that are actually bytecode isntructions //************************************************************************************************** // Transaction //************************************************************************************************** /// Fake module around transaction meta data pub mod transaction { pub const MOD: &str = "Transaction"; pub const GAS_PRICE: &str = "gas_unit_price"; pub const MAX_GAS: &str = "max_gas_units"; pub const GAS_REMAINING: &str = "gas_remaining"; pub const SENDER: &str = "sender"; pub const SEQUENCE_NUM: &str = "sequence_number"; pub const PUBLIC_KEY: &str = "public_key"; /// 'Inlined' during hlir::translate pub const ASSERT: &str = "assert"; }
36.869565
100
0.523585
f91fcb4ab69f3b921db307e91751ed5c09f7df4f
5,037
use std::convert::TryFrom; use std::hash::Hash; use std::ops::{Deref, DerefMut}; use crate::error::Error; pub(crate) mod math { use super::*; pub(crate) fn fact(mut n: usize) -> Result<usize, Error> { let orig = n; let mut answer = 1usize; loop { answer = match answer.checked_mul(n) { Some(val) => val, None => bail!("Factorial of {} overflows usize.", orig), }; if (n - 1) == 1 { break; } else { n -= 1; } } Ok(answer) } pub(crate) fn gcf(a: u64, b: u64) -> Result<u64, Error> { if a == 0 || b == 0 { bail!("gcf function only works with positive inputs."); } let (mut smaller, mut larger) = if a > b { (b, a) } else { (a, b) }; loop { let rem = larger % smaller; if rem == 0 { return Ok(smaller); } larger = smaller; smaller = rem; } } pub(crate) fn lcm(a: u64, b: u64) -> Result<u64, Error> { Ok(a * b / gcf(a, b)?) } #[cfg(test)] mod tests { use super::*; #[test] fn test_gcf() { assert_eq!(5, gcf(5, 5).unwrap()); assert_eq!(5, gcf(5, 10).unwrap()); assert_eq!(3, gcf(15, 21).unwrap()); assert!(gcf(1, 0).is_err()); } } } #[derive(Copy, Clone, Debug, PartialEq)] pub(crate) struct F64(f64); impl TryFrom<f64> for F64 { type Error = Error; fn try_from(f: f64) -> Result<Self, Self::Error> { if f.is_nan() { bail!("Cannot convert {} into F64", f); } Ok(F64(f)) } } impl Deref for F64 { type Target = f64; fn deref(&self) -> &Self::Target { &self.0 } } impl Eq for F64 {} // TODO: Verify that this is kosher. 
#[allow(clippy::derive_hash_xor_eq)] impl Hash for F64 { fn hash<H>(&self, state: &mut H) where H: std::hash::Hasher, { self.0.to_bits().hash(state); } } #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub(crate) struct Vec2<T>(T, T); impl<T> Vec2<T> { pub(crate) const fn new(x: T, y: T) -> Self { Self(x, y) } } impl<T> Vec2<T> where T: Copy, { pub(crate) fn x(&self) -> T { self.0 } pub(crate) fn y(&self) -> T { self.1 } } impl<T> From<(T, T)> for Vec2<T> { fn from(tup: (T, T)) -> Self { Self(tup.0, tup.1) } } #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub(crate) struct Vec3<T>([T; 3]); impl<T> Default for Vec3<T> where T: Copy + Default, { fn default() -> Self { Self([T::default(); 3]) } } impl<T> Vec3<T> { #[allow(unused)] pub(crate) const fn new(x: T, y: T, z: T) -> Self { Self([x, y, z]) } } impl<T> Vec3<T> where T: Copy, { #[allow(unused)] pub(crate) fn x(&self) -> T { self.0[0] } #[allow(unused)] pub(crate) fn y(&self) -> T { self.0[1] } #[allow(unused)] pub(crate) fn z(&self) -> T { self.0[2] } } impl<T> Deref for Vec3<T> { type Target = [T]; fn deref(&self) -> &Self::Target { &self.0 } } impl<T> DerefMut for Vec3<T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl<T> From<(T, T, T)> for Vec3<T> { fn from(tup: (T, T, T)) -> Self { Self([tup.0, tup.1, tup.2]) } } impl<T> From<[T; 3]> for Vec3<T> { fn from(array: [T; 3]) -> Self { Self(array) } } #[cfg(all( any(target_arch = "x86", target_arch = "x86_64"), target_feature = "avx2" ))] mod simd { use std::mem; use super::*; #[cfg(target_arch = "x86")] use std::arch::x86::*; #[cfg(target_arch = "x86_64")] use std::arch::x86_64::*; impl From<__m256i> for Vec3<i64> { fn from(v: __m256i) -> Self { // safety: This is safe because the call to _mm256_storeu_si256 will write // values to the uninitialized array so we won't be tying to access junk memory. 
let mut a: [i64; 4] = unsafe { mem::MaybeUninit::uninit().assume_init() }; #[allow(clippy::cast_ptr_alignment)] unsafe { _mm256_storeu_si256(&mut a as *mut _ as *mut __m256i, v) }; Vec3::new(a[3], a[2], a[1]) } } } #[cfg(test)] pub(crate) mod tests { use std::fs; use std::io; use crate::error::Error; pub(crate) fn test_full_problem<F>(day: usize, run_func: F, expected1: &str, expected2: &str) where F: Fn(io::BufReader<fs::File>) -> Result<(String, String), Error>, { let path = format!("data/{:02}.txt", day); let file = std::fs::File::open(path).unwrap(); let reader = std::io::BufReader::new(file); let (actual1, actual2) = run_func(reader).unwrap(); assert_eq!(&actual1, expected1); assert_eq!(&actual2, expected2); } }
21.525641
97
0.491165
75a92e9ad414294229bebefe822732924b84b5e0
7,660
use crate::apu::{AudioOutput, APU}; use crate::cart::Cart; use crate::controller::NESController; use crate::error::*; use crate::mmu::{MMUSaveState, MMU}; use crate::mos6502::MOS6502; use crate::ppu::{Color, PPUSaveState, VideoInterface, PPU}; use bitflags::bitflags; bitflags! { pub struct NESConfig : u16 { const DEBUG = 1 << 0; const DEBUG_OUTPUT = 1 << 1 | Self::DEBUG.bits; } } impl From<&NESConfig> for crate::mos6502::CPUConfig { fn from(val: &NESConfig) -> Self { use crate::mos6502::CPUConfig; let mut out = CPUConfig::empty(); out.set(CPUConfig::DEBUG, val.contains(NESConfig::DEBUG)); out.set( CPUConfig::DEBUG_OUTPUT, val.contains(NESConfig::DEBUG_OUTPUT), ); out } } impl From<&NESConfig> for crate::mmu::MMUConfig { fn from(val: &NESConfig) -> Self { use crate::mmu::MMUConfig; let mut out = MMUConfig::empty(); out.set(MMUConfig::DEBUG, val.contains(NESConfig::DEBUG)); out.set( MMUConfig::DEBUG_OUTPUT, val.contains(NESConfig::DEBUG_OUTPUT), ); out } } struct NesVideoWrapper<V> { screen: V, frame_completed: std::cell::Cell<bool>, } impl<V: VideoInterface> VideoInterface for NesVideoWrapper<V> { #[inline] fn draw_pixel(&mut self, x: u16, y: u16, color: Color) { self.screen.draw_pixel(x, y, color); } #[inline] fn end_of_frame(&mut self) { self.frame_completed.set(true); self.screen.end_of_frame(); } } #[derive(Clone)] pub struct NesSaveState { cpu_state: MOS6502, mmu_state: MMUSaveState, ppu_state: PPUSaveState, } /// Represents the NES system. 
pub struct Nes<V: VideoInterface, C: NESController, A: AudioOutput> { pub cpu: MOS6502, pub ppu: PPU, pub mmu: MMU<C>, pub apu: APU<A>, screen: NesVideoWrapper<V>, cycles_counter: u32, oam_write: Option<u8>, _config: NESConfig, } impl<'a, V: VideoInterface, C: NESController, A: AudioOutput> Nes<V, C, A> { pub fn new<T: Into<Option<Cart>>>( cart: T, screen: V, controller: C, audio: A, config: Option<NESConfig>, ) -> Self { let mut cpu = MOS6502::new(config.as_ref().map(|c| c.into())); cpu.reset(); let ppu = PPU::default(); let apu = APU::new(audio); let mmu = MMU::new(cart, controller, config.as_ref().map(|c| c.into())); Nes { cpu, ppu, apu, mmu, screen: NesVideoWrapper { screen, frame_completed: std::cell::Cell::new(false), }, cycles_counter: 0, oam_write: None, _config: config.unwrap_or_else(NESConfig::empty), } } pub fn reset(&mut self) { self.cpu.reset(); self.mmu.reset(); self.apu.reset(); } pub fn save_state(&self) -> NesSaveState { NesSaveState { cpu_state: self.cpu.clone(), mmu_state: self.mmu.save_state(), ppu_state: self.ppu.save_state(), } } pub fn load_state(&mut self, s: NesSaveState) { self.cpu = s.cpu_state; self.mmu.load_state(s.mmu_state); self.ppu.load_state(s.ppu_state); } pub fn master_clock_tick(&mut self) -> Result<()> { if !self.mmu.has_cartridge() { return Err(Error::missing_cart()); } if self.mmu.oam_transfer { if let Some(v) = self.oam_write.take() { self.ppu.oam[self.mmu.oam_offset as usize] = v; self.mmu.oam_offset += 1; if self.mmu.oam_offset >= 256 { self.mmu.oam_transfer = false; self.mmu.oam_page = 0; self.mmu.oam_offset = 0; } } else { let addr = self.mmu.oam_page + self.mmu.oam_offset; self.oam_write = Some(self.mmu.read(addr)); } } else { if self.ppu.nmi { self.cpu.nmi(); self.ppu.nmi = false; } else if self.apu.get_irq() { self.cpu.irq(); } if self.cycles_counter == 0 { self.cycles_counter += self.cpu.tick(&mut self.mmu)?; } self.cycles_counter -= 1; } self.ppu.tick(&mut self.mmu, &mut self.screen); self.ppu.tick(&mut self.mmu, 
&mut self.screen); self.ppu.tick(&mut self.mmu, &mut self.screen); self.apu.tick(&mut self.mmu.apu_registers); self.apu.tick(&mut self.mmu.apu_registers); self.apu.tick(&mut self.mmu.apu_registers); Ok(()) } /// Runs the CPU until it recieves an NMI, signaling the end of a frame. pub fn run_frame(&mut self) -> Result<()> { loop { self.master_clock_tick()?; if self.screen.frame_completed.get() { self.screen.frame_completed.set(false); break; } } Ok(()) } pub fn insert_cartridge(&mut self, cart: Cart) { self.mmu.insert_cartridge(cart) } pub fn pattern_table(&self) -> [u8; 0x2000] { use crate::ppu::PPUMemory; let mut r = [0; 0x2000]; for i in 0..0x2000 { r[i as usize] = self.mmu.read_ppu(i as u16); } r } pub fn get_palette(&self, id: u16) -> [(u8, u8, u8); 4] { let index = (4 * id) as usize; let palette = &self.ppu.palette_ram[index..index + 4]; let mut out = [(0, 0, 0); 4]; for i in 0..4 { let c = palette[i as usize]; out[i as usize] = self.ppu.convert_color_to_rgb(c).into_tuple(); } out } pub fn palette_table(&self) -> [(u8, u8, u8); 32] { let mut out = [(0, 0, 0); 32]; let palette_table = &self.ppu.palette_ram[..]; for i in 0..32 { let c = palette_table[i]; out[i] = self.ppu.convert_color_to_rgb(c).into_tuple(); } out } /// Returns a clone of 0x2000-0x2fff of PPU memory pub fn get_nametables(&self) -> [u8; 0x1000] { use crate::ppu::PPUMemory; let mut buf = [0; 0x1000]; buf.iter_mut().enumerate().for_each(|(i, b)| { *b = self.mmu.read_ppu(0x2000 + i as u16); }); buf } #[inline] pub fn get_screen(&self) -> &V { &self.screen.screen } #[inline] pub fn get_screen_mut(&mut self) -> &mut V { &mut self.screen.screen } #[inline] pub fn get_audio_device(&self) -> &A { self.apu.audio_device() } #[inline] pub fn get_audio_device_mut(&mut self) -> &mut A { self.apu.audio_device_mut() } #[inline] pub fn get_controller(&self) -> &C { &self.mmu.controller } #[inline] pub fn get_controller_mut(&mut self) -> &mut C { &mut self.mmu.controller } } // #[cfg(test)] // mod tests { // use 
super::*; // #[test] // fn test_ram() { // let mut nes = Nes::new(Cart::dummy(), None); // for i in 0..2048 { // nes.mmu.write(i, (i & 0xff) as u8); // } // for i in 0..2048 { // assert_eq!(nes.mmu.read(i), (i & 0xff) as u8); // // RAM Mirrors // assert_eq!(nes.mmu.read(i+2048), (i & 0xff) as u8); // assert_eq!(nes.mmu.read(i+4096), (i & 0xff) as u8); // assert_eq!(nes.mmu.read(i+6144), (i & 0xff) as u8); // } // } // }
28.265683
80
0.52376
4abb5830477b8a7f2e7bb80e26ab001751deae8a
7,975
use argh::FromArgs; use std::{ env, fs::{self, File}, io::{Error, ErrorKind, Read as _, Write as _}, path::Path, process, str::FromStr, }; const BASE: &str = "timeit"; const BASE_DIR: &str = "rust-timeit"; const CARGO_TOML: &str = include_str!("Cargo.toml.tmpl"); const TIMEIT_EXPRESSION: &str = include_str!("expression.rs"); const TIMEIT_RS: &str = include_str!("timeit.rs"); const CYCLES_DEP: &str = r#"criterion-cycles-per-byte = "0.1.2""#; const PERF_DEP: &str = r#"criterion-linux-perf = "0.1""#; const CYCLES_USE: &str = "criterion_cycles_per_byte::CyclesPerByte"; const PERF_USE: &str = "criterion_linux_perf::{PerfMeasurement, PerfMode}"; macro_rules! perf_mode { ( $( $ident:ident => $word:literal, )* ) => { #[derive(Clone, Copy, Debug, PartialEq)] enum PerfMode { Help, $( $ident, )* } impl PerfMode { fn as_perf_mode(&self) -> &'static str { match self { Self::Help => unreachable!(), $( Self::$ident => stringify!($ident), )* } } fn all_modes() -> Vec<&'static str> { vec![ $( $word, )* ] } } impl FromStr for PerfMode { type Err = String; fn from_str(s: &str) -> Result<Self, String> { match s { "help" => { eprintln!("Valid values for --perf"); for mode in Self::all_modes() { eprintln!(" {}", mode); } process::exit(1); } $( $word => Ok(Self::$ident), )* _ => Err("Unknown perf mode".into()), } } } }; } perf_mode! 
{ Cycles => "cycles", Instructions => "instructions", Branches => "branches", BranchMisses => "branch-misses", CacheRefs => "cache-refs", CacheMisses => "cache-misses", BusCycles => "bus-cycles", RefCycles => "ref-cycles", } #[derive(Debug, FromArgs)] #[argh(description = r#"Tool for measuring execution time of small Rust code snippets."#)] struct Args { /// code to be executed once before timing begins #[argh(option, short = 's')] setup: Option<String>, /// crate name and version to add to the dependencies section #[argh(option, short = 'd')] dependency: Vec<String>, /// add an extra "use" line #[argh(option, short = 'u', long = "use")] uses: Vec<String>, /// include the named file's contents in the source code #[argh(option, short = 'i')] include: Vec<String>, /// use the CPU cycle count instead of wall time #[argh(switch)] cycles: bool, /// use an alternate measurement instead of wall time (use `--perf /// help` to list all the options for this) #[cfg(target_os = "linux")] #[argh(option, short = 'p')] perf: Option<PerfMode>, /// wrap the expressions in `criterion::black_box` to ensure their full evaluation #[argh(switch, short = 'b')] black_box: bool, /// delete the cache directory before starting, making a fresh start #[argh(switch, short = 'f')] fresh: bool, /// clean up the cache directory after a successful finish #[argh(switch, short = 'c')] cleanup: bool, /// enable verbose mode #[argh(switch, short = 'v')] verbose: bool, #[argh(positional)] expression: Vec<String>, } impl Args { fn dependencies(&mut self) -> String { if self.cycles { self.dependency.push(CYCLES_DEP.into()); } #[cfg(target_os = "linux")] if self.perf.is_some() { self.dependency.push(PERF_DEP.into()); } self.dependency.join("\n") } fn uses(&mut self) -> String { if self.cycles { self.uses.push(CYCLES_USE.into()); } #[cfg(target_os = "linux")] if self.perf.is_some() { self.uses.push(PERF_USE.into()); } self.uses .iter() .map(|import| format!("use {};\n", import)) .collect::<Vec<_>>() 
.join("") } fn includes(&self) -> Result<String, Error> { self.include .iter() .map(|filename| { let mut contents = String::new(); fs::File::open(filename) .and_then(|mut file| file.read_to_string(&mut contents)) .map(move |_| contents) }) .collect::<Result<Vec<_>, _>>() .map(|includes| includes.join("\n")) } fn setup(&self) -> String { self.setup .as_ref() .map(|s| format!("{};", s)) .unwrap_or_default() } fn expressions(&self) -> String { self.expression .iter() .map(|expression| { let black_box = if self.black_box { "black_box" } else { "" }; TIMEIT_EXPRESSION .replace("/*BLACK_BOX*/", black_box) .replace("/*EXPRESSION*/", &expression) }) .collect::<Vec<_>>() .join("\n") } fn timer(&self) -> String { #[cfg(target_os = "linux")] if let Some(mode) = self.perf { return format!("PerfMeasurement::new(PerfMode::{})", mode.as_perf_mode()); } if self.cycles { "CyclesPerByte".into() } else { "WallTime".into() } } } fn create(filename: &str, template: &str, subst: &[(&str, &str)]) -> Result<(), Error> { let tempname = format!("{}.tmp", filename); let mut data = template.to_string(); for (key, value) in subst { data = data.replace(key, value); } let mut out = File::create(&tempname)?; out.write_all(data.as_bytes())?; out.flush()?; drop(out); fs::rename(tempname, filename) } fn remove_dir_all<P: AsRef<Path>>(path: P) -> Result<(), Error> { fs::remove_dir_all(path).or_else(|error| match error.kind() { ErrorKind::NotFound => Ok(()), _ => Err(error), }) } fn main() -> Result<(), Error> { let mut args = argh::from_env::<Args>(); if args.expression.is_empty() { eprintln!("Please specify at least one expression"); process::exit(1); } #[cfg(target_os = "linux")] if args.cycles && args.perf.is_some() { eprintln!("Cannot specify both --cycles and --perf"); process::exit(1); } // Pre-load the included files before changing the working directory let includes = args.includes()?; let mut base_dir = dirs::cache_dir().expect("Could not determine cache directory"); base_dir.push(BASE_DIR); 
if args.verbose { println!("Using cache directory {:?}.", base_dir); } if args.fresh { println!("Deleting cache directory."); remove_dir_all(&base_dir)?; } fs::create_dir_all(&base_dir)?; env::set_current_dir(&base_dir)?; fs::create_dir_all("benches")?; create( "Cargo.toml", CARGO_TOML, &[("@DEPENDENCIES@", &args.dependencies()), ("@BASE@", BASE)], )?; create( &format!("benches/{}.rs", BASE), TIMEIT_RS, &[ ("/*USES*/", &args.uses()), ("/*INCLUDES*/", &includes), ("/*SETUP*/", &args.setup()), ("/*EXPRESSIONS*/", &args.expressions()), ("/*TIMER*/", &args.timer()), ], )?; fs::remove_dir_all("target/criterion").ok(); let mut cmdline = vec!["bench", "--bench", "timeit", "--", "--noplot"]; if args.verbose { cmdline.push("--verbose"); } process::Command::new("cargo").args(&cmdline).status()?; if args.cleanup { println!("Deleting cache directory."); fs::remove_dir_all(&base_dir)?; } Ok(()) }
28.68705
90
0.516865
6ae0946af1cb4827ddec190b1c4e44b3a8ff8824
10,138
use modular_bitfield::prelude::*;
use super::byteslice::ByteSliceExt;

/// Result of a match search: either nothing usable was found, or a match
/// together with the bookkeeping values stored on the matched node.
#[derive(Clone, Copy)]
pub enum MatchResult {
    Unmatched,
    Matched {
        reduced_offset: u16,
        match_len: usize,
        match_len_expected: usize,
        match_len_min: usize,
    }
}

/// Ring buffer of recently seen positions for one LZ bucket.
/// `head` indexes the most recently inserted node; older nodes are reached by
/// subtracting a `reduced_offset` modulo `LZ_MF_BUCKET_ITEM_SIZE`.
#[derive(Clone, Copy)]
pub struct Bucket {
    nodes: [Node; super::LZ_MF_BUCKET_ITEM_SIZE], // packed as pos:25 | match_len_expected:7 | match_len_min:8 (see Node)
    head: u16,

    /* match_len_expected:
     *  the match length we got when searching match for this position
     *  if no match is found, this value is set to 0.
     *
     *  when a newer position matches this position, it is likely that the match length
     *  is the same with this value.
     *
     * match_len_min:
     *  the longest match of all newer position that matches this position
     *  if no match is found, this value is set to LZ_MATCH_MIN_LEN-1.
     *
     *  when a newer position matches this position, the match length is always
     *  longer than this value, because shortter matches will stop at a newer position
     *  that matches this position.
     *
     *  A A A A A B B B B B A A A A A C C C C C A A A A A
     *  |                   |
     *  |<------------------|
     *  |                   |
     *  |                   match_len_expected=5
     *  match_len_min=6
     */
}

impl Bucket {
    /// Creates an empty bucket: head at 0, all nodes zeroed.
    pub fn new() -> Bucket {
        return Bucket {
            head: 0,
            nodes: [Node::new(); super::LZ_MF_BUCKET_ITEM_SIZE],
        };
    }

    /// Records `pos` as the newest node and updates the bookkeeping of the
    /// node it matched (if any).
    ///
    /// # Safety
    /// Uses `unchecked_index`; `reduced_offset` must reference a valid node
    /// relative to `head` (guaranteed by the modular arithmetic helpers).
    pub unsafe fn update(&mut self, pos: usize, reduced_offset: u16, match_len: usize) {
        let mut self_nodes = unchecked_index::unchecked_index(&mut self.nodes);
        let new_head = node_size_bounded_add(self.head, 1) as usize;

        // update match_len_min of matched position
        // NOTE(review): `match_len as u8 + 1` assumes match_len < 255 here —
        // presumably guaranteed by LZ_MATCH_MAX_LEN; TODO confirm.
        if match_len >= super::LZ_MATCH_MIN_LEN {
            let node_index = node_size_bounded_sub(self.head, reduced_offset) as usize;
            if self_nodes[node_index].match_len_min() <= match_len as u8 {
                self_nodes[node_index].set_match_len_min(match_len as u8 + 1);
            }
        }

        // update match_len_expected of incoming position
        // match_len_expected < 128 because only 7 bits are reserved in Node
        let match_len_expected = match match_len {
            0 ..= 127 => match_len,
            _ => 0,
        };
        self_nodes[new_head] = Node::new()
            .with_pos(pos as u32)
            .with_match_len_expected(match_len_expected as u8);

        // move head to next node
        self.head = new_head as u16;
    }

    /// Shifts the window forward: every stored position is reduced by
    /// `forward_len`, saturating at 0 (0 marks an out-of-date node).
    pub fn forward(&mut self, forward_len: usize) {
        // reduce all positions
        for node in &mut self.nodes {
            node.set_pos(node.pos().saturating_sub(forward_len as u32));
        }
    }

    /// Returns `(pos, match_len_expected, match_len_min)` for the node at
    /// `reduced_offset` back from `head`, with both lengths clamped up to
    /// `LZ_MATCH_MIN_LEN`.
    ///
    /// # Safety
    /// Uses `unchecked_index`; the derived node index is kept in range by
    /// `node_size_bounded_sub`.
    pub unsafe fn get_match_pos_and_match_len(&self, reduced_offset: u16) -> (usize, usize, usize) {
        let self_nodes = unchecked_index::unchecked_index(&self.nodes);
        let node_index = node_size_bounded_sub(self.head, reduced_offset) as usize;
        return (
            self_nodes[node_index].pos() as usize,
            std::cmp::max(self_nodes[node_index].match_len_expected() as usize, super::LZ_MATCH_MIN_LEN),
            std::cmp::max(self_nodes[node_index].match_len_min() as usize, super::LZ_MATCH_MIN_LEN),
        );
    }
}

/// Hash-chained index over a `Bucket`: `heads[hash]` is the newest node with
/// that 4-byte hash, `nexts[node]` chains to the previous one.
/// `u16::max_value()` is the sentinel for "no entry".
pub struct BucketMatcher {
    heads: [u16; super::LZ_MF_BUCKET_ITEM_HASH_SIZE],
    nexts: [u16; super::LZ_MF_BUCKET_ITEM_SIZE],
}

impl BucketMatcher {
    /// Creates an empty matcher (all chains empty).
    pub fn new() -> BucketMatcher {
        return BucketMatcher {
            heads: [u16::max_value(); super::LZ_MF_BUCKET_ITEM_HASH_SIZE],
            nexts: [u16::max_value(); super::LZ_MF_BUCKET_ITEM_SIZE],
        };
    }

    /// Links `bucket.head` into the hash chain for the 4 bytes at `buf[pos..]`.
    ///
    /// # Safety
    /// `pos + 4` must be within `buf` (required by `hash_dword`).
    pub unsafe fn update(&mut self, bucket: &Bucket, buf: &[u8], pos: usize) {
        let self_heads = &mut unchecked_index::unchecked_index(&mut self.heads);
        let self_nexts = &mut unchecked_index::unchecked_index(&mut self.nexts);
        let entry = hash_dword(buf, pos) % super::LZ_MF_BUCKET_ITEM_HASH_SIZE;
        self_nexts[bucket.head as usize] = self_heads[entry];
        self_heads[entry] = bucket.head;
    }

    /// Drops chain links that point at out-of-date nodes (pos == 0 after
    /// `Bucket::forward`).
    pub fn forward(&mut self, bucket: &Bucket) {
        // clear all entries/positions that points to out-of-date node
        self.heads.iter_mut()
            .filter(|head| **head != u16::max_value() && bucket.nodes[**head as usize].pos() == 0)
            .for_each(|head| *head = u16::max_value());
        self.nexts.iter_mut()
            .filter(|next| **next != u16::max_value() && bucket.nodes[**next as usize].pos() == 0)
            .for_each(|next| *next = u16::max_value());
    }

    /// Walks up to `match_depth` chain entries looking for the longest match
    /// of `buf[pos..]` against earlier positions.
    ///
    /// # Safety
    /// Reads `buf` with unchecked dword loads around `pos` and candidate
    /// positions; caller must keep `pos` far enough from the end of `buf`.
    pub unsafe fn find_match(&self, bucket: &Bucket, buf: &[u8], pos: usize, match_depth: usize) -> MatchResult {
        let self_heads = &unchecked_index::unchecked_index(&self.heads);
        let self_nexts = &unchecked_index::unchecked_index(&self.nexts);
        let bucket_nodes = &unchecked_index::unchecked_index(&bucket.nodes);
        let entry = hash_dword(buf, pos) % super::LZ_MF_BUCKET_ITEM_HASH_SIZE;
        let mut node_index = self_heads[entry] as usize;

        if node_index == u16::max_value() as usize {
            return MatchResult::Unmatched;
        }
        let mut max_len = super::LZ_MATCH_MIN_LEN - 1;
        let mut max_node_index = 0;
        // dword ending at the current best length; a candidate can only beat
        // max_len if it also matches these 4 bytes
        let mut max_len_dword = buf.read(pos + max_len - 3);
        let mut max_match_len_min = 0;
        let mut max_match_len_expected = 0;

        for _ in 0..match_depth {
            let node_pos = bucket_nodes[node_index].pos() as usize;

            // check the last 4 bytes of longest match (fast)
            // then perform full LCP search
            if buf.read::<u32>(node_pos + max_len - 3) == max_len_dword {
                let lcp = super::mem::llcp_fast(buf, node_pos, pos, super::LZ_MATCH_MAX_LEN);
                if lcp > max_len {
                    max_match_len_min = bucket_nodes[node_index].match_len_min() as usize;
                    max_match_len_expected = bucket_nodes[node_index].match_len_expected() as usize;
                    max_len = lcp;
                    max_node_index = node_index;
                    max_len_dword = buf.read(pos + max_len - 3);
                }

                if lcp == super::LZ_MATCH_MAX_LEN || (max_match_len_expected > 0 && lcp > max_match_len_expected) {
                    /*
                     *  (1)                 (2)                 (3)
                     *  A A A A A B B B B B A A A A A C C C C C A A A A A C B
                     *  |                   |                   |
                     *  |<-5----------------|                   |
                     *  |                   |                   |
                     *  |                   match_len_expected=5|
                     *  match_len_min=6     |                   |
                     *           END<--|<-6----------------|
                     *                     |
                     *                     lcp=6 > max_match_len_expected
                     *                     ## skip further matches
                     *                     if there are better matches, (2) would have had match it
                     *                     and got a longer match_len_expected.
                     */
                    break;
                }
            }
            let node_next = self_nexts[node_index] as usize;
            // stop at the chain end, or when positions stop decreasing
            // (stale link into a recycled node)
            if node_next == u16::max_value() as usize || node_pos <= bucket_nodes[node_next].pos() as usize {
                break;
            }
            node_index = node_next;
        }

        // `pos + max_len < buf.len()` keeps the match strictly inside the buffer
        if max_len >= super::LZ_MATCH_MIN_LEN && pos + max_len < buf.len() {
            return MatchResult::Matched {
                reduced_offset: node_size_bounded_sub(bucket.head, max_node_index as u16),
                match_len: max_len,
                match_len_expected: std::cmp::max(max_match_len_expected, super::LZ_MATCH_MIN_LEN),
                match_len_min: std::cmp::max(max_match_len_min, super::LZ_MATCH_MIN_LEN),
            };
        }
        return MatchResult::Unmatched;
    }

    /// Returns true if some chained candidate matches at least
    /// `min_match_len` bytes at `pos` — used to decide lazy evaluation.
    ///
    /// # Safety
    /// Same unchecked-read requirements as `find_match`.
    pub unsafe fn has_lazy_match(&self, bucket: &Bucket, buf: &[u8], pos: usize, min_match_len: usize, depth: usize) -> bool {
        let self_heads = &unchecked_index::unchecked_index(&self.heads);
        let self_nexts = &unchecked_index::unchecked_index(&self.nexts);
        let bucket_nodes = &unchecked_index::unchecked_index(&bucket.nodes);
        let entry = hash_dword(buf, pos) % super::LZ_MF_BUCKET_ITEM_HASH_SIZE;
        let mut node_index = self_heads[entry] as usize;

        if node_index == u16::max_value() as usize {
            return false;
        }
        let max_len_dword = buf.read::<u32>(pos + min_match_len - 4);

        for _ in 0..depth {
            let node_pos = bucket_nodes[node_index].pos() as usize;

            // first check the last 4 bytes of longest match (fast)
            // then perform full comparison
            if buf.read::<u32>(node_pos + min_match_len - 4) == max_len_dword {
                if super::mem::memequ_hack_fast(buf, node_pos, pos, min_match_len - 4) {
                    return true;
                }
            };
            let node_next = self_nexts[node_index] as usize;
            if node_next == u16::max_value() as usize || node_pos <= bucket_nodes[node_next].pos() as usize {
                break;
            }
            node_index = node_next;
        }
        return false;
    }
}

/// Packed per-position record: 25-bit position, 7-bit expected match length,
/// 8-bit minimum match length (40 bits = 5 bytes total).
#[bitfield]
#[derive(Clone, Copy)]
struct Node {
    pos: B25,
    match_len_expected: B7,
    match_len_min: B8,
}

// Reference the generated conversion methods so dead-code lints stay quiet.
#[allow(dead_code)]
fn _suppress_warnings() {
    let _ = Node::new().into_bytes();
    let _ = Node::from_bytes([0u8; 5]);
}

/// Ring-buffer addition modulo LZ_MF_BUCKET_ITEM_SIZE.
fn node_size_bounded_add(v1: u16, v2: u16) -> u16 {
    return (v1 + v2) % super::LZ_MF_BUCKET_ITEM_SIZE as u16;
}

/// Ring-buffer subtraction modulo LZ_MF_BUCKET_ITEM_SIZE (never underflows).
fn node_size_bounded_sub(v1: u16, v2: u16) -> u16 {
    return (v1 + super::LZ_MF_BUCKET_ITEM_SIZE as u16 - v2) % super::LZ_MF_BUCKET_ITEM_SIZE as u16;
}

/// CRC32C hash of the 4 bytes at `buf[pos..pos+4]`.
///
/// # Safety
/// `pos + 4` must be within `buf` (unchecked read).
unsafe fn hash_dword(buf: &[u8], pos: usize) -> usize {
    return crc32c_hw::update(0, &buf.read::<[u8; 4]>(pos)) as usize;
}
40.230159
126
0.56303
14f1d34b85b078e9b567cc24d6d688ad1fd63bf5
651
use ornament::{Decorator, TextFragment}; #[derive(Clone, Debug, PartialEq)] enum Face { Default, Error, } impl Default for Face { fn default() -> Self { Face::Default } } fn decorator(tf: &TextFragment<Face>) -> String { use Face::*; match tf.face { Default => tf.text.to_owned(), Error => format!("*{}*", tf.text), } } fn main() { let text = Decorator::with_text("This ") .set_face(Face::Error) .append("error") .reset_face() .append(" is important!") .build(); println!("{}", text.render(decorator)); // output: This *error* is important! }
19.727273
49
0.548387
79d3912a6a33578e354fd0f769bc5926373c3ee1
663
#![deny(clippy::all, clippy::perf, clippy::correctness)]

// Crate-private modules.
mod api;
mod caches;
mod commitment_reader;
mod pad_reader;

// Public modules.
pub mod constants;
pub mod fr32;
pub mod param;
pub mod parameters;
pub mod pieces;
pub mod serde_big_array;
pub mod singletons;
pub mod types;

// Flattened re-exports forming the crate's public surface.
pub use self::api::*;
pub use self::commitment_reader::*;
pub use self::constants::SINGLE_PARTITION_PROOF_LEN;
pub use self::pad_reader::*;
pub use self::param::{ParameterData, ParameterMap};
pub use self::types::*;

// Re-export the underlying proofs crate for downstream users.
pub use storage_proofs;

// Fixed RNG seed shared by this crate's tests for reproducibility.
#[cfg(test)]
pub(crate) const TEST_SEED: [u8; 16] = [
    0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
    0xe5,
];
22.1
99
0.719457
7227126a3a5a65b742c948bf7aad545096f40f45
2,462
use anyhow::Result;

use crate::Client;

/// Client for the miscellaneous "meta" endpoints of the GitHub REST API.
pub struct Meta {
    pub client: Client,
}

impl Meta {
    #[doc(hidden)]
    pub fn new(client: Client) -> Self {
        Meta { client }
    }

    /**
     * GitHub API Root.
     *
     * This function performs a `GET` to the `/` endpoint.
     *
     * Get Hypermedia links to resources accessible in GitHub's REST API
     *
     * FROM: <https://docs.github.com/rest/overview/resources-in-the-rest-api#root-endpoint>
     */
    pub async fn root(&self) -> Result<crate::types::MetaRootResponse> {
        // The root endpoint is the bare base URL.
        self.client.get("", None).await
    }

    /**
     * Get GitHub meta information.
     *
     * This function performs a `GET` to the `/meta` endpoint.
     *
     * Returns meta information about GitHub, including a list of GitHub's IP addresses. For more information, see "[About GitHub's IP addresses](https://help.github.com/articles/about-github-s-ip-addresses/)."
     *
     * **Note:** The IP addresses shown in the documentation's response are only example values. You must always query the API directly to get the latest list of IP addresses.
     *
     * FROM: <https://docs.github.com/rest/reference/meta#get-github-meta-information>
     */
    pub async fn get(&self) -> Result<crate::types::ApiOverview> {
        self.client.get("/meta", None).await
    }

    /**
     * Get Octocat.
     *
     * This function performs a `GET` to the `/octocat` endpoint.
     *
     * Get the octocat as ASCII art
     *
     * FROM: <https://docs.github.com/rest/reference/meta#get-octocat>
     *
     * **Parameters:**
     *
     * * `s: &str` -- The words to show in Octocat's speech bubble.
     */
    pub async fn get_octocat(&self, s: &str) -> Result<String> {
        // Only attach the `s` query parameter when the caller provided text.
        let query_args: Vec<(String, String)> = if s.is_empty() {
            Vec::new()
        } else {
            vec![("s".to_string(), s.to_string())]
        };
        let query_ = serde_urlencoded::to_string(&query_args).unwrap();
        let url = format!("/octocat?{}", query_);
        self.client.get(&url, None).await
    }

    /**
     * Get the Zen of GitHub.
     *
     * This function performs a `GET` to the `/zen` endpoint.
     *
     * Get a random sentence from the Zen of GitHub
     */
    pub async fn get_zen(&self) -> Result<String> {
        self.client.get("/zen", None).await
    }
}
30.02439
210
0.590983
61302c23bc80b59198b5f8d37f1704ee8a97ce86
4,981
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0

use crate::liveness::{
    leader_reputation::{
        ActiveInactiveHeuristic, LeaderReputation, MetadataBackend, ReputationHeuristic,
    },
    proposer_election::{next, ProposerElection},
};
use consensus_types::{
    block::{block_test_utils::certificate_for_genesis, Block},
    common::{Author, Round},
};
use diem_types::{block_metadata::NewBlockEvent, validator_signer::ValidatorSigner};

/// Test double for the metadata backend: serves the last `window_size`
/// events from a fixed in-memory list, ignoring the target round.
struct MockHistory {
    window_size: usize,
    data: Vec<NewBlockEvent>,
}

impl MockHistory {
    fn new(window_size: usize, data: Vec<NewBlockEvent>) -> Self {
        Self { window_size, data }
    }
}

impl MetadataBackend for MockHistory {
    fn get_block_metadata(&self, _target_round: Round) -> Vec<NewBlockEvent> {
        // Keep only the trailing window of events.
        let start = if self.data.len() > self.window_size {
            self.data.len() - self.window_size
        } else {
            0
        };
        self.data[start..].to_vec()
    }
}

/// Builds a NewBlockEvent with the given proposer and voter set;
/// epoch and timestamp are fixed to 0 (irrelevant for these tests).
fn create_block(proposer: Author, voters: Vec<&ValidatorSigner>) -> NewBlockEvent {
    NewBlockEvent::new(0, proposer, voters.iter().map(|v| v.author()).collect(), 0)
}

/// Checks ActiveInactiveHeuristic weight assignment: inactive weight for
/// everyone without history, active weight for proposers/voters seen in it.
#[test]
fn test_simple_heuristic() {
    let active_weight = 9;
    let inactive_weight = 1;
    let mut proposers = vec![];
    let mut signers = vec![];
    for i in 0..8 {
        // Deterministic signers keyed by index.
        let signer = ValidatorSigner::random([i; 32]);
        proposers.push(signer.author());
        signers.push(signer);
    }
    let heuristic = ActiveInactiveHeuristic::new(active_weight, inactive_weight);
    // 1. Window size not enough: no history, so everyone gets the inactive weight.
    let weights = heuristic.get_weights(&proposers, &[]);
    assert_eq!(weights.len(), proposers.len());
    for w in weights {
        assert_eq!(w, inactive_weight);
    }
    // 2. Sliding window with [proposer 0, voters 1, 2], [proposer 0, voters 3]
    //    => validators 0..=3 participated and become "active".
    let weights = heuristic.get_weights(
        &proposers,
        &[
            create_block(proposers[0], vec![&signers[1], &signers[2]]),
            create_block(proposers[0], vec![&signers[3]]),
        ],
    );
    assert_eq!(weights.len(), proposers.len());
    for (i, w) in weights.iter().enumerate() {
        let expected = if i < 4 { active_weight } else { inactive_weight };
        assert_eq!(*w, expected);
    }
}

/// End-to-end check of LeaderReputation through the ProposerElection trait:
/// recomputes the weighted choice by hand and verifies proposer/proposal
/// validation against it.
#[test]
fn test_api() {
    let active_weight = 9;
    let inactive_weight = 1;
    let mut proposers = vec![];
    let mut signers = vec![];
    for i in 0..5 {
        let signer = ValidatorSigner::random([i; 32]);
        proposers.push(signer.author());
        signers.push(signer);
    }
    let history = vec![
        create_block(proposers[0], vec![&signers[1], &signers[2]]),
        create_block(proposers[0], vec![&signers[3]]),
    ];
    let leader_reputation = LeaderReputation::new(
        proposers.clone(),
        Box::new(MockHistory::new(1, history)),
        Box::new(ActiveInactiveHeuristic::new(active_weight, inactive_weight)),
    );
    let round = 42u64;
    // first metadata is ignored because of window size 1
    let expected_weights = vec![
        active_weight,
        inactive_weight,
        inactive_weight,
        active_weight,
        inactive_weight,
    ];
    let sum = expected_weights.iter().fold(0, |mut s, w| {
        s += *w;
        s
    });
    // Reproduce the election's deterministic pseudo-random draw.
    let mut state = round.to_le_bytes().to_vec();
    let chosen_weight = next(&mut state) % sum;
    let mut expected_index = 0usize;
    let mut accu = 0u64;
    // NOTE(review): this loop has no `break`, so once the cumulative weight
    // crosses `chosen_weight` every later index also satisfies the condition
    // and `expected_index` ends at the last index. Presumably the test only
    // passes because the deterministic draw lands there — TODO confirm
    // against the selection logic in leader_reputation.
    for (i, w) in expected_weights.iter().enumerate() {
        accu += *w;
        if accu >= chosen_weight {
            expected_index = i;
        }
    }
    let unexpected_index = (expected_index + 1) % proposers.len();
    let proposer_election: Box<dyn ProposerElection> = Box::new(leader_reputation);
    let output = proposer_election.get_valid_proposer(round);
    assert_eq!(output, proposers[expected_index]);
    assert!(proposer_election.is_valid_proposer(proposers[expected_index], 42));
    assert!(!proposer_election.is_valid_proposer(proposers[unexpected_index], 42));
    let good_proposal = Block::new_proposal(
        vec![],
        round,
        1,
        certificate_for_genesis(),
        &signers[expected_index],
    );
    assert!(proposer_election.is_valid_proposal(&good_proposal));
    let bad_proposal = Block::new_proposal(
        vec![],
        round,
        1,
        certificate_for_genesis(),
        &signers[unexpected_index],
    );
    assert!(!proposer_election.is_valid_proposal(&bad_proposal));
    let bad_proposal_2 = Block::new_proposal(
        vec![],
        round,
        2,
        certificate_for_genesis(),
        &signers[expected_index],
    );
    assert_ne!(good_proposal.id(), bad_proposal_2.id());
    // another proposal from the valid proposer should fail
    assert!(!proposer_election.is_valid_proposal(&bad_proposal_2));
    // good proposal still passes
    assert!(proposer_election.is_valid_proposal(&good_proposal));
}
88
0.62638
218502300c3fb1fb152ef6818e2de76d6daebb42
420
// Test `OpImageSampleProjImplicitLod` // build-pass use spirv_std::{Image, Sampler}; #[spirv(fragment)] pub fn main( #[spirv(descriptor_set = 0, binding = 0)] image2d: &Image!(2D, type=f32, sampled), #[spirv(descriptor_set = 1, binding = 1)] sampler: &Sampler, output: &mut glam::Vec4, ) { let v3 = glam::Vec3::new(0.0, 1.0, 0.5); *output = image2d.sample_with_project_coordinate(*sampler, v3); }
28
86
0.657143
ddbd500fb8cfe7c125b07dfedb7fc7fdee8d05a2
1,711
use svm_common::Address; /// Contract execution error #[allow(missing_docs)] #[derive(PartialEq, Clone)] pub enum ContractExecError { NotFound(Address), CompilationFailed(Address), InstantiationFailed(Address), FuncNotFound(String), ExecFailed, } impl std::error::Error for ContractExecError { fn description(&self) -> &'static str { match self { ContractExecError::NotFound(_) => "Contract not found", ContractExecError::CompilationFailed(_) => "Compilation failed", ContractExecError::InstantiationFailed(_) => "Instance Instantiation failed", ContractExecError::FuncNotFound(_) => "Function not found", ContractExecError::ExecFailed => "Execution failed", } } } impl std::fmt::Display for ContractExecError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let msg = match self { ContractExecError::NotFound(addr) => format!("Contract `{:?}` not found", addr), ContractExecError::CompilationFailed(addr) => { format!("Compilation failed for contract `{:?}`", addr) } ContractExecError::InstantiationFailed(addr) => { format!("Instance Instantiation failed for contract `{:?}`", addr) } ContractExecError::FuncNotFound(func) => format!("Function `{}` not found", func), ContractExecError::ExecFailed => "Execution failed".to_string(), }; write!(f, "{}", msg) } } impl std::fmt::Debug for ContractExecError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { <Self as std::fmt::Display>::fmt(self, f) } }
34.918367
94
0.611338
4a9b3a31423d4e3392afccb2328baa9756f71087
3,479
//! Memory management for executable code.

use alloc::boxed::Box;
use alloc::string::String;
use alloc::vec::Vec;
use core::{cmp, mem};
use region;
use wasmtime_runtime::{Mmap, VMFunctionBody};

/// Memory manager for executable code.
///
/// Allocations are carved sequentially out of `current`; filled mmaps are
/// parked in `mmaps`. `publish()` flips everything allocated so far to
/// read+execute; `published` tracks how many mmaps are already protected.
pub struct CodeMemory {
    current: Mmap,
    mmaps: Vec<Mmap>,
    position: usize,
    published: usize,
}

impl CodeMemory {
    /// Create a new `CodeMemory` instance.
    pub fn new() -> Self {
        Self {
            current: Mmap::new(),
            mmaps: Vec::new(),
            position: 0,
            published: 0,
        }
    }

    /// Allocate `size` bytes of memory which can be made executable later by
    /// calling `publish()`. Note that we allocate the memory as writeable so
    /// that it can be written to and patched, though we make it readonly before
    /// actually executing from it.
    ///
    /// TODO: Add an alignment flag.
    fn allocate(&mut self, size: usize) -> Result<&mut [u8], String> {
        // Not enough room left in the current mmap: retire it and map a new
        // one of at least 64 KiB (or `size`, if larger).
        if self.current.len() - self.position < size {
            self.mmaps.push(mem::replace(
                &mut self.current,
                Mmap::with_at_least(cmp::max(0x10000, size))?,
            ));
            self.position = 0;
        }
        let old_position = self.position;
        self.position += size;
        Ok(&mut self.current.as_mut_slice()[old_position..self.position])
    }

    /// Convert mut a slice from u8 to VMFunctionBody.
    fn view_as_mut_vmfunc_slice(slice: &mut [u8]) -> &mut [VMFunctionBody] {
        let byte_ptr: *mut [u8] = slice;
        let body_ptr = byte_ptr as *mut [VMFunctionBody];
        // SAFETY: reinterprets the same region; presumably VMFunctionBody is a
        // 1-byte transparent wrapper over u8 so length/layout match — defined
        // in wasmtime_runtime, TODO confirm.
        unsafe { &mut *body_ptr }
    }

    /// Allocate enough memory to hold a copy of `slice` and copy the data into it.
    /// TODO: Reorganize the code that calls this to emit code directly into the
    /// mmap region rather than into a Vec that we need to copy in.
    pub fn allocate_copy_of_byte_slice(
        &mut self,
        slice: &[u8],
    ) -> Result<&mut [VMFunctionBody], String> {
        let new = self.allocate(slice.len())?;
        new.copy_from_slice(slice);
        Ok(Self::view_as_mut_vmfunc_slice(new))
    }

    /// Allocate enough continuous memory block for multiple code blocks. See also
    /// allocate_copy_of_byte_slice.
    ///
    /// All slices share one contiguous allocation; the returned boxed slice
    /// holds one sub-slice per input, in order.
    pub fn allocate_copy_of_byte_slices(
        &mut self,
        slices: &[&[u8]],
    ) -> Result<Box<[&mut [VMFunctionBody]]>, String> {
        let total_len = slices.into_iter().fold(0, |acc, slice| acc + slice.len());
        let new = self.allocate(total_len)?;
        // Walk the allocation front-to-back, splitting off one block per input.
        let mut tail = new;
        let mut result = Vec::with_capacity(slices.len());
        for slice in slices {
            let (block, next_tail) = tail.split_at_mut(slice.len());
            block.copy_from_slice(slice);
            tail = next_tail;
            result.push(Self::view_as_mut_vmfunc_slice(block));
        }
        Ok(result.into_boxed_slice())
    }

    /// Make all allocated memory executable.
    ///
    /// Retires the current mmap, then flips every not-yet-published mmap to
    /// read+execute. Panics if protection fails.
    pub fn publish(&mut self) {
        self.mmaps
            .push(mem::replace(&mut self.current, Mmap::new()));
        self.position = 0;

        // Only protect mmaps added since the last publish().
        for m in &mut self.mmaps[self.published..] {
            if m.len() != 0 {
                // SAFETY: pointer and length come from a live Mmap we own.
                unsafe {
                    region::protect(m.as_mut_ptr(), m.len(), region::Protection::ReadExecute)
                }
                .expect("unable to make memory readonly and executable");
            }
        }

        self.published = self.mmaps.len();
    }
}
93
0.584651
7906af8634b65d16abb45492429c4071bf5e2e9c
1,095
// svd2rust-generated register accessors; do not hand-edit behavior.
#[doc = "Reader of register SBKEY_BODY0"]
pub type R = crate::R<u32, super::SBKEY_BODY0>;
#[doc = "Writer for register SBKEY_BODY0"]
pub type W = crate::W<u32, super::SBKEY_BODY0>;
#[doc = "Register SBKEY_BODY0 `reset()`'s with value 0"]
impl crate::ResetValue for super::SBKEY_BODY0 {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `FIELD`"]
pub type FIELD_R = crate::R<u32, u32>;
#[doc = "Write proxy for field `FIELD`"]
pub struct FIELD_W<'a> {
    w: &'a mut W,
}
impl<'a> FIELD_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u32) -> &'a mut W {
        // FIELD occupies the full 32-bit register, so the mask covers all bits.
        self.w.bits = (self.w.bits & !0xffff_ffff) | ((value as u32) & 0xffff_ffff);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:31 - ."]
    #[inline(always)]
    pub fn field(&self) -> FIELD_R {
        FIELD_R::new((self.bits & 0xffff_ffff) as u32)
    }
}
impl W {
    #[doc = "Bits 0:31 - ."]
    #[inline(always)]
    pub fn field(&mut self) -> FIELD_W {
        FIELD_W { w: self }
    }
}
26.707317
84
0.573516
fcbc69901cc4dd5e620710ef32808d4c6e895617
1,321
use hap::{
    accessory::{window::WindowAccessory, AccessoryCategory, AccessoryInformation},
    server::{IpServer, Server},
    storage::{FileStorage, Storage},
    tokio, Config, MacAddress, Pin, Result,
};

/// HomeKit example: serve a single "Acme Window" accessory over IP,
/// persisting the pairing config in the current directory.
#[tokio::main]
async fn main() -> Result<()> {
    let window = WindowAccessory::new(1, AccessoryInformation {
        name: "Acme Window".into(),
        ..Default::default()
    })?;

    let mut storage = FileStorage::current_dir().await?;

    // Reuse a previously saved config (refreshing the local IP) or create
    // a fresh one on first run.
    let config = match storage.load_config().await {
        Ok(mut config) => {
            config.redetermine_local_ip();
            storage.save_config(&config).await?;
            config
        },
        Err(_) => {
            let config = Config {
                pin: Pin::new([1, 1, 1, 2, 2, 3, 3, 3])?,
                name: "Acme Window".into(),
                device_id: MacAddress::new([10, 20, 30, 40, 50, 60]),
                category: AccessoryCategory::Window,
                ..Default::default()
            };
            storage.save_config(&config).await?;
            config
        },
    };

    let server = IpServer::new(config, storage).await?;
    server.add_accessory(window).await?;

    let handle = server.run_handle();

    // NOTE(review): logging is configured after the run handle is created;
    // presumably the server only starts on `.await`, so startup logs are
    // still captured — TODO confirm against hap's run_handle semantics.
    std::env::set_var("RUST_LOG", "hap=debug");
    env_logger::init();

    handle.await
}
26.42
82
0.543528
dd19d97fb1287099c1e88cf0a7fea05c0d5decad
27,899
use crate::codegen::cfg::Instr; use crate::sema::ast::{Expression, StringLocation}; use num_bigint::BigInt; use std::cell::RefCell; use std::collections::{HashMap, HashSet}; use std::rc::Rc; /* The available expression analysis implemented here build a graph to track expressions. Each operand and each operation represents a vertex. Edges are directed from operands to an operation. Let's say we have a+b. 'a', 'b' and 'e1=a+b' are vertexes. Edges are directed from 'a' to 'e1=a+b' and from 'b' to 'a+b'. If we add now 'a+b-c', we will have two new nodes: 'c' and 'e2=e1-c'. Edges will connect 'c' to 'e2=e1-c' and 'e1=a+b' to 'e2=e1-c'. Whenever a variable becomes unavailable (i.e. we kill its definition), we recursively remove the operand node and all its children operations from the graph. */ /// This enum defines operator types for the graph #[derive(PartialEq, Eq, Hash, Copy, Clone)] enum Operator { Add, Subtract, Multiply, Divide, Modulo, Power, BitwiseOr, BitwiseAnd, BitwiseXor, ShiftLeft, ShiftRight, Or, And, More, Less, MoreEqual, LessEqual, Equal, NotEqual, StringConcat, StringCompare, //Unary operations Not, ZeroExt, SignExt, Trunc, Cast, BytesCast, UnaryMinus, Complement, } /// NodeId is the identifier of each vertex of the graph pub type NodeId = usize; /// Each BasicExpression is a graph node #[derive(Clone)] pub struct BasicExpression { expr_type: ExpressionType, expression_id: NodeId, children: HashMap<NodeId, Rc<RefCell<BasicExpression>>>, } /// Type of constant to streamline the use of a hashmap #[derive(Eq, PartialEq, Hash, Clone)] pub enum ConstantType { Bool(bool), Bytes(Vec<u8>), Number(BigInt), ConstantVariable(Option<usize>, usize), } /// The type of expression that a node represents #[derive(Clone, PartialEq, Hash, Eq)] enum ExpressionType { BinaryOperation(NodeId, NodeId, Operator), UnaryOperation(NodeId, Operator), Variable(usize), FunctionArg(usize), Literal(ConstantType), } /// Sets contain the available expression at a certain portion of the 
CFG #[derive(Default)] pub struct AvailableExpressionSet { // node_no => BasicExpression expression_memory: HashMap<NodeId, Rc<RefCell<BasicExpression>>>, // Expression => node_id expr_map: HashMap<ExpressionType, NodeId>, } /// This struct serves only to maintain a global id, in such a way that new nodes will always have /// a different ID #[derive(Default)] pub struct AvailableExpression { global_id_counter: NodeId, } /// Get the respective Operator from an Expression fn get_operator_from_expression(exp: &Expression) -> Operator { match exp { Expression::Add(..) => Operator::Add, Expression::Subtract(..) => Operator::Subtract, Expression::Multiply(..) => Operator::Multiply, Expression::Divide(..) => Operator::Divide, Expression::Modulo(..) => Operator::Modulo, Expression::Power(..) => Operator::Power, Expression::BitwiseOr(..) => Operator::BitwiseOr, Expression::BitwiseAnd(..) => Operator::BitwiseAnd, Expression::BitwiseXor(..) => Operator::BitwiseXor, Expression::ShiftLeft(..) => Operator::ShiftLeft, Expression::ShiftRight(..) => Operator::ShiftRight, Expression::Or(..) => Operator::Or, Expression::And(..) => Operator::And, Expression::Not(..) => Operator::Not, Expression::ZeroExt(..) => Operator::ZeroExt, Expression::SignExt(..) => Operator::SignExt, Expression::Trunc(..) => Operator::Trunc, Expression::Cast(..) => Operator::Cast, Expression::BytesCast(..) => Operator::BytesCast, Expression::UnaryMinus(..) => Operator::UnaryMinus, Expression::More(..) => Operator::More, Expression::Less(..) => Operator::Less, Expression::MoreEqual(..) => Operator::MoreEqual, Expression::LessEqual(..) => Operator::LessEqual, Expression::Equal(..) => Operator::Equal, Expression::NotEqual(..) => Operator::NotEqual, Expression::Complement(..) => Operator::Complement, Expression::StringCompare(..) => Operator::StringCompare, Expression::StringConcat(..) 
=> Operator::StringConcat, _ => { unreachable!("Expression does not represent an operator.") } } } impl AvailableExpression { /// Add a node to represent a literal pub fn add_literal_node( &mut self, expr: &Expression, expr_set: &mut AvailableExpressionSet, ) -> NodeId { let expr_type = match expr { Expression::BoolLiteral(_, value) => { ExpressionType::Literal(ConstantType::Bool(*value)) } Expression::NumberLiteral(_, _, value) => { ExpressionType::Literal(ConstantType::Number(value.clone())) } Expression::BytesLiteral(_, _, value) => { ExpressionType::Literal(ConstantType::Bytes(value.clone())) } Expression::ConstantVariable(_, _, contract_no, var_no) => { ExpressionType::Literal(ConstantType::ConstantVariable(*contract_no, *var_no)) } _ => unreachable!("This expression is not a literal or a constant variable"), }; expr_set.expression_memory.insert( self.global_id_counter, Rc::new(RefCell::new(BasicExpression { expr_type: expr_type.clone(), expression_id: self.global_id_counter, children: Default::default(), })), ); expr_set.expr_map.insert(expr_type, self.global_id_counter); self.global_id_counter += 1; self.global_id_counter - 1 } /// Add a node to represent a variable pub fn add_variable_node( &mut self, expr: &Expression, expr_set: &mut AvailableExpressionSet, ) -> NodeId { let expr_type = match expr { Expression::Variable(_, _, pos) => ExpressionType::Variable(*pos), Expression::FunctionArg(_, _, pos) => ExpressionType::FunctionArg(*pos), _ => unreachable!("This expression is not a variable or a function argument"), }; expr_set.expression_memory.insert( self.global_id_counter, Rc::new(RefCell::new(BasicExpression { expr_type: expr_type.clone(), expression_id: self.global_id_counter, children: Default::default(), })), ); expr_set.expr_map.insert(expr_type, self.global_id_counter); self.global_id_counter += 1; self.global_id_counter - 1 } /// Add a node to represent a binary expression pub fn add_binary_node( &mut self, exp: &Expression, expr_set: &mut 
AvailableExpressionSet, left: NodeId, right: NodeId, ) -> NodeId { let operation = get_operator_from_expression(exp); let new_node = Rc::new(RefCell::new(BasicExpression { expr_type: ExpressionType::BinaryOperation(left, right, operation), expression_id: self.global_id_counter, children: Default::default(), })); expr_set .expression_memory .insert(self.global_id_counter, Rc::clone(&new_node)); expr_set.expr_map.insert( ExpressionType::BinaryOperation(left, right, operation), self.global_id_counter, ); expr_set .expression_memory .get_mut(&left) .unwrap() .borrow_mut() .children .insert(self.global_id_counter, Rc::clone(&new_node)); expr_set .expression_memory .get_mut(&right) .unwrap() .borrow_mut() .children .insert(self.global_id_counter, Rc::clone(&new_node)); self.global_id_counter += 1; self.global_id_counter - 1 } /// Add a node to represent an unary operation pub fn add_unary_node( &mut self, exp: &Expression, parent: usize, expr_set: &mut AvailableExpressionSet, ) -> NodeId { let operation = get_operator_from_expression(exp); let new_node = Rc::new(RefCell::new(BasicExpression { expr_type: ExpressionType::UnaryOperation(parent, operation), expression_id: self.global_id_counter, children: Default::default(), })); expr_set .expression_memory .insert(self.global_id_counter, Rc::clone(&new_node)); expr_set.expr_map.insert( ExpressionType::UnaryOperation(parent, operation), self.global_id_counter, ); expr_set .expression_memory .get_mut(&parent) .unwrap() .borrow_mut() .children .insert(self.global_id_counter, Rc::clone(&new_node)); self.global_id_counter += 1; self.global_id_counter - 1 } } impl AvailableExpressionSet { /// Check if a commutative expression exists in the set fn check_commutative( &self, exp: &Expression, left: &Expression, right: &Expression, ) -> Option<NodeId> { let left_id = self.find_expression(left)?; let right_id = self.find_expression(right)?; let operator = get_operator_from_expression(exp); if let Some(exp_id) = 
self.expr_map.get(&ExpressionType::BinaryOperation( left_id, right_id, operator, )) { Some(*exp_id) } else { self.expr_map .get(&ExpressionType::BinaryOperation( right_id, left_id, operator, )) .copied() } } /// Add a commutative expression to the set if it is not there yet fn process_commutative( &mut self, exp: &Expression, left: &Expression, right: &Expression, ave: &mut AvailableExpression, ) -> Option<NodeId> { let left_id = self.gen_expression(left, ave)?; let right_id = self.gen_expression(right, ave)?; let operator = get_operator_from_expression(exp); if let Some(exp_id) = self.expr_map.get(&ExpressionType::BinaryOperation( left_id, right_id, operator, )) { return Some(*exp_id); } else if let Some(exp_id) = self.expr_map.get(&ExpressionType::BinaryOperation( right_id, left_id, operator, )) { return Some(*exp_id); } Some(ave.add_binary_node(exp, self, left_id, right_id)) } /// Get the hashmap key for a constant variable or a literal fn constant_key(exp: &Expression) -> ConstantType { match exp { Expression::ConstantVariable(_, _, contract_no, var_no) => { ConstantType::ConstantVariable(*contract_no, *var_no) } Expression::BytesLiteral(_, _, value) => ConstantType::Bytes(value.clone()), Expression::BoolLiteral(_, value) => ConstantType::Bool(*value), Expression::NumberLiteral(_, _, value) => ConstantType::Number(value.clone()), _ => unreachable!("Not a constant"), } } fn add_variable_or_arg( &mut self, exp: &Expression, expr_type: &ExpressionType, ave: &mut AvailableExpression, ) -> NodeId { if let Some(id) = self.expr_map.get(expr_type) { *id } else { ave.add_variable_node(exp, self) } } /// Add an expression to the graph if it does not exists there fn gen_expression( &mut self, exp: &Expression, ave: &mut AvailableExpression, ) -> Option<NodeId> { match exp { Expression::FunctionArg(_, _, pos) => { Some(self.add_variable_or_arg(exp, &ExpressionType::FunctionArg(*pos), ave)) } Expression::Variable(_, _, pos) => { Some(self.add_variable_or_arg(exp, 
&ExpressionType::Variable(*pos), ave)) } Expression::ConstantVariable(..) | Expression::NumberLiteral(..) | Expression::BoolLiteral(..) | Expression::BytesLiteral(..) => { let key = AvailableExpressionSet::constant_key(exp); let exp_id = if let Some(id) = self.expr_map.get(&ExpressionType::Literal(key)) { *id } else { ave.add_literal_node(exp, self) }; Some(exp_id) } // These operations are commutative Expression::Add(_, _, _, left, right) | Expression::Multiply(_, _, _, left, right) | Expression::BitwiseOr(_, _, left, right) | Expression::BitwiseAnd(_, _, left, right) | Expression::BitwiseXor(_, _, left, right) | Expression::Or(_, left, right) | Expression::And(_, left, right) | Expression::Equal(_, left, right) | Expression::NotEqual(_, left, right) => { self.process_commutative(exp, left, right, ave) } // These operations are not commutative Expression::Subtract(_, _, _, left, right) | Expression::Divide(_, _, left, right) | Expression::Modulo(_, _, left, right) | Expression::Power(_, _, _, left, right) | Expression::ShiftLeft(_, _, left, right) | Expression::ShiftRight(_, _, left, right, _) | Expression::More(_, right, left) | Expression::Less(_, right, left) | Expression::MoreEqual(_, right, left) => { let left_id = self.gen_expression(left, ave)?; let right_id = self.gen_expression(right, ave)?; let operator = get_operator_from_expression(exp); if let Some(exp_id) = self.expr_map.get(&ExpressionType::BinaryOperation( left_id, right_id, operator, )) { return Some(*exp_id); } Some(ave.add_binary_node(exp, self, left_id, right_id)) } // Unary operations Expression::ZeroExt(_, _, operand) | Expression::SignExt(_, _, operand) | Expression::Trunc(_, _, operand) | Expression::Cast(_, _, operand) | Expression::BytesCast(_, _, _, operand) | Expression::Not(_, operand) | Expression::Complement(_, _, operand) | Expression::UnaryMinus(_, _, operand) => { let id = self.gen_expression(operand, ave)?; let operator = get_operator_from_expression(exp); if let Some(expr_id) = 
self .expr_map .get(&ExpressionType::UnaryOperation(id, operator)) { return Some(*expr_id); } Some(ave.add_unary_node(exp, id, self)) } Expression::StringCompare(_, left, right) | Expression::StringConcat(_, _, left, right) => { if let (StringLocation::RunTime(operand_1), StringLocation::RunTime(operand_2)) = (left, right) { return self.process_commutative(exp, operand_1, operand_2, ave); } None } // Due to reaching definitions limitations, it is not possible to keep track of // the following operations Expression::StorageVariable(..) | Expression::Load(..) | Expression::StorageLoad(..) | Expression::Subscript(..) | Expression::DynamicArraySubscript(..) | Expression::InternalFunction { .. } | Expression::ExternalFunction { .. } | Expression::InternalFunctionCall { .. } | Expression::ExternalFunctionCall { .. } | Expression::ExternalFunctionCallRaw { .. } => None, _ => None, } } /// Remove from the set all children from a node fn kill_child(&mut self, child_node: &Rc<RefCell<BasicExpression>>, parent_id: &NodeId) { self.kill_recursive(&*child_node.borrow(), parent_id); child_node.borrow_mut().children.clear(); } /// Recursively remove from the set all the children of a node fn kill_recursive(&mut self, basic_exp: &BasicExpression, parent_id: &NodeId) { for (child_id, node) in &basic_exp.children { self.kill_child(node, &basic_exp.expression_id); self.expression_memory.remove(child_id); } if let ExpressionType::BinaryOperation(left, right, _) = &basic_exp.expr_type { let other_parent = if *left == *parent_id { right } else { left }; self.expression_memory .get_mut(other_parent) .unwrap() .borrow_mut() .children .remove(&basic_exp.expression_id); } self.expr_map.remove(&basic_exp.expr_type); } /// When a reaching definition change, we remove the variable node and all its descendants from /// the graph pub fn kill(&mut self, var_no: usize) { let key = ExpressionType::Variable(var_no); if !self.expr_map.contains_key(&key) { return; } let var_id = self.expr_map[&key]; 
let var_node = self.expression_memory[&var_id].clone(); for (child_id, node) in &var_node.borrow_mut().children { self.kill_child(node, &var_id); self.expression_memory.remove(child_id); } self.expression_memory.remove(&var_id); self.expr_map.remove(&key); } /// Check if we can add the expressions of an instruction to the graph pub fn process_instruction(&mut self, instr: &Instr, ave: &mut AvailableExpression) { match instr { Instr::BranchCond { cond: expr, .. } | Instr::Store { dest: expr, .. } | Instr::LoadStorage { storage: expr, .. } | Instr::ClearStorage { storage: expr, .. } | Instr::Print { expr } | Instr::AssertFailure { expr: Some(expr) } | Instr::PopStorage { storage: expr, .. } | Instr::AbiDecode { data: expr, .. } | Instr::SelfDestruct { recipient: expr } | Instr::Set { expr, .. } => { let _ = self.gen_expression(expr, ave); } Instr::PushMemory { value: expr, .. } => { let _ = self.gen_expression(expr, ave); } Instr::SetStorage { value, storage, .. } | Instr::PushStorage { value, storage, .. } => { let _ = self.gen_expression(value, ave); let _ = self.gen_expression(storage, ave); } Instr::SetStorageBytes { value, storage, offset, } => { let _ = self.gen_expression(value, ave); let _ = self.gen_expression(storage, ave); let _ = self.gen_expression(offset, ave); } Instr::Return { value: exprs } | Instr::Call { args: exprs, .. } => { for expr in exprs { let _ = self.gen_expression(expr, ave); } } Instr::Constructor { args, value, gas, salt, space, .. } => { for arg in args { let _ = self.gen_expression(arg, ave); } if let Some(expr) = value { let _ = self.gen_expression(expr, ave); } let _ = self.gen_expression(gas, ave); if let Some(expr) = salt { let _ = self.gen_expression(expr, ave); } if let Some(expr) = space { let _ = self.gen_expression(expr, ave); } } Instr::ExternalCall { address, payload, value, gas, .. 
} => { if let Some(expr) = address { let _ = self.gen_expression(expr, ave); } let _ = self.gen_expression(payload, ave); let _ = self.gen_expression(value, ave); let _ = self.gen_expression(gas, ave); } Instr::ValueTransfer { address, value, .. } => { let _ = self.gen_expression(address, ave); let _ = self.gen_expression(value, ave); } Instr::EmitEvent { data, topics, .. } => { for expr in data { let _ = self.gen_expression(expr, ave); } for expr in topics { let _ = self.gen_expression(expr, ave); } } Instr::AssertFailure { expr: None } | Instr::Unreachable | Instr::Nop | Instr::Branch { .. } | Instr::PopMemory { .. } => {} } } fn check_intersection( key: &ExpressionType, value: &NodeId, set_2: &AvailableExpressionSet, ) -> bool { if !set_2.expr_map.contains_key(key) { return false; } if matches!(key, ExpressionType::Variable(_)) { return *value == set_2.expr_map[key]; } true } /// When we exit two blocks, we must intersect their set of available expressions pub fn intersect_sets(&mut self, set_2: &AvailableExpressionSet) { self.expr_map .retain(|key, value| AvailableExpressionSet::check_intersection(key, value, set_2)); let mut to_maintain: HashSet<usize> = HashSet::new(); // Check if an expression is available on both sets, but has a different global id for node_id in self.expr_map.values() { if !set_2.expression_memory.contains_key(node_id) { to_maintain.insert(*node_id); self.expression_memory[node_id] .borrow_mut() .children .clear(); } } self.expression_memory.retain(|key, _| { set_2.expression_memory.contains_key(key) || to_maintain.contains(key) }); for (key, value) in &self.expression_memory { if let Some(node) = set_2.expression_memory.get(key) { value.borrow_mut().children.retain(|child_id, _| { node.borrow().children.contains_key(child_id) || to_maintain.contains(child_id) }); } } } fn check_variable_or_arg(&self, expr_type: &ExpressionType) -> Option<NodeId> { self.expr_map.get(expr_type).copied() } /// Check if an expression is available pub fn 
find_expression(&self, exp: &Expression) -> Option<NodeId> { match exp { Expression::FunctionArg(_, _, pos) => { self.check_variable_or_arg(&ExpressionType::FunctionArg(*pos)) } Expression::Variable(_, _, pos) => { self.check_variable_or_arg(&ExpressionType::Variable(*pos)) } Expression::ConstantVariable(..) | Expression::NumberLiteral(..) | Expression::BoolLiteral(..) | Expression::BytesLiteral(..) => { let key = AvailableExpressionSet::constant_key(exp); self.expr_map.get(&ExpressionType::Literal(key)).copied() } Expression::Add(_, _, _, left, right) | Expression::Multiply(_, _, _, left, right) | Expression::BitwiseOr(_, _, left, right) | Expression::BitwiseAnd(_, _, left, right) | Expression::BitwiseXor(_, _, left, right) | Expression::Or(_, left, right) | Expression::And(_, left, right) | Expression::Equal(_, left, right) | Expression::NotEqual(_, left, right) => self.check_commutative(exp, left, right), // These operations are not commutative Expression::Subtract(_, _, _, left, right) | Expression::Divide(_, _, left, right) | Expression::Modulo(_, _, left, right) | Expression::Power(_, _, _, left, right) | Expression::ShiftLeft(_, _, left, right) | Expression::ShiftRight(_, _, left, right, _) | Expression::More(_, right, left) | Expression::Less(_, right, left) | Expression::MoreEqual(_, right, left) => { let left_id = self.find_expression(left)?; let right_id = self.find_expression(right)?; let operator = get_operator_from_expression(exp); if let Some(exp_id) = self.expr_map.get(&ExpressionType::BinaryOperation( left_id, right_id, operator, )) { return Some(*exp_id); } None } Expression::ZeroExt(_, _, operand) | Expression::SignExt(_, _, operand) | Expression::Trunc(_, _, operand) | Expression::Cast(_, _, operand) | Expression::BytesCast(_, _, _, operand) | Expression::Not(_, operand) | Expression::Complement(_, _, operand) | Expression::UnaryMinus(_, _, operand) => { let id = self.find_expression(operand)?; let operator = get_operator_from_expression(exp); 
if let Some(expr_id) = self .expr_map .get(&ExpressionType::UnaryOperation(id, operator)) { return Some(*expr_id); } None } Expression::StringCompare(_, left, right) | Expression::StringConcat(_, _, left, right) => { if let (StringLocation::RunTime(operand_1), StringLocation::RunTime(operand_2)) = (left, right) { return self.check_commutative(exp, operand_1, operand_2); } None } _ => None, } } } impl Clone for AvailableExpressionSet { /// Clone a set fn clone(&self) -> AvailableExpressionSet { let mut new_set = AvailableExpressionSet { expression_memory: HashMap::default(), expr_map: self.expr_map.clone(), }; for (key, value) in &self.expression_memory { new_set.expression_memory.insert( *key, Rc::new(RefCell::new(BasicExpression { expr_type: value.borrow().expr_type.clone(), expression_id: value.borrow().expression_id, children: HashMap::default(), })), ); } for (key, value) in &self.expression_memory { let node = new_set.expression_memory.get(key).unwrap(); for child_id in value.borrow().children.keys() { node.borrow_mut().children.insert( *child_id, Rc::clone(new_set.expression_memory.get(child_id).unwrap()), ); } } new_set } }
34.528465
99
0.546471
d91ea9b2e466e553509f2f51ecc6476482ad2199
2,196
extern crate rand; pub use self::rand::prelude::*; /// MATLAB like zeros - zero matrix /// /// # Examples /// ``` /// #[macro_use] /// extern crate peroxide; /// use peroxide::fuga::*; /// /// fn main() { /// let a = zeros!(4); /// assert_eq!(a, c!(0,0,0,0)); /// /// let b = zeros!(3, 2); /// assert_eq!(b, matrix(c!(0,0,0,0,0,0), 3, 2, Row)); /// } /// ``` #[macro_export] macro_rules! zeros { ( $n:expr ) => { vec![0f64; $n] }; ( $r:expr, $c:expr ) => {{ let (r, c) = ($r, $c); matrix(vec![0f64; r * c], r, c, Row) }}; } /// MATLAB like rand - random matrix /// /// # Examples /// ``` /// #[macro_use] /// extern crate peroxide; /// use peroxide::fuga::*; /// /// fn main() { /// let a = rand!(2, 2); /// println!("{}", a); // 2 x 2 random matrix (0 ~ 1) /// } /// ``` #[macro_export] macro_rules! rand { () => {{ let mut rng = thread_rng(); rng.gen_range(0f64..=1f64) }}; ( $m:expr, $n:expr ) => {{ let r = $m; let c = $n; let mut rng = thread_rng(); let mut m = matrix(vec![0f64; r * c], r, c, Row); for i in 0..r { for j in 0..c { m[(i, j)] = rng.gen_range(0f64..=1f64); } } m }}; } /// MATLAB like eye - identity matrix /// /// # Examples /// /// ``` /// #[macro_use] /// extern crate peroxide; /// use peroxide::fuga::*; /// /// fn main() { /// let i = eye!(2); /// assert_eq!(i, matrix(c!(1,0,0,1), 2, 2, Row)); /// } /// ``` #[macro_export] macro_rules! eye { ( $n:expr ) => {{ let n = $n; let mut m = matrix(vec![0f64; n * n], n, n, Row); for i in 0..n { m[(i, i)] = 1f64; } m }}; } /// MATLAB like linspace /// /// # Examples /// ``` /// #[macro_use] /// extern crate peroxide; /// use peroxide::fuga::*; /// /// fn main() { /// let a = linspace!(1, 10, 10); /// assert_eq!(a, seq!(1,10,1)); /// } /// ``` #[macro_export] macro_rules! linspace { ( $start:expr, $end:expr, $length: expr) => {{ let step = ($end - $start) as f64 / ($length as f64 - 1f64); seq!($start, $end, step) }}; }
19.607143
68
0.438525
d6be98dd573c2bcc998833f65dc2452ec9dc0f1e
14,893
// This file is Copyright its original authors, visible in version control // history. // // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. // You may not use this file except in accordance with one or both of these // licenses. //! Handles all over the wire message encryption and decryption upon handshake completion. use ln::peers::{chacha, hkdf5869rfc}; use util::byte_utils; use std::collections::VecDeque; pub type SymmetricKey = [u8; 32]; /// Maximum Lightning message data length according to /// [BOLT-8](https://github.com/lightningnetwork/lightning-rfc/blob/v1.0/08-transport.md#lightning-message-specification) /// and [BOLT-1](https://github.com/lightningnetwork/lightning-rfc/blob/master/01-messaging.md#lightning-message-format): pub const LN_MAX_MSG_LEN: usize = 65535; pub const LN_MAX_PACKET_LENGTH: usize = MESSAGE_LENGTH_HEADER_SIZE + chacha::TAG_SIZE + LN_MAX_MSG_LEN + chacha::TAG_SIZE; pub const MESSAGE_LENGTH_HEADER_SIZE: usize = 2; pub const TAGGED_MESSAGE_LENGTH_HEADER_SIZE: usize = MESSAGE_LENGTH_HEADER_SIZE + chacha::TAG_SIZE; pub const KEY_ROTATION_INDEX: u32 = 1000; /// Instantiate a new (Encryptor, Decryptor) with specified sending and receiving keys pub fn create_encryptor_decryptor(sending_key: SymmetricKey, receiving_key: SymmetricKey, chaining_key: SymmetricKey) -> (Encryptor, Decryptor) { (Encryptor { sending_key, sending_chaining_key: chaining_key, sending_nonce: 0 }, Decryptor { receiving_key, receiving_chaining_key: chaining_key, receiving_nonce: 0, read_buffer: Some(vec![]), pending_message_length: None, decrypted_payloads: VecDeque::new(), }) } // Shared helper for the Encryptor and Decryptor fn increment_nonce_helper(nonce: &mut u32, chaining_key: &mut SymmetricKey, key: &mut SymmetricKey) { *nonce += 1; if *nonce == KEY_ROTATION_INDEX { rotate_key(chaining_key, key); 
*nonce = 0; } } // Shared helper for the Encryptor and Decryptor fn rotate_key(chaining_key: &mut SymmetricKey, key: &mut SymmetricKey) { let (new_chaining_key, new_key) = hkdf5869rfc::derive(chaining_key, key); chaining_key.copy_from_slice(&new_chaining_key); key.copy_from_slice(&new_key); } pub struct Encryptor { sending_key: SymmetricKey, sending_chaining_key: SymmetricKey, sending_nonce: u32, } pub struct Decryptor { receiving_key: SymmetricKey, receiving_chaining_key: SymmetricKey, receiving_nonce: u32, pending_message_length: Option<usize>, read_buffer: Option<Vec<u8>>, decrypted_payloads: VecDeque<Vec<u8>>, } impl Iterator for Decryptor { type Item = Vec<u8>; fn next(&mut self) -> Option<Self::Item> { self.decrypted_payloads.pop_front() } } impl Encryptor { pub fn encrypt_buf(&mut self, buffer: &[u8]) -> Vec<u8> { if buffer.len() > LN_MAX_MSG_LEN { panic!("Attempted to encrypt message longer than {} bytes!", LN_MAX_MSG_LEN); } let length = buffer.len() as u16; let length_bytes = byte_utils::be16_to_array(length); let mut ciphertext = vec![0u8; TAGGED_MESSAGE_LENGTH_HEADER_SIZE + length as usize + chacha::TAG_SIZE]; chacha::encrypt(&self.sending_key, self.sending_nonce as u64, &[0; 0], &length_bytes, &mut ciphertext[..TAGGED_MESSAGE_LENGTH_HEADER_SIZE]); self.increment_nonce(); &chacha::encrypt(&self.sending_key, self.sending_nonce as u64, &[0; 0], buffer, &mut ciphertext[TAGGED_MESSAGE_LENGTH_HEADER_SIZE..]); self.increment_nonce(); ciphertext } fn increment_nonce(&mut self) { increment_nonce_helper(&mut self.sending_nonce, &mut self.sending_chaining_key, &mut self.sending_key); } } impl Decryptor { // Read in new encrypted data and process it. This attempts to decrypt the input data and any // existing data in the internal read buffer and can return an error if there is an error raised // from the decryption code. 
pub fn read(&mut self, data: &[u8]) -> Result<(), String> { let mut read_buffer = self.read_buffer.take().unwrap(); let buffer = if read_buffer.is_empty() { data } else { read_buffer.extend_from_slice(data); read_buffer.as_slice() }; let mut read_offset = 0; loop { match self.decrypt_next(&buffer[read_offset..]) { Ok((Some(result), bytes_read)) => { read_offset += bytes_read; self.decrypted_payloads.push_back(result); }, Ok((None, 0)) => { self.read_buffer = Some(buffer[read_offset..].to_vec()); break; } Err(e) => { return Err(e); } Ok((None, _)) => { panic!("Invalid return from decrypt_next()") } } } // If we ever get to the end of the decryption phase and have more data in the read buffer // than is possible for a valid message something has gone wrong. An error with a mismatched // length and payload should result an error from the decryption code before we get here. if self.read_buffer.as_ref().unwrap().len() > LN_MAX_PACKET_LENGTH { panic!("Encrypted message data longer than {}", LN_MAX_PACKET_LENGTH); } Ok(()) } /// Decrypt the next payload from the slice returning the number of bytes consumed during the /// operation. This will always be (None, 0) if no payload could be decrypted. 
pub fn decrypt_next(&mut self, buffer: &[u8]) -> Result<(Option<Vec<u8>>, usize), String> { let message_length = if let Some(length) = self.pending_message_length { // we have already decrypted the header length } else { if buffer.len() < TAGGED_MESSAGE_LENGTH_HEADER_SIZE { // A message must be at least 18 bytes (2 for encrypted length, 16 for the tag) return Ok((None, 0)); } let encrypted_length = &buffer[0..TAGGED_MESSAGE_LENGTH_HEADER_SIZE]; let mut length_bytes = [0u8; MESSAGE_LENGTH_HEADER_SIZE]; chacha::decrypt(&self.receiving_key, self.receiving_nonce as u64, &[0; 0], encrypted_length, &mut length_bytes)?; self.increment_nonce(); // the message length byte_utils::slice_to_be16(&length_bytes) as usize }; let message_end_index = TAGGED_MESSAGE_LENGTH_HEADER_SIZE + message_length + chacha::TAG_SIZE; if buffer.len() < message_end_index { self.pending_message_length = Some(message_length); return Ok((None, 0)); } self.pending_message_length = None; let encrypted_message = &buffer[TAGGED_MESSAGE_LENGTH_HEADER_SIZE..message_end_index]; let mut message = vec![0u8; message_length]; chacha::decrypt(&self.receiving_key, self.receiving_nonce as u64, &[0; 0], encrypted_message, &mut message)?; self.increment_nonce(); Ok((Some(message), message_end_index)) } fn increment_nonce(&mut self) { increment_nonce_helper(&mut self.receiving_nonce, &mut self.receiving_chaining_key, &mut self.receiving_key); } // Used in tests to determine whether or not excess bytes entered the Decryptor without needing // to bring up infrastructure to properly encode it #[cfg(test)] pub fn read_buffer_length(&self) -> usize { match &self.read_buffer { &Some(ref vec) => { vec.len() } &None => 0 } } } #[cfg(test)] mod tests { use super::*; use hex; fn setup_peers() -> ((Encryptor, Decryptor), (Encryptor, Decryptor)) { let chaining_key_vec = hex::decode("919219dbb2920afa8db80f9a51787a840bcf111ed8d588caf9ab4be716e42b01").unwrap(); let mut chaining_key = [0u8; 32]; 
chaining_key.copy_from_slice(&chaining_key_vec); let sending_key_vec = hex::decode("969ab31b4d288cedf6218839b27a3e2140827047f2c0f01bf5c04435d43511a9").unwrap(); let mut sending_key = [0u8; 32]; sending_key.copy_from_slice(&sending_key_vec); let receiving_key_vec = hex::decode("bb9020b8965f4df047e07f955f3c4b88418984aadc5cdb35096b9ea8fa5c3442").unwrap(); let mut receiving_key = [0u8; 32]; receiving_key.copy_from_slice(&receiving_key_vec); let connected_peer = create_encryptor_decryptor(sending_key, receiving_key, chaining_key); let remote_peer = create_encryptor_decryptor(receiving_key, sending_key, chaining_key); (connected_peer, remote_peer) } #[test] fn test_empty_message() { let ((mut connected_encryptor, _), (_, mut remote_decryptor)) = setup_peers(); let message: Vec<u8> = vec![]; let encrypted_message = connected_encryptor.encrypt_buf(&message); assert_eq!(encrypted_message.len(), 2 + 16 + 16); remote_decryptor.read(&encrypted_message[..]).unwrap(); let decrypted_message = remote_decryptor.next().unwrap(); assert_eq!(decrypted_message, Vec::<u8>::new()); } // Test that descrypting from a slice that is the partial data followed by another decrypt call // with the remaining data works. This exercises the slow-path for decryption and ensures the // data is written to the read_buffer properly. 
#[test] fn test_decrypt_from_slice_two_calls_no_header_then_rest() { let ((mut connected_encryptor, _), (_, mut remote_decryptor)) = setup_peers(); let message: Vec<u8> = vec![1]; let encrypted_message = connected_encryptor.encrypt_buf(&message); remote_decryptor.read(&encrypted_message[..1]).unwrap(); assert!(remote_decryptor.next().is_none()); remote_decryptor.read(&encrypted_message[1..]).unwrap(); let decrypted_message = remote_decryptor.next().unwrap(); assert_eq!(decrypted_message, vec![1]); } // Include the header in the first slice #[test] fn test_decrypt_from_slice_two_calls_header_then_rest() { let ((mut connected_encryptor, _), (_, mut remote_decryptor)) = setup_peers(); let message: Vec<u8> = vec![1]; let encrypted_message = connected_encryptor.encrypt_buf(&message); remote_decryptor.read(&encrypted_message[..20]).unwrap(); assert!(remote_decryptor.next().is_none()); remote_decryptor.read(&encrypted_message[20..]).unwrap(); let decrypted_message = remote_decryptor.next().unwrap(); assert_eq!(decrypted_message, vec![1]); } #[test] fn test_nonce_chaining() { let ((mut connected_encryptor, _), _) = setup_peers(); let message = hex::decode("68656c6c6f").unwrap(); let encrypted_message = connected_encryptor.encrypt_buf(&message); assert_eq!(encrypted_message, hex::decode("cf2b30ddf0cf3f80e7c35a6e6730b59fe802473180f396d88a8fb0db8cbcf25d2f214cf9ea1d95").unwrap()); // the second time the same message is encrypted, the ciphertext should be different let encrypted_message = connected_encryptor.encrypt_buf(&message); assert_eq!(encrypted_message, hex::decode("72887022101f0b6753e0c7de21657d35a4cb2a1f5cde2650528bbc8f837d0f0d7ad833b1a256a1").unwrap()); } #[test] /// Based on RFC test vectors: https://github.com/lightningnetwork/lightning-rfc/blob/master/08-transport.md#message-encryption-tests fn test_key_rotation() { let ((mut connected_encryptor, _), _) = setup_peers(); let message = hex::decode("68656c6c6f").unwrap(); let mut encrypted_messages: Vec<Vec<u8>> = 
Vec::new(); for _ in 0..1002 { let encrypted_message = connected_encryptor.encrypt_buf(&message); encrypted_messages.push(encrypted_message); } assert_eq!(encrypted_messages[500], hex::decode("178cb9d7387190fa34db9c2d50027d21793c9bc2d40b1e14dcf30ebeeeb220f48364f7a4c68bf8").unwrap()); assert_eq!(encrypted_messages[501], hex::decode("1b186c57d44eb6de4c057c49940d79bb838a145cb528d6e8fd26dbe50a60ca2c104b56b60e45bd").unwrap()); assert_eq!(encrypted_messages[1000], hex::decode("4a2f3cc3b5e78ddb83dcb426d9863d9d9a723b0337c89dd0b005d89f8d3c05c52b76b29b740f09").unwrap()); assert_eq!(encrypted_messages[1001], hex::decode("2ecd8c8a5629d0d02ab457a0fdd0f7b90a192cd46be5ecb6ca570bfc5e268338b1a16cf4ef2d36").unwrap()); } #[test] fn test_decryption_buffering() { let ((mut connected_encryptor, _), (_, mut remote_decryptor)) = setup_peers(); let message = hex::decode("68656c6c6f").unwrap(); let mut encrypted_messages: Vec<Vec<u8>> = Vec::new(); for _ in 0..1002 { let encrypted_message = connected_encryptor.encrypt_buf(&message); encrypted_messages.push(encrypted_message); } for _ in 0..501 { // read two messages at once, filling buffer let mut current_encrypted_message = encrypted_messages.remove(0); let next_encrypted_message = encrypted_messages.remove(0); current_encrypted_message.extend_from_slice(&next_encrypted_message); remote_decryptor.read(&current_encrypted_message[..]).unwrap(); let decrypted_message = remote_decryptor.next().unwrap(); assert_eq!(decrypted_message, message); } for _ in 0..501 { // decrypt messages directly from buffer without adding to it remote_decryptor.read(&[]).unwrap(); let decrypted_message = remote_decryptor.next().unwrap(); assert_eq!(decrypted_message, message); } } // Decryption errors should result in Err #[test] fn decryption_failure_errors() { let ((mut connected_encryptor, _), (_, mut remote_decryptor)) = setup_peers(); let encrypted = connected_encryptor.encrypt_buf(&[1]); remote_decryptor.receiving_key = [0; 32]; 
assert_eq!(remote_decryptor.read(&encrypted), Err("invalid hmac".to_string())); } // Test next()::None #[test] fn decryptor_iterator_empty() { let (_, (_, mut remote_decryptor)) = setup_peers(); assert_eq!(remote_decryptor.next(), None); } // Test next() -> next()::None #[test] fn decryptor_iterator_one_item_valid() { let ((mut connected_encryptor, _), (_, mut remote_decryptor)) = setup_peers(); let encrypted = connected_encryptor.encrypt_buf(&[1]); remote_decryptor.read(&encrypted).unwrap(); assert_eq!(remote_decryptor.next(), Some(vec![1])); assert_eq!(remote_decryptor.next(), None); } #[test] // https://github.com/lightningnetwork/lightning-rfc/blob/v1.0/08-transport.md#lightning-message-specification fn max_msg_len_limit_value() { assert_eq!(LN_MAX_MSG_LEN, 65535); assert_eq!(LN_MAX_PACKET_LENGTH, 65569); } #[test] #[should_panic(expected = "Attempted to encrypt message longer than 65535 bytes!")] fn max_message_len_encryption() { let ((mut connected_encryptor, _), _) = setup_peers(); let msg = [4u8; LN_MAX_MSG_LEN + 1]; let _should_panic = connected_encryptor.encrypt_buf(&msg); } // Test that the decryptor can handle multiple partial reads() that result in a total size // larger than LN_MAX_PACKET_LENGTH and still decrypt the messages. #[test] fn read_buffer_can_grow_over_max_payload_len() { let ((mut connected_encryptor, _), ( _, mut remote_decryptor)) = setup_peers(); let msg1 = [1u8; LN_MAX_MSG_LEN]; let msg2 = [2u8; LN_MAX_MSG_LEN]; let encrypted1 = connected_encryptor.encrypt_buf(&msg1); let encrypted2 = connected_encryptor.encrypt_buf(&msg2); let read1 = &encrypted1[..1]; let mut read2 = vec![]; read2.extend_from_slice(&encrypted1[1..]); read2.extend_from_slice(&encrypted2); remote_decryptor.read(read1).unwrap(); assert_eq!(remote_decryptor.next(), None); remote_decryptor.read(&read2[..]).unwrap(); assert_eq!(remote_decryptor.next(), Some(msg1.to_vec())); assert_eq!(remote_decryptor.next(), Some(msg2.to_vec())); } }
35.97343
145
0.740482
1de71a813866ef0acee8deb4f1eeb02daf2ce38f
361
//! The Veracruz proxy attestation server //! //! ## Authors //! //! The Veracruz Development Team. //! //! ## Licensing and copyright notice //! //! See the `LICENSE_MIT.markdown` file in the Veracruz root directory for //! information on licensing and copyright. #[macro_use] extern crate diesel; mod attestation; mod orm; pub mod server; pub mod error;
16.409091
74
0.703601
75ed5f64e428cd757d0017e983950810cc1d2c0b
9,525
use proc_macro::{Delimiter, Group, Literal, TokenStream, TokenTree}; use proc_macro::token_stream::IntoIter; use quote::ToTokens; use syn::{self, Ident, LitInt, Token}; struct Header { name: Ident, start: i32, end: i32, } #[derive(Debug)] enum Segment { Normal(TokenStream), Repeat(TokenStream), } #[proc_macro] pub fn seq(input: TokenStream) -> TokenStream { match process(input) { Ok(v) => v, Err(e) => e.to_compile_error().into(), } } fn process(input: TokenStream) -> Result<TokenStream, syn::Error> { let mut result = TokenStream::new(); let mut iter = input.clone().into_iter(); let header = parse_header(&mut iter)?; let body = braced_body(&mut iter)?; let mut segments = vec![]; if find_repeat_section(body.clone().into_iter(), &mut segments) { for seg in segments { match seg { Segment::Normal(n) => result.extend(n), Segment::Repeat(r) => { for lit in header.start .. header.end { interrupt_ident_to_literal(&header.name, lit, r.clone().into_iter(), &mut result); } } } } eprintln!("{}", result); } else { for lit in header.start .. 
header.end { interrupt_ident_to_literal(&header.name, lit, body.clone().into_iter(), &mut result); } } Ok(result) } fn parse_header(iter: &mut IntoIter) -> Result<Header, syn::Error> { let name = parse_header_name(iter)?; parse_token::<Token![in]>(iter)?; let start = parse_header_lit(iter)?; parse_token::<Token![.]>(iter)?; parse_token::<Token![.]>(iter)?; let end = parse_header_lit(iter)?; Ok(Header{ name, start, end, }) } fn parse_header_name(iter: &mut IntoIter) -> Result<Ident, syn::Error> { if let Some(tt) = iter.next() { let ts: TokenStream = tt.into(); let ident: Ident = syn::parse(ts)?; return Ok(ident); } Err(syn::Error::new(proc_macro::Span::call_site().into(), "unexpected eof")) } fn parse_token<T: syn::parse::Parse>(iter: &mut IntoIter) -> Result<(), syn::Error> { if let Some(tt) = iter.next() { let ts: TokenStream = tt.into(); let _: T = syn::parse(ts)?; return Ok(()); } Err(syn::Error::new(proc_macro::Span::call_site().into(), "unexpected eof")) } fn parse_header_lit(iter: &mut IntoIter) -> Result<i32, syn::Error> { if let Some(tt) = iter.next() { let ts: TokenStream = tt.into(); let lit: LitInt = syn::parse(ts)?; let v = lit.base10_parse::<i32>()?; return Ok(v); } Err(syn::Error::new(proc_macro::Span::call_site().into(), "unexpected eof")) } fn braced_body(iter: &mut IntoIter) -> Result<TokenStream, syn::Error> { if let Some(tt) = iter.next() { if let proc_macro::TokenTree::Group(g) = tt { match g.delimiter() { proc_macro::Delimiter::Brace => return Ok(g.stream()), _ => { let err = syn::Error::new(g.span().into(), "unexpected delimiter"); return Err(err); } } } return Err(syn::Error::new(tt.span().into(), "a group is expected")); } return Err(syn::Error::new(proc_macro::Span::call_site().into(), "unexpected eof")); } // interrupt N to lit fn interrupt_ident_to_literal(name: &Ident, lit: i32, iter: IntoIter, output: &mut TokenStream) { let mut sharp = None; let mut ident_before_sharp: Option<TokenTree> = None; for tt in iter { match &tt { 
TokenTree::Group(g) => { if let Some(ident0) = &ident_before_sharp { output.extend::<TokenStream>(ident0.clone().into()); ident_before_sharp = None; } let mut tmp = TokenStream::new(); interrupt_ident_to_literal(name, lit, g.stream().into_iter(), &mut tmp); let mut new_g = Group::new(g.delimiter(), tmp); new_g.set_span(g.span()); let new_tt: TokenTree = new_g.into(); output.extend::<TokenStream>(new_tt.into()); } TokenTree::Punct(p) => { // 如果检测到 ident_before_sharp#name 则合并 ident_before_sharp 和 name 追加到结果; // 否则需要将 ident_before_sharp 作为单独的 ident 追加到结果。 if p.as_char() == '#' { sharp = Some(tt.clone()); continue } if let Some(ident0) = &ident_before_sharp { output.extend::<TokenStream>(ident0.clone().into()); ident_before_sharp = None; } // append the sharp if has any if let Some(s) = &sharp { output.extend::<TokenStream>(s.clone().into()); sharp = None; } output.extend::<TokenStream>(tt.into()); } TokenTree::Ident(i) => { if i.to_string() == name.to_string() { let t: TokenTree = Literal::i32_unsuffixed(lit).into(); let mut ts: TokenStream = t.into(); if let Some(_) = sharp { if let Some(ident0) = &ident_before_sharp { let new_name = format!("{}{}", ident0, lit); let new_ident = syn::Ident::new(&&new_name, ident0.span().into()); ts = new_ident.to_token_stream().into(); ident_before_sharp = None; // avoid duplicated extend to result } sharp = None; } output.extend(ts); continue } if let Some(ident0) = &ident_before_sharp { output.extend::<TokenStream>(ident0.clone().into()); } ident_before_sharp = Some(tt.clone()); } _ => { if let Some(ident0) = &ident_before_sharp { output.extend::<TokenStream>(ident0.clone().into()); ident_before_sharp = None; } // append the sharp if has any if let Some(s) = &sharp { output.extend::<TokenStream>(s.clone().into()); sharp = None; } output.extend::<TokenStream>(tt.into()); } } } } fn find_repeat_section(iter: IntoIter, segments: &mut Vec<Segment>) -> bool { let mut found = false; let mut sharp: Option<TokenTree> = None; let mut 
repeat: Option<TokenStream> = None; for tt in iter { match &tt { TokenTree::Group(g) => { if sharp.is_some() && g.delimiter() == Delimiter::Parenthesis{ repeat = Some(g.stream()); } else { if let Some(s) = &sharp { segments.push(Segment::Normal(s.clone().into())); sharp = None; } repeat = None; if find_repeat_section(g.stream().into_iter(), segments) { found = true; continue; } segments.push(Segment::Normal(tt.clone().into())); } } TokenTree::Punct(p) => { match p.as_char() { '#' => sharp = Some(tt.clone()), '*' => { if sharp.is_some() && repeat.is_some() { if let Some(r) = &repeat { segments.push(Segment::Repeat(r.clone().into())); found = true; } continue; } if let Some(s) = &sharp { segments.push(Segment::Normal(s.clone().into())); } if let Some(r) = &repeat { segments.push(Segment::Normal(r.clone().into())); } sharp = None; repeat = None; segments.push(Segment::Normal(tt.clone().into())); } _ => { if let Some(s) = &sharp { segments.push(Segment::Normal(s.clone().into())); } if let Some(r) = &repeat { segments.push(Segment::Normal(r.clone().into())); } sharp = None; repeat = None; segments.push(Segment::Normal(tt.clone().into())); } } } _ => { if let Some(s) = &sharp { segments.push(Segment::Normal(s.clone().into())); } if let Some(r) = &repeat { segments.push(Segment::Normal(r.clone().into())); } sharp = None; repeat = None; segments.push(Segment::Normal(tt.clone().into())) } } } found }
36.354962
106
0.458268
ef83f7fb90376213d6193b7e7b4ac505fabe50c5
1,304
pub use xitca_http::{http::response::Builder as WebResponseBuilder, ResponseBody};

use std::future::Future;

use xitca_http::{
    http::{const_header_value::TEXT_UTF8, header::CONTENT_TYPE, Response},
    util::service::Responder,
};

use super::request::WebRequest;

// TODO: add app state to response type.
pub type WebResponse = Response<ResponseBody>;

// An already-built response responds with itself, unchanged; the request is
// not consulted at all.
impl<'a, 'r, 's, S> Responder<'a, &'r mut WebRequest<'s, S>> for WebResponse {
    type Output = WebResponse;
    type Future = impl Future<Output = Self::Output> + 'a;

    #[inline]
    fn respond_to(self, _: &'a mut &'r mut WebRequest<'s, S>) -> Self::Future {
        async { self }
    }
}

// Implements `Responder` for plain-text types: the value becomes the response
// body (via the request's `as_response`) and the `Content-Type` header is set
// to the `TEXT_UTF8` constant.
macro_rules! text_utf8 {
    ($type: ty) => {
        impl<'a, 'r, 's, S> Responder<'a, &'r mut WebRequest<'s, S>> for $type
        where
            S: 'static,
        {
            type Output = WebResponse;
            type Future = impl Future<Output = Self::Output> + 'a;

            fn respond_to(self, req: &'a mut &'r mut WebRequest<'s, S>) -> Self::Future {
                async move {
                    let mut res = req.as_response(self);
                    res.headers_mut().insert(CONTENT_TYPE, TEXT_UTF8);
                    res
                }
            }
        }
    };
}

text_utf8!(String);
text_utf8!(&'static str);
27.744681
89
0.562883
21939301d6ecdf327064c0400f9efefd7bbdb500
8,156
use std::{
    env,
    io::{Error, ErrorKind, Result},
    path::{Path, PathBuf},
};

// Target triple and on-disk directory layout shared by all helpers below.
const TARGET: &str = "spirv-unknown-unknown";
const TARGET_DIR: &str = "target/compiletest";
const SPIRV_STD_TARGET: &str = "target/compiletest/spirv-std";
const SPIRV_STD_HOST_DEPS: &str = "target/compiletest/spirv-std/debug/deps";
const SPIRV_STD_TARGET_DEPS: &str = "target/compiletest/spirv-std/spirv-unknown-unknown/debug/deps";

/// Entry point: locates the SPIR-V codegen backend, builds `spirv-std` and
/// its dependency artifacts, then runs the `ui` compiletest suite.
fn main() {
    let manifest_dir = PathBuf::from("./");
    std::env::set_var("CARGO_MANIFEST_DIR", &manifest_dir);
    // Pull in rustc_codegen_spirv as a dynamic library in the same way
    // spirv-builder does.
    let codegen_backend_path = find_rustc_codegen_spirv();
    let libs = build_spirv_std(&manifest_dir, &codegen_backend_path);
    run_mode("ui", &codegen_backend_path, &libs);
}

/// Runs the given `mode` on the directory that matches that name, using the
/// backend provided by `codegen_backend_path`.
fn run_mode(mode: &'static str, codegen_backend_path: &Path, libs: &TestDeps) {
    let mut config = compiletest::Config::default();

    /// RUSTFLAGS passed to all test files.
    fn test_rustc_flags(codegen_backend_path: &Path, deps: &TestDeps, libs: &[&Path]) -> String {
        [
            &*rust_flags(codegen_backend_path),
            // NOTE(review): this fold *prepends* each `-L` flag, so the search
            // paths end up in reverse order with a trailing space — appears
            // harmless to rustc, but worth confirming.
            &*libs
                .iter()
                .map(|p| format!("-L {}", p.display()))
                .fold(String::new(), |a, b| b + " " + &a),
            "--edition 2018",
            &*format!("--extern noprelude:core={}", deps.core.display()),
            &*format!(
                "--extern noprelude:compiler_builtins={}",
                deps.compiler_builtins.display()
            ),
            &*format!(
                "--extern spirv_std_macros={}",
                deps.spirv_std_macros.display()
            ),
            &*format!("--extern spirv_std={}", deps.spirv_std.display()),
            &*format!("--extern glam={}", deps.glam.display()),
            "--crate-type dylib",
            "-Zunstable-options",
            "-Zcrate-attr=no_std",
            "-Zcrate-attr=feature(register_attr,asm)",
            "-Zcrate-attr=register_attr(spirv)",
        ]
        .join(" ")
    }

    let flags = test_rustc_flags(
        codegen_backend_path,
        libs,
        &[
            &PathBuf::from(format!("dependency={}", SPIRV_STD_TARGET_DEPS)),
            &PathBuf::from(format!("dependency={}", SPIRV_STD_HOST_DEPS)),
        ],
    );

    config.target_rustcflags = Some(flags);
    config.mode = mode.parse().expect("Invalid mode");
    config.target = String::from(TARGET);
    config.src_base = PathBuf::from(format!("./tests/{}", mode));
    config.build_base = PathBuf::from(format!("./{}-results", TARGET_DIR));
    // `--bless` on the command line updates expected outputs instead of
    // diffing against them.
    config.bless = std::env::args().any(|a| a == "--bless");
    config.clean_rmeta();

    compiletest::run_tests(&config);
}

/// Runs the processes needed to build `spirv-std`.
fn build_spirv_std(manifest_dir: &Path, codegen_backend_path: &Path) -> TestDeps {
    let target_dir = format!("--target-dir={}", SPIRV_STD_TARGET);

    // Build compiletests-deps-helper
    std::process::Command::new("cargo")
        .args(&[
            "build",
            "-p",
            "compiletests-deps-helper",
            "-Zbuild-std=core",
            &*format!("--target={}", TARGET),
            &*target_dir,
        ])
        .env("RUSTFLAGS", rust_flags(&codegen_backend_path))
        .env("CARGO_MANIFEST_DIR", manifest_dir)
        .current_dir(manifest_dir)
        .stderr(std::process::Stdio::inherit())
        .stdout(std::process::Stdio::inherit())
        .status()
        .and_then(map_status_to_result)
        .unwrap();

    // Locate every library artifact the tests link against; `find_lib`
    // yields `Ok(None)` when duplicate artifacts exist (a stale build).
    let compiler_builtins = find_lib(SPIRV_STD_TARGET_DEPS, "libcompiler_builtins", false).unwrap();
    let core = find_lib(SPIRV_STD_TARGET_DEPS, "libcore", false).unwrap();
    let spirv_std = find_lib(SPIRV_STD_TARGET_DEPS, "libspirv_std", false).unwrap();
    let glam = find_lib(SPIRV_STD_TARGET_DEPS, "libglam", false).unwrap();
    let spirv_std_macros = find_lib(SPIRV_STD_HOST_DEPS, "spirv_std_macros", true).unwrap();

    if [
        &compiler_builtins,
        &core,
        &spirv_std,
        &glam,
        &spirv_std_macros,
    ]
    .iter()
    .any(|o| o.is_none())
    {
        // Stale/duplicate artifacts: wipe the target dir and rebuild.
        // NOTE(review): this recursion has no retry limit — if a clean build
        // still produces duplicates it loops forever; worth confirming.
        clean_project(manifest_dir);
        build_spirv_std(manifest_dir, codegen_backend_path)
    } else {
        TestDeps {
            core: core.unwrap(),
            glam: glam.unwrap(),
            compiler_builtins: compiler_builtins.unwrap(),
            spirv_std: spirv_std.unwrap(),
            spirv_std_macros: spirv_std_macros.unwrap(),
        }
    }
}

/// Removes all previous build artifacts under `TARGET_DIR` via `cargo clean`.
fn clean_project(manifest_dir: &Path) {
    std::process::Command::new("cargo")
        .args(&["clean", &*format!("--target-dir={}", TARGET_DIR)])
        .current_dir(manifest_dir)
        .stderr(std::process::Stdio::inherit())
        .stdout(std::process::Stdio::inherit())
        .status()
        .and_then(map_status_to_result)
        .unwrap();
}

/// Attempts to find the rlib that matches `base`; if multiple rlibs are found
/// then a clean build is required and `None` is returned.
/// Finds the single library artifact in `dir` whose file name starts with
/// `base`.
///
/// For `dynamic` libraries the platform DLL prefix is prepended to `base` and
/// the platform DLL extension is required; otherwise an `.rlib` is expected.
///
/// Returns `Ok(None)` when no artifact matches, or when more than one does
/// (stale duplicates from a previous build — the caller must clean first).
///
/// # Errors
/// Propagates any error produced while reading `dir`.
fn find_lib(
    dir: impl AsRef<Path>,
    base: impl AsRef<Path>,
    dynamic: bool,
) -> Result<Option<PathBuf>> {
    let base = base.as_ref();
    let expected_name = if dynamic {
        format!("{}{}", env::consts::DLL_PREFIX, base.display())
    } else {
        base.display().to_string()
    };

    let paths = std::fs::read_dir(dir.as_ref())?
        .filter_map(Result::ok)
        .map(|entry| entry.path())
        .filter(|path| {
            // Skip entries with no file name or a non-UTF-8 one instead of
            // panicking on `unwrap` as the previous version did.
            let name_matches = path
                .file_name()
                .and_then(|name| name.to_str())
                .map_or(false, |name| name.starts_with(&expected_name));

            let extension_matches = path.extension().map_or(false, |ext| {
                if dynamic {
                    ext == env::consts::DLL_EXTENSION
                } else {
                    ext == "rlib"
                }
            });

            name_matches && extension_matches
        })
        .collect::<Vec<_>>();

    // Zero or one match is meaningful; two or more means a stale build.
    Ok(if paths.len() > 1 {
        None
    } else {
        paths.into_iter().next()
    })
}

/// Paths to all of the library artifacts of dependencies needed to compile tests.
struct TestDeps {
    core: PathBuf,
    compiler_builtins: PathBuf,
    spirv_std: PathBuf,
    spirv_std_macros: PathBuf,
    glam: PathBuf,
}

/// The RUSTFLAGS passed to all SPIR-V builds.
fn rust_flags(codegen_backend_path: &Path) -> String {
    [
        &*format!("-Zcodegen-backend={}", codegen_backend_path.display()),
        "-Coverflow-checks=off",
        "-Cdebug-assertions=off",
        "-Cdebuginfo=2",
        "-Cembed-bitcode=no",
    ]
    .join(" ")
}

/// Convenience function to map process failure to results in Rust.
/// Convenience function mapping a process `ExitStatus` onto `io::Result` so
/// command invocations can be chained with `and_then`.
fn map_status_to_result(status: std::process::ExitStatus) -> Result<()> {
    if status.success() {
        Ok(())
    } else {
        // `code()` is `None` when the process was killed by a signal; report
        // that case accurately instead of pretending the exit code was 0.
        let detail = match status.code() {
            Some(code) => format!("process terminated with non-zero code: {}", code),
            None => String::from("process terminated by signal"),
        };
        Err(Error::new(ErrorKind::Other, detail))
    }
}

// https://github.com/rust-lang/cargo/blob/1857880b5124580c4aeb4e8bc5f1198f491d61b1/src/cargo/util/paths.rs#L29-L52
/// Name of the environment variable that holds the dynamic-library search
/// path on the current platform.
fn dylib_path_envvar() -> &'static str {
    if cfg!(windows) {
        "PATH"
    } else if cfg!(target_os = "macos") {
        "DYLD_FALLBACK_LIBRARY_PATH"
    } else {
        "LD_LIBRARY_PATH"
    }
}

/// The directories on the platform dynamic-library search path; empty when
/// the variable is unset.
fn dylib_path() -> Vec<PathBuf> {
    match env::var_os(dylib_path_envvar()) {
        Some(var) => env::split_paths(&var).collect(),
        None => Vec::new(),
    }
}

/// Searches the dynamic-library path for the `rustc_codegen_spirv` backend.
///
/// # Panics
/// Panics when the library cannot be found anywhere on the search path.
fn find_rustc_codegen_spirv() -> PathBuf {
    let filename = format!(
        "{}rustc_codegen_spirv{}",
        env::consts::DLL_PREFIX,
        env::consts::DLL_SUFFIX
    );
    for mut path in dylib_path() {
        path.push(&filename);
        if path.is_file() {
            return path;
        }
    }
    panic!("Could not find {} in library path", filename);
}
32.110236
115
0.576018
e4eb6c7a1080b5276449282c02eecd24dadddbca
6,411
extern crate tuix;

use tuix::*;

use tuix::widgets::Button;

// Stylesheet compiled into the binary and registered with the UI state below.
static THEME: &'static str = include_str!("themes/menus_theme.css");

// NOTE(review): the MenuBar widget below is disabled work-in-progress kept
// for reference; it predates the ContextMenu-based example in `main`.

// #[derive(Clone, Debug)]
// pub enum MenuBarEvent {
//     SetText(String),
//     OptionChanged(u32),
// }

// impl Message for MenuBarEvent {}

// pub struct MenuBar {
//     open_menu: Entity,
// }

// impl MenuBar {
//     pub fn new(text: &str) -> Self {
//         MenuBar {
//             open_menu: Entity::null(),
//         }
//     }
// }

// impl EventHandler for MenuBar {
//     fn build<'a>(
//         mut self,
//         state: &'a mut State,
//         parent: Entity,
//         event_manager: &'a mut EventManager,
//     ) -> Builder<'a> {
//         let id = state.add(parent);
//         id.set_width(state, 200.0)
//             .set_height(state, 30.0)
//             .set_display(state, Display::Flexbox);
//         event_manager.build(id, parent, state, self)
//     }

//     fn handle_event(
//         &mut self,
//         id: Entity,
//         state: &mut State,
//         event: &Event,
//         event_manager: &mut EventManager,
//     ) -> bool {
//         // if let Some(menu_event) = event.message.downcast::<MenuEvent>() {
//         //     match menu_event {
//         //     }
//         // }

//         if let Some(window_event) = event.message.downcast::<WindowEvent>() {
//             match window_event {
//                 WindowEvent::MouseMove(x, y) => {
//                     for child in id.child_iter(&state.hierarchy) {
//                         if child == state.hovered {
//                             //event_manager.insert_event(Event::new(MenuEvent::Open(child)).target(child).propagate(false));
//                             return false;
//                         }
//                     }
//                 }
//                 WindowEvent::MouseDown(button, mods) => match button {
//                     MouseButton::Left => {
//                         if state.hovered == id {
//                             event_manager.insert_event(Event::new(StyleEvent::Restyle);
//                         } else {
//                         }
//                     }
//                     _ => {}
//                 },
//                 WindowEvent::MouseUp(button, mods) => match button {
//                     MouseButton::Left => {}
//                     _ => {}
//                 },
//                 _ => {}
//             }
//         }
//         false
//     }
// }

/// Example: a window with a single button owning a three-option context menu.
fn main() {
    // Create the app
    let app = Application::new(|win_desc, state, window| {
        state.add_theme(THEME);

        // NOTE(review): disabled menu-bar construction kept for reference.
        // let menu1 = Menu::new("Menu", MenuPosition::Down).build(state, window, |builder| {
        //     builder
        //         .set_width(Length::Pixels(100.0))
        //         .set_height(Length::Pixels(30.0))
        //         .set_flex_grow(0.0)
        //         .set_text_justify(Justify::Center)
        //         .class("menu")
        // });

        // // Button::new().build2(state, menu1, |builder| builder.class("spacer2"));

        // Button::with_label("Item 1").build(state, menu1, |builder| builder.class("item"));
        // Button::with_label("Item 2").build(state, menu1, |builder| builder.class("item"));
        // Button::with_label("Item 3")
        //     .on_press(Event::new(WindowEvent::WindowClose))
        //     .build(state, menu1, |builder| builder.class("item"));

        // let spacer = Button::new().build(state, menu1, |builder| builder.class("spacer"));
        // Button::new().build(state, spacer, |builder| builder.class("spacer1"));
        // Button::new().build(state, spacer, |builder| builder.class("spacer2"));

        // let menu2 = Menu::new("Submenu", MenuPosition::Right).build(state, menu1, |builder| {
        //     builder.class("item").class("submenu")
        // });

        // Button::with_label("Item 4").build(state, menu1, |builder| builder.class("item"));

        // // Button::new().build2(state, menu1, |builder| builder.class("spacer2"));

        // Button::with_label("SubItem 1").build(state, menu2, |builder| builder.class("item"));
        // Button::with_label("SubItem 2").build(state, menu2, |builder| builder.class("item"));
        // Button::with_label("SubItem 3")
        //     .on_press(Event::new(WindowEvent::WindowClose))
        //     .build(state, menu2, |builder| builder.class("item"));

        // Button::new().build(state, menu1, |builder| builder.class("spacer2"));

        // let menu3 = Menu::new("SubSubMenu", MenuPosition::Right).build(state, menu2, |builder| {
        //     builder.class("item").class("submenu")
        // });

        // Button::with_label("SubSubItem 1").build(state, menu3, |builder| builder.class("item"));
        // Button::with_label("SubSubItem 2").build(state, menu3, |builder| builder.class("item"));
        // Button::with_label("SubSubItem 3")
        //     .on_press(Event::new(WindowEvent::WindowClose))
        //     .build(state, menu3, |builder| builder.class("item"));

        // The button that owns the context menu built below.
        let button = Button::with_label("Right Click Me").build(state, window, |builder| {
            builder
                .set_left(Length::Pixels(100.0))
                .set_top(Length::Pixels(100.0))
                .set_width(Length::Pixels(150.0))
                .set_height(Length::Pixels(30.0))
                .set_background_color(Color::green())
        });

        // The context-menu container spans the whole parent; `menu` is the
        // popup itself, sized explicitly below.
        let (_container, menu) = ContextMenu::new().build(state, button, |builder| {
            builder
                .set_width(Length::Percentage(1.0))
                .set_height(Length::Percentage(1.0))
        });

        menu.set_width(state, Length::Pixels(100.0));

        Button::with_label("Option 1").build(state, menu, |builder| {
            builder
                .set_height(Length::Pixels(30.0))
                .set_background_color(Color::rgb(50, 50, 50))
        });

        Button::with_label("Option 2").build(state, menu, |builder| {
            builder
                .set_height(Length::Pixels(30.0))
                .set_background_color(Color::rgb(50, 50, 50))
        });

        Button::with_label("Option 3").build(state, menu, |builder| {
            builder
                .set_height(Length::Pixels(30.0))
                .set_background_color(Color::rgb(50, 50, 50))
        });

        win_desc.with_title("Menus")
    });

    app.run();
}
34.467742
127
0.504601
fe3bcf0ab826af6b466115c54ea27c9da0612ca3
4,647
//! lint when there is a large size difference between variants on an enum

use crate::utils::{snippet_opt, span_lint_and_then};
use rustc::hir::*;
use rustc::impl_lint_pass;
use rustc::lint::{LateContext, LateLintPass, LintArray, LintPass};
use rustc::ty::layout::LayoutOf;
use rustc_errors::Applicability;
use rustc_session::declare_tool_lint;

declare_clippy_lint! {
    /// **What it does:** Checks for large size differences between variants on
    /// `enum`s.
    ///
    /// **Why is this bad?** Enum size is bounded by the largest variant. Having a
    /// large variant
    /// can penalize the memory layout of that enum.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// enum Test {
    ///     A(i32),
    ///     B([i32; 8000]),
    /// }
    /// ```
    pub LARGE_ENUM_VARIANT,
    perf,
    "large size difference between variants on an enum"
}

#[derive(Copy, Clone)]
pub struct LargeEnumVariant {
    // Threshold (in bytes) above which the size gap between the smallest and
    // largest variant triggers the lint.
    maximum_size_difference_allowed: u64,
}

impl LargeEnumVariant {
    #[must_use]
    pub fn new(maximum_size_difference_allowed: u64) -> Self {
        Self {
            maximum_size_difference_allowed,
        }
    }
}

impl_lint_pass!(LargeEnumVariant => [LARGE_ENUM_VARIANT]);

impl<'a, 'tcx> LateLintPass<'a, 'tcx> for LargeEnumVariant {
    fn check_item(&mut self, cx: &LateContext<'_, '_>, item: &Item<'_>) {
        let did = cx.tcx.hir().local_def_id(item.hir_id);
        if let ItemKind::Enum(ref def, _) = item.kind {
            let ty = cx.tcx.type_of(did);
            let adt = ty.ty_adt_def().expect("already checked whether this is an enum");

            // Track the (size, (index, variant)) extremes across all variants.
            let mut smallest_variant: Option<(_, _)> = None;
            let mut largest_variant: Option<(_, _)> = None;

            for (i, variant) in adt.variants.iter().enumerate() {
                // Variant size = sum of its field sizes (layout-known fields only).
                let size: u64 = variant
                    .fields
                    .iter()
                    .filter_map(|f| {
                        let ty = cx.tcx.type_of(f.did);
                        // don't count generics by filtering out everything
                        // that does not have a layout
                        cx.layout_of(ty).ok().map(|l| l.size.bytes())
                    })
                    .sum();

                let grouped = (size, (i, variant));

                update_if(&mut smallest_variant, grouped, |a, b| b.0 <= a.0);
                update_if(&mut largest_variant, grouped, |a, b| b.0 >= a.0);
            }

            if let (Some(smallest), Some(largest)) = (smallest_variant, largest_variant) {
                let difference = largest.0 - smallest.0;

                if difference > self.maximum_size_difference_allowed {
                    let (i, variant) = largest.1;

                    span_lint_and_then(
                        cx,
                        LARGE_ENUM_VARIANT,
                        def.variants[i].span,
                        "large size difference between variants",
                        |db| {
                            // Single-field variants get a machine-applicable
                            // `Box<...>` suggestion; otherwise just a help note.
                            if variant.fields.len() == 1 {
                                let span = match def.variants[i].data {
                                    VariantData::Struct(ref fields, ..) | VariantData::Tuple(ref fields, ..) => {
                                        fields[0].ty.span
                                    },
                                    VariantData::Unit(..) => unreachable!(),
                                };
                                if let Some(snip) = snippet_opt(cx, span) {
                                    db.span_suggestion(
                                        span,
                                        "consider boxing the large fields to reduce the total size of the \
                                         enum",
                                        format!("Box<{}>", snip),
                                        Applicability::MaybeIncorrect,
                                    );
                                    return;
                                }
                            }
                            db.span_help(
                                def.variants[i].span,
                                "consider boxing the large fields to reduce the total size of the enum",
                            );
                        },
                    );
                }
            }
        }
    }
}

// Replaces `*old` with `new` when `old` is empty or when `f(old, new)` holds;
// used above to fold variants into running min/max by size.
fn update_if<T, F>(old: &mut Option<T>, new: T, f: F)
where
    F: Fn(&T, &T) -> bool,
{
    if let Some(ref mut val) = *old {
        if f(val, &new) {
            *val = new;
        }
    } else {
        *old = Some(new);
    }
}
35.473282
113
0.453411
29c8fb6d8d971760085cbaa0c9617ccb7dbcda9f
8,019
//! Runtime registration of the Objective-C classes used by the iOS port:
//! `create_delegate_class` wires UIKit application-lifecycle callbacks into
//! the shared `DelegateState` event queue, and `create_view_class` builds the
//! GL-backed `MainView`/`MainViewController` classes.

use std::mem;
use std::os::raw;

use super::DelegateState;
use Event;
use events::{ Touch, TouchPhase };

use objc::runtime::{ Class, Object, Sel, BOOL, YES };
use objc::declare::{ ClassDecl };

use super::ffi::{ longjmp, id, nil, CGRect, CGPoint, CGFloat,
                  UIViewAutoresizingFlexibleWidth, UIViewAutoresizingFlexibleHeight };

use super::jmpbuf;

/// Declares and registers the `AppDelegate` Objective-C class, whose methods
/// forward UIKit lifecycle and touch events to glutin.
pub fn create_delegate_class() {
    // Builds the window, view controller and GL view, then stores a
    // `DelegateState` pointer in the delegate's `glutinState` ivar.
    extern fn did_finish_launching(this: &mut Object, _: Sel, _: id, _: id) -> BOOL {
        unsafe {
            let main_screen: id = msg_send![Class::get("UIScreen").unwrap(), mainScreen];
            let bounds: CGRect = msg_send![main_screen, bounds];
            let scale: CGFloat = msg_send![main_screen, nativeScale];

            let window: id = msg_send![Class::get("UIWindow").unwrap(), alloc];
            let window: id = msg_send![window, initWithFrame:bounds.clone()];

            let size = (bounds.size.width as u32, bounds.size.height as u32);

            let view_controller: id = msg_send![Class::get("MainViewController").unwrap(), alloc];
            let view_controller: id = msg_send![view_controller, init];

            let class = Class::get("MainView").unwrap();
            let view: id = msg_send![class, alloc];
            let view: id = msg_send![view, initForGl:&bounds];

            let _: () = msg_send![view_controller, setView:view];

            let _: () = msg_send![window, setRootViewController:view_controller];
            let _: () = msg_send![window, addSubview:view];
            let _: () = msg_send![window, makeKeyAndVisible];

            // Ownership of the boxed state is transferred to the ivar; it is
            // recovered via `get_ivar("glutinState")` in the other callbacks.
            let state = Box::new(DelegateState::new(window, view_controller, view, size, scale as f32));
            let state_ptr: *mut DelegateState = mem::transmute(state);
            this.set_ivar("glutinState", state_ptr as *mut raw::c_void);

            // Schedule `postLaunch:` on the next run-loop turn.
            let _: () = msg_send![this, performSelector:sel!(postLaunch:) withObject:nil afterDelay:0.0];
        }
        YES
    }

    // Jumps back to the point saved in `jmpbuf` once launching completes.
    extern fn post_launch(_: &Object, _: Sel, _: id) {
        unsafe { longjmp(mem::transmute(&mut jmpbuf),1); }
    }

    extern fn did_become_active(this: &Object, _: Sel, _: id) {
        unsafe {
            let state: *mut raw::c_void = *this.get_ivar("glutinState");
            let state = &mut *(state as *mut DelegateState);
            state.events_queue.push_back(Event::Focused(true));
        }
    }

    extern fn will_resign_active(this: &Object, _: Sel, _: id) {
        unsafe {
            let state: *mut raw::c_void = *this.get_ivar("glutinState");
            let state = &mut *(state as *mut DelegateState);
            state.events_queue.push_back(Event::Focused(false));
        }
    }

    extern fn will_enter_foreground(this: &Object, _: Sel, _: id) {
        unsafe {
            let state: *mut raw::c_void = *this.get_ivar("glutinState");
            let state = &mut *(state as *mut DelegateState);
            state.events_queue.push_back(Event::Suspended(false));
        }
    }

    extern fn did_enter_background(this: &Object, _: Sel, _: id) {
        unsafe {
            let state: *mut raw::c_void = *this.get_ivar("glutinState");
            let state = &mut *(state as *mut DelegateState);
            state.events_queue.push_back(Event::Suspended(true));
        }
    }

    extern fn will_terminate(this: &Object, _: Sel, _: id) {
        unsafe {
            let state: *mut raw::c_void = *this.get_ivar("glutinState");
            let state = &mut *(state as *mut DelegateState);
            // push event to the front to guarantee that we'll process it
            // immediately after jump
            state.events_queue.push_front(Event::Closed);
            longjmp(mem::transmute(&mut jmpbuf),1);
        }
    }

    // Shared handler for all four touch selectors; translates each UITouch
    // in the set into a glutin `Event::Touch`.
    extern fn handle_touches(this: &Object, _: Sel, touches: id, _:id) {
        unsafe {
            let state: *mut raw::c_void = *this.get_ivar("glutinState");
            let state = &mut *(state as *mut DelegateState);

            let touches_enum: id = msg_send![touches, objectEnumerator];

            loop {
                let touch: id = msg_send![touches_enum, nextObject];
                if touch == nil {
                    break
                }
                let location: CGPoint = msg_send![touch, locationInView:nil];
                // The object pointer doubles as a stable touch identifier.
                let touch_id = touch as u64;
                let phase: i32 = msg_send![touch, phase];

                state.events_queue.push_back(Event::Touch(Touch {
                    id: touch_id,
                    location: (location.x as f64, location.y as f64),
                    phase: match phase {
                        0 => TouchPhase::Started,
                        1 => TouchPhase::Moved,
                        // 2 is UITouchPhaseStationary and is not expected here
                        3 => TouchPhase::Ended,
                        4 => TouchPhase::Cancelled,
                        _ => panic!("unexpected touch phase: {:?}", phase)
                    }
                }));
            }
        }
    }

    let superclass = Class::get("UIResponder").unwrap();
    let mut decl = ClassDecl::new("AppDelegate", superclass).unwrap();
    unsafe {
        decl.add_method(sel!(application:didFinishLaunchingWithOptions:),
                        did_finish_launching as extern fn(&mut Object, Sel, id, id) -> BOOL);

        decl.add_method(sel!(applicationDidBecomeActive:),
                        did_become_active as extern fn(&Object, Sel, id));
        decl.add_method(sel!(applicationWillResignActive:),
                        will_resign_active as extern fn(&Object, Sel, id));
        decl.add_method(sel!(applicationWillEnterForeground:),
                        will_enter_foreground as extern fn(&Object, Sel, id));
        decl.add_method(sel!(applicationDidEnterBackground:),
                        did_enter_background as extern fn(&Object, Sel, id));

        decl.add_method(sel!(applicationWillTerminate:),
                        will_terminate as extern fn(&Object, Sel, id));

        decl.add_method(sel!(touchesBegan:withEvent:),
                        handle_touches as extern fn(this: &Object, _: Sel, _: id, _:id));
        decl.add_method(sel!(touchesMoved:withEvent:),
                        handle_touches as extern fn(this: &Object, _: Sel, _: id, _:id));
        decl.add_method(sel!(touchesEnded:withEvent:),
                        handle_touches as extern fn(this: &Object, _: Sel, _: id, _:id));
        decl.add_method(sel!(touchesCancelled:withEvent:),
                        handle_touches as extern fn(this: &Object, _: Sel, _: id, _:id));

        decl.add_method(sel!(postLaunch:),
                        post_launch as extern fn(&Object, Sel, id));

        // NOTE(review): stray second semicolon below — harmless empty statement.
        decl.add_ivar::<*mut raw::c_void>("glutinState");;

        decl.register();
    }
}

/// Registers the `MainViewController` (an empty `UIViewController` subclass)
/// and the `MainView` class backed by a `CAEAGLLayer`.
pub fn create_view_class() {
    let superclass = Class::get("UIViewController").unwrap();
    let decl = ClassDecl::new("MainViewController",superclass).unwrap();
    decl.register();

    // Custom initializer: configures autoresizing and an opaque layer.
    extern fn init_for_gl(this: &Object, _: Sel, frame: *const raw::c_void) -> id {
        unsafe {
            let bounds: *const CGRect = mem::transmute(frame);
            let view: id = msg_send![this, initWithFrame:(*bounds).clone()];

            let _: () = msg_send![view, setAutoresizingMask: UIViewAutoresizingFlexibleWidth|UIViewAutoresizingFlexibleHeight];
            let _: () = msg_send![view, setAutoresizesSubviews:YES];

            let layer: id = msg_send![view, layer];
            let _ : () = msg_send![layer, setOpaque:YES];

            view
        }
    }

    // Class method telling UIKit to back this view with a CAEAGLLayer.
    extern fn layer_class(_: &Class, _: Sel) -> *const Class {
        unsafe { mem::transmute(Class::get("CAEAGLLayer").unwrap()) }
    }

    let superclass = Class::get("UIView").unwrap();
    let mut decl = ClassDecl::new("MainView", superclass).unwrap();
    unsafe {
        decl.add_method(sel!(initForGl:),
                        init_for_gl as extern fn(&Object, Sel, *const raw::c_void) -> id);
        decl.add_class_method(sel!(layerClass),
                        layer_class as extern fn(&Class, Sel) -> *const Class);
        decl.register();
    }
}
35.482301
127
0.580372
39c527936750104ca2c888411e99297932621cec
154
//! Module declarations and re-exports for the algorithms in this directory.

mod egg_dropping;
mod fibonacci;

// Re-export the solvers at this module's root for convenient access.
pub use self::egg_dropping::egg_drop;
pub use self::fibonacci::fibonacci;
pub use self::fibonacci::recursive_fibonacci;
22
45
0.798701
67bc4a8a7f3f33cdad0460b2cbab60e8dada1ea7
68,006
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364) #![cfg_attr(stage0, feature(custom_attribute))] #![crate_name = "rustc_privacy"] #![unstable(feature = "rustc_private")] #![staged_api] #![crate_type = "dylib"] #![crate_type = "rlib"] #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "http://doc.rust-lang.org/nightly/")] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] #![feature(staged_api)] #[macro_use] extern crate log; #[macro_use] extern crate syntax; extern crate rustc; use self::PrivacyResult::*; use self::FieldName::*; use std::mem::replace; use rustc::ast_map; use rustc::metadata::csearch; use rustc::middle::def; use rustc::middle::privacy::ImportUse::*; use rustc::middle::privacy::LastPrivate::*; use rustc::middle::privacy::PrivateDep::*; use rustc::middle::privacy::{ExternalExports, ExportedItems, PublicItems}; use rustc::middle::ty::{MethodTypeParam, MethodStatic}; use rustc::middle::ty::{MethodCall, MethodMap, MethodOrigin, MethodParam}; use rustc::middle::ty::{MethodStaticClosure, MethodObject}; use rustc::middle::ty::MethodTraitObject; use rustc::middle::ty::{self, Ty}; use rustc::util::nodemap::{NodeMap, NodeSet}; use syntax::ast; use syntax::ast_util::{is_local, local_def}; use syntax::codemap::Span; use syntax::parse::token; use syntax::visit::{self, Visitor}; type Context<'a, 'tcx> = (&'a MethodMap<'tcx>, &'a def::ExportMap); /// 
Result of a checking operation - None => no errors were found. Some => an /// error and contains the span and message for reporting that error and /// optionally the same for a note about the error. type CheckResult = Option<(Span, String, Option<(Span, String)>)>; //////////////////////////////////////////////////////////////////////////////// /// The parent visitor, used to determine what's the parent of what (node-wise) //////////////////////////////////////////////////////////////////////////////// struct ParentVisitor { parents: NodeMap<ast::NodeId>, curparent: ast::NodeId, } impl<'v> Visitor<'v> for ParentVisitor { fn visit_item(&mut self, item: &ast::Item) { self.parents.insert(item.id, self.curparent); let prev = self.curparent; match item.node { ast::ItemMod(..) => { self.curparent = item.id; } // Enum variants are parented to the enum definition itself because // they inherit privacy ast::ItemEnum(ref def, _) => { for variant in &def.variants { // The parent is considered the enclosing enum because the // enum will dictate the privacy visibility of this variant // instead. self.parents.insert(variant.node.id, item.id); } } // Trait methods are always considered "public", but if the trait is // private then we need some private item in the chain from the // method to the root. In this case, if the trait is private, then // parent all the methods to the trait to indicate that they're // private. 
ast::ItemTrait(_, _, _, ref trait_items) if item.vis != ast::Public => { for trait_item in trait_items { self.parents.insert(trait_item.id, item.id); } } _ => {} } visit::walk_item(self, item); self.curparent = prev; } fn visit_foreign_item(&mut self, a: &ast::ForeignItem) { self.parents.insert(a.id, self.curparent); visit::walk_foreign_item(self, a); } fn visit_fn(&mut self, a: visit::FnKind<'v>, b: &'v ast::FnDecl, c: &'v ast::Block, d: Span, id: ast::NodeId) { // We already took care of some trait methods above, otherwise things // like impl methods and pub trait methods are parented to the // containing module, not the containing trait. if !self.parents.contains_key(&id) { self.parents.insert(id, self.curparent); } visit::walk_fn(self, a, b, c, d); } fn visit_impl_item(&mut self, ii: &'v ast::ImplItem) { // visit_fn handles methods, but associated consts have to be handled // here. if !self.parents.contains_key(&ii.id) { self.parents.insert(ii.id, self.curparent); } visit::walk_impl_item(self, ii); } fn visit_struct_def(&mut self, s: &ast::StructDef, _: ast::Ident, _: &'v ast::Generics, n: ast::NodeId) { // Struct constructors are parented to their struct definitions because // they essentially are the struct definitions. match s.ctor_id { Some(id) => { self.parents.insert(id, n); } None => {} } // While we have the id of the struct definition, go ahead and parent // all the fields. for field in &s.fields { self.parents.insert(field.node.id, self.curparent); } visit::walk_struct_def(self, s) } } //////////////////////////////////////////////////////////////////////////////// /// The embargo visitor, used to determine the exports of the ast //////////////////////////////////////////////////////////////////////////////// struct EmbargoVisitor<'a, 'tcx: 'a> { tcx: &'a ty::ctxt<'tcx>, export_map: &'a def::ExportMap, // This flag is an indicator of whether the previous item in the // hierarchical chain was exported or not. 
This is the indicator of whether // children should be exported as well. Note that this can flip from false // to true if a reexported module is entered (or an action similar). prev_exported: bool, // This is a list of all exported items in the AST. An exported item is any // function/method/item which is usable by external crates. This essentially // means that the result is "public all the way down", but the "path down" // may jump across private boundaries through reexport statements. exported_items: ExportedItems, // This sets contains all the destination nodes which are publicly // re-exported. This is *not* a set of all reexported nodes, only a set of // all nodes which are reexported *and* reachable from external crates. This // means that the destination of the reexport is exported, and hence the // destination must also be exported. reexports: NodeSet, // These two fields are closely related to one another in that they are only // used for generation of the 'PublicItems' set, not for privacy checking at // all public_items: PublicItems, prev_public: bool, } impl<'a, 'tcx> EmbargoVisitor<'a, 'tcx> { // There are checks inside of privacy which depend on knowing whether a // trait should be exported or not. The two current consumers of this are: // // 1. Should default methods of a trait be exported? // 2. Should the methods of an implementation of a trait be exported? // // The answer to both of these questions partly rely on whether the trait // itself is exported or not. If the trait is somehow exported, then the // answers to both questions must be yes. Right now this question involves // more analysis than is currently done in rustc, so we conservatively // answer "yes" so that all traits need to be exported. 
fn exported_trait(&self, _id: ast::NodeId) -> bool { true } } impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> { fn visit_item(&mut self, item: &ast::Item) { let orig_all_pub = self.prev_public; self.prev_public = orig_all_pub && item.vis == ast::Public; if self.prev_public { self.public_items.insert(item.id); } let orig_all_exported = self.prev_exported; match item.node { // impls/extern blocks do not break the "public chain" because they // cannot have visibility qualifiers on them anyway ast::ItemImpl(..) | ast::ItemDefaultImpl(..) | ast::ItemForeignMod(..) => {} // Traits are a little special in that even if they themselves are // not public they may still be exported. ast::ItemTrait(..) => { self.prev_exported = self.exported_trait(item.id); } // Private by default, hence we only retain the "public chain" if // `pub` is explicitly listed. _ => { self.prev_exported = (orig_all_exported && item.vis == ast::Public) || self.reexports.contains(&item.id); } } let public_first = self.prev_exported && self.exported_items.insert(item.id); match item.node { // Enum variants inherit from their parent, so if the enum is // public all variants are public unless they're explicitly priv ast::ItemEnum(ref def, _) if public_first => { for variant in &def.variants { self.exported_items.insert(variant.node.id); self.public_items.insert(variant.node.id); } } // Implementations are a little tricky to determine what's exported // out of them. Here's a few cases which are currently defined: // // * Impls for private types do not need to export their methods // (either public or private methods) // // * Impls for public types only have public methods exported // // * Public trait impls for public types must have all methods // exported. // // * Private trait impls for public types can be ignored // // * Public trait impls for private types have their methods // exported. 
I'm not entirely certain that this is the correct // thing to do, but I have seen use cases of where this will cause // undefined symbols at linkage time if this case is not handled. // // * Private trait impls for private types can be completely ignored ast::ItemImpl(_, _, _, _, ref ty, ref impl_items) => { let public_ty = match ty.node { ast::TyPath(..) => { match self.tcx.def_map.borrow().get(&ty.id).unwrap().full_def() { def::DefPrimTy(..) => true, def => { let did = def.def_id(); !is_local(did) || self.exported_items.contains(&did.node) } } } _ => true, }; let tr = ty::impl_trait_ref(self.tcx, local_def(item.id)); let public_trait = tr.clone().map_or(false, |tr| { !is_local(tr.def_id) || self.exported_items.contains(&tr.def_id.node) }); if public_ty || public_trait { for impl_item in impl_items { match impl_item.node { ast::ConstImplItem(..) => { if (public_ty && impl_item.vis == ast::Public) || tr.is_some() { self.exported_items.insert(impl_item.id); } } ast::MethodImplItem(ref sig, _) => { let meth_public = match sig.explicit_self.node { ast::SelfStatic => public_ty, _ => true, } && impl_item.vis == ast::Public; if meth_public || tr.is_some() { self.exported_items.insert(impl_item.id); } } ast::TypeImplItem(_) | ast::MacImplItem(_) => {} } } } } // Default methods on traits are all public so long as the trait // is public ast::ItemTrait(_, _, _, ref trait_items) if public_first => { for trait_item in trait_items { debug!("trait item {}", trait_item.id); self.exported_items.insert(trait_item.id); } } // Struct constructors are public if the struct is all public. 
ast::ItemStruct(ref def, _) if public_first => { match def.ctor_id { Some(id) => { self.exported_items.insert(id); } None => {} } // fields can be public or private, so lets check for field in &def.fields { let vis = match field.node.kind { ast::NamedField(_, vis) | ast::UnnamedField(vis) => vis }; if vis == ast::Public { self.public_items.insert(field.node.id); } } } ast::ItemTy(ref ty, _) if public_first => { if let ast::TyPath(..) = ty.node { match self.tcx.def_map.borrow().get(&ty.id).unwrap().full_def() { def::DefPrimTy(..) | def::DefTyParam(..) => {}, def => { let did = def.def_id(); if is_local(did) { self.exported_items.insert(did.node); } } } } } _ => {} } visit::walk_item(self, item); self.prev_exported = orig_all_exported; self.prev_public = orig_all_pub; } fn visit_foreign_item(&mut self, a: &ast::ForeignItem) { if (self.prev_exported && a.vis == ast::Public) || self.reexports.contains(&a.id) { self.exported_items.insert(a.id); } } fn visit_mod(&mut self, m: &ast::Mod, _sp: Span, id: ast::NodeId) { // This code is here instead of in visit_item so that the // crate module gets processed as well. 
if self.prev_exported { assert!(self.export_map.contains_key(&id), "wut {}", id); for export in self.export_map.get(&id).unwrap() { if is_local(export.def_id) { self.reexports.insert(export.def_id.node); } } } visit::walk_mod(self, m) } } //////////////////////////////////////////////////////////////////////////////// /// The privacy visitor, where privacy checks take place (violations reported) //////////////////////////////////////////////////////////////////////////////// struct PrivacyVisitor<'a, 'tcx: 'a> { tcx: &'a ty::ctxt<'tcx>, curitem: ast::NodeId, in_foreign: bool, parents: NodeMap<ast::NodeId>, external_exports: ExternalExports, } enum PrivacyResult { Allowable, ExternallyDenied, DisallowedBy(ast::NodeId), } enum FieldName { UnnamedField(usize), // index // (Name, not Ident, because struct fields are not macro-hygienic) NamedField(ast::Name), } impl<'a, 'tcx> PrivacyVisitor<'a, 'tcx> { // used when debugging fn nodestr(&self, id: ast::NodeId) -> String { self.tcx.map.node_to_string(id).to_string() } // Determines whether the given definition is public from the point of view // of the current item. 
fn def_privacy(&self, did: ast::DefId) -> PrivacyResult { if !is_local(did) { if self.external_exports.contains(&did) { debug!("privacy - {:?} was externally exported", did); return Allowable; } debug!("privacy - is {:?} a public method", did); return match self.tcx.impl_or_trait_items.borrow().get(&did) { Some(&ty::ConstTraitItem(ref ac)) => { debug!("privacy - it's a const: {:?}", *ac); match ac.container { ty::TraitContainer(id) => { debug!("privacy - recursing on trait {:?}", id); self.def_privacy(id) } ty::ImplContainer(id) => { match ty::impl_trait_ref(self.tcx, id) { Some(t) => { debug!("privacy - impl of trait {:?}", id); self.def_privacy(t.def_id) } None => { debug!("privacy - found inherent \ associated constant {:?}", ac.vis); if ac.vis == ast::Public { Allowable } else { ExternallyDenied } } } } } } Some(&ty::MethodTraitItem(ref meth)) => { debug!("privacy - well at least it's a method: {:?}", *meth); match meth.container { ty::TraitContainer(id) => { debug!("privacy - recursing on trait {:?}", id); self.def_privacy(id) } ty::ImplContainer(id) => { match ty::impl_trait_ref(self.tcx, id) { Some(t) => { debug!("privacy - impl of trait {:?}", id); self.def_privacy(t.def_id) } None => { debug!("privacy - found a method {:?}", meth.vis); if meth.vis == ast::Public { Allowable } else { ExternallyDenied } } } } } } Some(&ty::TypeTraitItem(ref typedef)) => { match typedef.container { ty::TraitContainer(id) => { debug!("privacy - recursing on trait {:?}", id); self.def_privacy(id) } ty::ImplContainer(id) => { match ty::impl_trait_ref(self.tcx, id) { Some(t) => { debug!("privacy - impl of trait {:?}", id); self.def_privacy(t.def_id) } None => { debug!("privacy - found a typedef {:?}", typedef.vis); if typedef.vis == ast::Public { Allowable } else { ExternallyDenied } } } } } } None => { debug!("privacy - nope, not even a method"); ExternallyDenied } }; } debug!("privacy - local {} not public all the way down", self.tcx.map.node_to_string(did.node)); // return 
quickly for things in the same module if self.parents.get(&did.node) == self.parents.get(&self.curitem) { debug!("privacy - same parent, we're done here"); return Allowable; } // We now know that there is at least one private member between the // destination and the root. let mut closest_private_id = did.node; loop { debug!("privacy - examining {}", self.nodestr(closest_private_id)); let vis = match self.tcx.map.find(closest_private_id) { // If this item is a method, then we know for sure that it's an // actual method and not a static method. The reason for this is // that these cases are only hit in the ExprMethodCall // expression, and ExprCall will have its path checked later // (the path of the trait/impl) if it's a static method. // // With this information, then we can completely ignore all // trait methods. The privacy violation would be if the trait // couldn't get imported, not if the method couldn't be used // (all trait methods are public). // // However, if this is an impl method, then we dictate this // decision solely based on the privacy of the method // invocation. // FIXME(#10573) is this the right behavior? Why not consider // where the method was defined? Some(ast_map::NodeImplItem(ii)) => { match ii.node { ast::ConstImplItem(..) | ast::MethodImplItem(..) => { let imp = self.tcx.map .get_parent_did(closest_private_id); match ty::impl_trait_ref(self.tcx, imp) { Some(..) 
=> return Allowable, _ if ii.vis == ast::Public => { return Allowable } _ => ii.vis } } ast::TypeImplItem(_) | ast::MacImplItem(_) => return Allowable, } } Some(ast_map::NodeTraitItem(_)) => { return Allowable; } // This is not a method call, extract the visibility as one // would normally look at it Some(ast_map::NodeItem(it)) => it.vis, Some(ast_map::NodeForeignItem(_)) => { self.tcx.map.get_foreign_vis(closest_private_id) } Some(ast_map::NodeVariant(..)) => { ast::Public // need to move up a level (to the enum) } _ => ast::Public, }; if vis != ast::Public { break } // if we've reached the root, then everything was allowable and this // access is public. if closest_private_id == ast::CRATE_NODE_ID { return Allowable } closest_private_id = *self.parents.get(&closest_private_id).unwrap(); // If we reached the top, then we were public all the way down and // we can allow this access. if closest_private_id == ast::DUMMY_NODE_ID { return Allowable } } debug!("privacy - closest priv {}", self.nodestr(closest_private_id)); if self.private_accessible(closest_private_id) { Allowable } else { DisallowedBy(closest_private_id) } } /// For a local private node in the AST, this function will determine /// whether the node is accessible by the current module that iteration is /// inside. fn private_accessible(&self, id: ast::NodeId) -> bool { let parent = *self.parents.get(&id).unwrap(); debug!("privacy - accessible parent {}", self.nodestr(parent)); // After finding `did`'s closest private member, we roll ourselves back // to see if this private member's parent is anywhere in our ancestry. // By the privacy rules, we can access all of our ancestor's private // members, so that's why we test the parent, and not the did itself. 
let mut cur = self.curitem; loop { debug!("privacy - questioning {}, {}", self.nodestr(cur), cur); match cur { // If the relevant parent is in our history, then we're allowed // to look inside any of our ancestor's immediate private items, // so this access is valid. x if x == parent => return true, // If we've reached the root, then we couldn't access this item // in the first place ast::DUMMY_NODE_ID => return false, // Keep going up _ => {} } cur = *self.parents.get(&cur).unwrap(); } } fn report_error(&self, result: CheckResult) -> bool { match result { None => true, Some((span, msg, note)) => { self.tcx.sess.span_err(span, &msg[..]); match note { Some((span, msg)) => { self.tcx.sess.span_note(span, &msg[..]) } None => {}, } false }, } } /// Guarantee that a particular definition is public. Returns a CheckResult /// which contains any errors found. These can be reported using `report_error`. /// If the result is `None`, no errors were found. fn ensure_public(&self, span: Span, to_check: ast::DefId, source_did: Option<ast::DefId>, msg: &str) -> CheckResult { let id = match self.def_privacy(to_check) { ExternallyDenied => { return Some((span, format!("{} is private", msg), None)) } Allowable => return None, DisallowedBy(id) => id, }; // If we're disallowed by a particular id, then we attempt to give a // nice error message to say why it was disallowed. It was either // because the item itself is private or because its parent is private // and its parent isn't in our ancestry. let (err_span, err_msg) = if id == source_did.unwrap_or(to_check).node { return Some((span, format!("{} is private", msg), None)); } else { (span, format!("{} is inaccessible", msg)) }; let item = match self.tcx.map.find(id) { Some(ast_map::NodeItem(item)) => { match item.node { // If an impl disallowed this item, then this is resolve's // way of saying that a struct/enum's static method was // invoked, and the struct/enum itself is private. 
Crawl // back up the chains to find the relevant struct/enum that // was private. ast::ItemImpl(_, _, _, _, ref ty, _) => { match ty.node { ast::TyPath(..) => {} _ => return Some((err_span, err_msg, None)), }; let def = self.tcx.def_map.borrow().get(&ty.id).unwrap().full_def(); let did = def.def_id(); assert!(is_local(did)); match self.tcx.map.get(did.node) { ast_map::NodeItem(item) => item, _ => self.tcx.sess.span_bug(item.span, "path is not an item") } } _ => item } } Some(..) | None => return Some((err_span, err_msg, None)), }; let desc = match item.node { ast::ItemMod(..) => "module", ast::ItemTrait(..) => "trait", ast::ItemStruct(..) => "struct", ast::ItemEnum(..) => "enum", _ => return Some((err_span, err_msg, None)) }; let msg = format!("{} `{}` is private", desc, token::get_ident(item.ident)); Some((err_span, err_msg, Some((span, msg)))) } // Checks that a field is in scope. fn check_field(&mut self, span: Span, id: ast::DefId, name: FieldName) { let fields = ty::lookup_struct_fields(self.tcx, id); let field = match name { NamedField(f_name) => { debug!("privacy - check named field {} in struct {:?}", f_name, id); fields.iter().find(|f| f.name == f_name).unwrap() } UnnamedField(idx) => &fields[idx] }; if field.vis == ast::Public || (is_local(field.id) && self.private_accessible(field.id.node)) { return } let struct_type = ty::lookup_item_type(self.tcx, id).ty; let struct_desc = match struct_type.sty { ty::ty_struct(_, _) => format!("struct `{}`", ty::item_path_str(self.tcx, id)), // struct variant fields have inherited visibility ty::ty_enum(..) => return, _ => self.tcx.sess.span_bug(span, "can't find struct for field") }; let msg = match name { NamedField(name) => format!("field `{}` of {} is private", token::get_name(name), struct_desc), UnnamedField(idx) => format!("field #{} of {} is private", idx + 1, struct_desc), }; self.tcx.sess.span_err(span, &msg[..]); } // Given the ID of a method, checks to ensure it's in scope. 
fn check_static_method(&mut self, span: Span, method_id: ast::DefId, name: ast::Name) { // If the method is a default method, we need to use the def_id of // the default implementation. let method_id = match ty::impl_or_trait_item(self.tcx, method_id) { ty::MethodTraitItem(method_type) => { method_type.provided_source.unwrap_or(method_id) } _ => { self.tcx.sess .span_bug(span, "got non-method item in check_static_method") } }; let string = token::get_name(name); self.report_error(self.ensure_public(span, method_id, None, &format!("method `{}`", string))); } // Checks that a path is in scope. fn check_path(&mut self, span: Span, path_id: ast::NodeId, last: ast::Name) { debug!("privacy - path {}", self.nodestr(path_id)); let path_res = *self.tcx.def_map.borrow().get(&path_id).unwrap(); let ck = |tyname: &str| { let ck_public = |def: ast::DefId| { debug!("privacy - ck_public {:?}", def); let name = token::get_name(last); let origdid = path_res.def_id(); self.ensure_public(span, def, Some(origdid), &format!("{} `{}`", tyname, name)) }; match path_res.last_private { LastMod(AllPublic) => {}, LastMod(DependsOn(def)) => { self.report_error(ck_public(def)); }, LastImport { value_priv, value_used: check_value, type_priv, type_used: check_type } => { // This dance with found_error is because we don't want to // report a privacy error twice for the same directive. let found_error = match (type_priv, check_type) { (Some(DependsOn(def)), Used) => { !self.report_error(ck_public(def)) }, _ => false, }; if !found_error { match (value_priv, check_value) { (Some(DependsOn(def)), Used) => { self.report_error(ck_public(def)); }, _ => {}, } } // If an import is not used in either namespace, we still // want to check that it could be legal. Therefore we check // in both namespaces and only report an error if both would // be illegal. We only report one error, even if it is // illegal to import from both namespaces. 
match (value_priv, check_value, type_priv, check_type) { (Some(p), Unused, None, _) | (None, _, Some(p), Unused) => { let p = match p { AllPublic => None, DependsOn(def) => ck_public(def), }; if p.is_some() { self.report_error(p); } }, (Some(v), Unused, Some(t), Unused) => { let v = match v { AllPublic => None, DependsOn(def) => ck_public(def), }; let t = match t { AllPublic => None, DependsOn(def) => ck_public(def), }; if let (Some(_), Some(t)) = (v, t) { self.report_error(Some(t)); } }, _ => {}, } }, } }; // FIXME(#12334) Imports can refer to definitions in both the type and // value namespaces. The privacy information is aware of this, but the // def map is not. Therefore the names we work out below will not always // be accurate and we can get slightly wonky error messages (but type // checking is always correct). match path_res.full_def() { def::DefFn(..) => ck("function"), def::DefStatic(..) => ck("static"), def::DefConst(..) => ck("const"), def::DefAssociatedConst(..) => ck("associated const"), def::DefVariant(..) => ck("variant"), def::DefTy(_, false) => ck("type"), def::DefTy(_, true) => ck("enum"), def::DefTrait(..) => ck("trait"), def::DefStruct(..) => ck("struct"), def::DefMethod(..) => ck("method"), def::DefMod(..) => ck("module"), _ => {} } } // Checks that a method is in scope. fn check_method(&mut self, span: Span, origin: &MethodOrigin, name: ast::Name) { match *origin { MethodStatic(method_id) => { self.check_static_method(span, method_id, name) } MethodStaticClosure(_) => {} // Trait methods are always all public. The only controlling factor // is whether the trait itself is accessible or not. MethodTypeParam(MethodParam { ref trait_ref, .. }) | MethodTraitObject(MethodObject { ref trait_ref, .. 
}) => { self.report_error(self.ensure_public(span, trait_ref.def_id, None, "source trait")); } } } } impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> { fn visit_item(&mut self, item: &ast::Item) { if let ast::ItemUse(ref vpath) = item.node { if let ast::ViewPathList(ref prefix, ref list) = vpath.node { for pid in list { match pid.node { ast::PathListIdent { id, name } => { debug!("privacy - ident item {}", id); self.check_path(pid.span, id, name.name); } ast::PathListMod { id } => { debug!("privacy - mod item {}", id); let name = prefix.segments.last().unwrap().identifier.name; self.check_path(pid.span, id, name); } } } } } let orig_curitem = replace(&mut self.curitem, item.id); visit::walk_item(self, item); self.curitem = orig_curitem; } fn visit_expr(&mut self, expr: &ast::Expr) { match expr.node { ast::ExprField(ref base, ident) => { if let ty::ty_struct(id, _) = ty::expr_ty_adjusted(self.tcx, &**base).sty { self.check_field(expr.span, id, NamedField(ident.node.name)); } } ast::ExprTupField(ref base, idx) => { if let ty::ty_struct(id, _) = ty::expr_ty_adjusted(self.tcx, &**base).sty { self.check_field(expr.span, id, UnnamedField(idx.node)); } } ast::ExprMethodCall(ident, _, _) => { let method_call = MethodCall::expr(expr.id); match self.tcx.method_map.borrow().get(&method_call) { None => { self.tcx.sess.span_bug(expr.span, "method call not in \ method map"); } Some(method) => { debug!("(privacy checking) checking impl method"); self.check_method(expr.span, &method.origin, ident.node.name); } } } ast::ExprStruct(_, ref fields, _) => { match ty::expr_ty(self.tcx, expr).sty { ty::ty_struct(ctor_id, _) => { // RFC 736: ensure all unmentioned fields are visible. // Rather than computing the set of unmentioned fields // (i.e. `all_fields - fields`), just check them all. 
let all_fields = ty::lookup_struct_fields(self.tcx, ctor_id); for field in all_fields { self.check_field(expr.span, ctor_id, NamedField(field.name)); } } ty::ty_enum(_, _) => { match self.tcx.def_map.borrow().get(&expr.id).unwrap().full_def() { def::DefVariant(_, variant_id, _) => { for field in fields { self.check_field(expr.span, variant_id, NamedField(field.ident.node.name)); } } _ => self.tcx.sess.span_bug(expr.span, "resolve didn't \ map enum struct \ constructor to a \ variant def"), } } _ => self.tcx.sess.span_bug(expr.span, "struct expr \ didn't have \ struct type?!"), } } ast::ExprPath(..) => { let guard = |did: ast::DefId| { let fields = ty::lookup_struct_fields(self.tcx, did); let any_priv = fields.iter().any(|f| { f.vis != ast::Public && ( !is_local(f.id) || !self.private_accessible(f.id.node)) }); if any_priv { self.tcx.sess.span_err(expr.span, "cannot invoke tuple struct constructor \ with private fields"); } }; match self.tcx.def_map.borrow().get(&expr.id).map(|d| d.full_def()) { Some(def::DefStruct(did)) => { guard(if is_local(did) { local_def(self.tcx.map.get_parent(did.node)) } else { // "tuple structs" with zero fields (such as // `pub struct Foo;`) don't have a ctor_id, hence // the unwrap_or to the same struct id. let maybe_did = csearch::get_tuple_struct_definition_if_ctor( &self.tcx.sess.cstore, did); maybe_did.unwrap_or(did) }) } _ => {} } } _ => {} } visit::walk_expr(self, expr); } fn visit_pat(&mut self, pattern: &ast::Pat) { // Foreign functions do not have their patterns mapped in the def_map, // and there's nothing really relevant there anyway, so don't bother // checking privacy. If you can name the type then you can pass it to an // external C function anyway. 
if self.in_foreign { return } match pattern.node { ast::PatStruct(_, ref fields, _) => { match ty::pat_ty(self.tcx, pattern).sty { ty::ty_struct(id, _) => { for field in fields { self.check_field(pattern.span, id, NamedField(field.node.ident.name)); } } ty::ty_enum(_, _) => { match self.tcx.def_map.borrow().get(&pattern.id).map(|d| d.full_def()) { Some(def::DefVariant(_, variant_id, _)) => { for field in fields { self.check_field(pattern.span, variant_id, NamedField(field.node.ident.name)); } } _ => self.tcx.sess.span_bug(pattern.span, "resolve didn't \ map enum struct \ pattern to a \ variant def"), } } _ => self.tcx.sess.span_bug(pattern.span, "struct pattern didn't have \ struct type?!"), } } // Patterns which bind no fields are allowable (the path is check // elsewhere). ast::PatEnum(_, Some(ref fields)) => { match ty::pat_ty(self.tcx, pattern).sty { ty::ty_struct(id, _) => { for (i, field) in fields.iter().enumerate() { if let ast::PatWild(..) = field.node { continue } self.check_field(field.span, id, UnnamedField(i)); } } ty::ty_enum(..) 
=> { // enum fields have no privacy at this time } _ => {} } } _ => {} } visit::walk_pat(self, pattern); } fn visit_foreign_item(&mut self, fi: &ast::ForeignItem) { self.in_foreign = true; visit::walk_foreign_item(self, fi); self.in_foreign = false; } fn visit_path(&mut self, path: &ast::Path, id: ast::NodeId) { self.check_path(path.span, id, path.segments.last().unwrap().identifier.name); visit::walk_path(self, path); } } //////////////////////////////////////////////////////////////////////////////// /// The privacy sanity check visitor, ensures unnecessary visibility isn't here //////////////////////////////////////////////////////////////////////////////// struct SanePrivacyVisitor<'a, 'tcx: 'a> { tcx: &'a ty::ctxt<'tcx>, in_fn: bool, } impl<'a, 'tcx, 'v> Visitor<'v> for SanePrivacyVisitor<'a, 'tcx> { fn visit_item(&mut self, item: &ast::Item) { if self.in_fn { self.check_all_inherited(item); } else { self.check_sane_privacy(item); } let in_fn = self.in_fn; let orig_in_fn = replace(&mut self.in_fn, match item.node { ast::ItemMod(..) => false, // modules turn privacy back on _ => in_fn, // otherwise we inherit }); visit::walk_item(self, item); self.in_fn = orig_in_fn; } fn visit_fn(&mut self, fk: visit::FnKind<'v>, fd: &'v ast::FnDecl, b: &'v ast::Block, s: Span, _: ast::NodeId) { // This catches both functions and methods let orig_in_fn = replace(&mut self.in_fn, true); visit::walk_fn(self, fk, fd, b, s); self.in_fn = orig_in_fn; } } impl<'a, 'tcx> SanePrivacyVisitor<'a, 'tcx> { /// Validates all of the visibility qualifiers placed on the item given. This /// ensures that there are no extraneous qualifiers that don't actually do /// anything. In theory these qualifiers wouldn't parse, but that may happen /// later on down the road... 
fn check_sane_privacy(&self, item: &ast::Item) { let tcx = self.tcx; let check_inherited = |sp: Span, vis: ast::Visibility, note: &str| { if vis != ast::Inherited { tcx.sess.span_err(sp, "unnecessary visibility qualifier"); if !note.is_empty() { tcx.sess.span_note(sp, note); } } }; match item.node { // implementations of traits don't need visibility qualifiers because // that's controlled by having the trait in scope. ast::ItemImpl(_, _, _, Some(..), _, ref impl_items) => { check_inherited(item.span, item.vis, "visibility qualifiers have no effect on trait \ impls"); for impl_item in impl_items { check_inherited(impl_item.span, impl_item.vis, ""); } } ast::ItemImpl(..) => { check_inherited(item.span, item.vis, "place qualifiers on individual methods instead"); } ast::ItemForeignMod(..) => { check_inherited(item.span, item.vis, "place qualifiers on individual functions \ instead"); } ast::ItemEnum(ref def, _) => { for v in &def.variants { match v.node.vis { ast::Public => { if item.vis == ast::Public { tcx.sess.span_err(v.span, "unnecessary `pub` \ visibility"); } } ast::Inherited => {} } } } ast::ItemTrait(..) | ast::ItemDefaultImpl(..) | ast::ItemConst(..) | ast::ItemStatic(..) | ast::ItemStruct(..) | ast::ItemFn(..) | ast::ItemMod(..) | ast::ItemTy(..) | ast::ItemExternCrate(_) | ast::ItemUse(_) | ast::ItemMac(..) => {} } } /// When inside of something like a function or a method, visibility has no /// control over anything so this forbids any mention of any visibility fn check_all_inherited(&self, item: &ast::Item) { let tcx = self.tcx; fn check_inherited(tcx: &ty::ctxt, sp: Span, vis: ast::Visibility) { if vis != ast::Inherited { tcx.sess.span_err(sp, "visibility has no effect inside functions"); } } let check_struct = |def: &ast::StructDef| { for f in &def.fields { match f.node.kind { ast::NamedField(_, p) => check_inherited(tcx, f.span, p), ast::UnnamedField(..) 
=> {} } } }; check_inherited(tcx, item.span, item.vis); match item.node { ast::ItemImpl(_, _, _, _, _, ref impl_items) => { for impl_item in impl_items { match impl_item.node { ast::MethodImplItem(..) => { check_inherited(tcx, impl_item.span, impl_item.vis); } _ => {} } } } ast::ItemForeignMod(ref fm) => { for i in &fm.items { check_inherited(tcx, i.span, i.vis); } } ast::ItemEnum(ref def, _) => { for v in &def.variants { check_inherited(tcx, v.span, v.node.vis); } } ast::ItemStruct(ref def, _) => check_struct(&**def), ast::ItemExternCrate(_) | ast::ItemUse(_) | ast::ItemTrait(..) | ast::ItemDefaultImpl(..) | ast::ItemStatic(..) | ast::ItemConst(..) | ast::ItemFn(..) | ast::ItemMod(..) | ast::ItemTy(..) | ast::ItemMac(..) => {} } } } struct VisiblePrivateTypesVisitor<'a, 'tcx: 'a> { tcx: &'a ty::ctxt<'tcx>, exported_items: &'a ExportedItems, public_items: &'a PublicItems, in_variant: bool, } struct CheckTypeForPrivatenessVisitor<'a, 'b: 'a, 'tcx: 'b> { inner: &'a VisiblePrivateTypesVisitor<'b, 'tcx>, /// whether the type refers to private types. contains_private: bool, /// whether we've recurred at all (i.e. if we're pointing at the /// first type on which visit_ty was called). at_outer_type: bool, // whether that first type is a public path. outer_type_is_public_path: bool, } impl<'a, 'tcx> VisiblePrivateTypesVisitor<'a, 'tcx> { fn path_is_private_type(&self, path_id: ast::NodeId) -> bool { let did = match self.tcx.def_map.borrow().get(&path_id).map(|d| d.full_def()) { // `int` etc. (None doesn't seem to occur.) None | Some(def::DefPrimTy(..)) => return false, Some(def) => def.def_id(), }; // A path can only be private if: // it's in this crate... if !is_local(did) { return false } // .. 
and it corresponds to a private type in the AST (this returns // None for type parameters) match self.tcx.map.find(did.node) { Some(ast_map::NodeItem(ref item)) => item.vis != ast::Public, Some(_) | None => false, } } fn trait_is_public(&self, trait_id: ast::NodeId) -> bool { // FIXME: this would preferably be using `exported_items`, but all // traits are exported currently (see `EmbargoVisitor.exported_trait`) self.public_items.contains(&trait_id) } fn check_ty_param_bound(&self, ty_param_bound: &ast::TyParamBound) { if let ast::TraitTyParamBound(ref trait_ref, _) = *ty_param_bound { if !self.tcx.sess.features.borrow().visible_private_types && self.path_is_private_type(trait_ref.trait_ref.ref_id) { let span = trait_ref.trait_ref.path.span; self.tcx.sess.span_err(span, "private trait in exported type \ parameter bound"); } } } fn item_is_public(&self, id: &ast::NodeId, vis: ast::Visibility) -> bool { self.exported_items.contains(id) || vis == ast::Public } } impl<'a, 'b, 'tcx, 'v> Visitor<'v> for CheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> { fn visit_ty(&mut self, ty: &ast::Ty) { if let ast::TyPath(..) = ty.node { if self.inner.path_is_private_type(ty.id) { self.contains_private = true; // found what we're looking for so let's stop // working. return } else if self.at_outer_type { self.outer_type_is_public_path = true; } } self.at_outer_type = false; visit::walk_ty(self, ty) } // don't want to recurse into [, .. expr] fn visit_expr(&mut self, _: &ast::Expr) {} } impl<'a, 'tcx, 'v> Visitor<'v> for VisiblePrivateTypesVisitor<'a, 'tcx> { fn visit_item(&mut self, item: &ast::Item) { match item.node { // contents of a private mod can be reexported, so we need // to check internals. ast::ItemMod(_) => {} // An `extern {}` doesn't introduce a new privacy // namespace (the contents have their own privacies). 
ast::ItemForeignMod(_) => {} ast::ItemTrait(_, _, ref bounds, _) => { if !self.trait_is_public(item.id) { return } for bound in &**bounds { self.check_ty_param_bound(bound) } } // impls need some special handling to try to offer useful // error messages without (too many) false positives // (i.e. we could just return here to not check them at // all, or some worse estimation of whether an impl is // publicly visible). ast::ItemImpl(_, _, ref g, ref trait_ref, ref self_, ref impl_items) => { // `impl [... for] Private` is never visible. let self_contains_private; // impl [... for] Public<...>, but not `impl [... for] // Vec<Public>` or `(Public,)` etc. let self_is_public_path; // check the properties of the Self type: { let mut visitor = CheckTypeForPrivatenessVisitor { inner: self, contains_private: false, at_outer_type: true, outer_type_is_public_path: false, }; visitor.visit_ty(&**self_); self_contains_private = visitor.contains_private; self_is_public_path = visitor.outer_type_is_public_path; } // miscellaneous info about the impl // `true` iff this is `impl Private for ...`. let not_private_trait = trait_ref.as_ref().map_or(true, // no trait counts as public trait |tr| { let did = ty::trait_ref_to_def_id(self.tcx, tr); !is_local(did) || self.trait_is_public(did.node) }); // `true` iff this is a trait impl or at least one method is public. // // `impl Public { $( fn ...() {} )* }` is not visible. // // This is required over just using the methods' privacy // directly because we might have `impl<T: Foo<Private>> ...`, // and we shouldn't warn about the generics if all the methods // are private (because `T` won't be visible externally). let trait_or_some_public_method = trait_ref.is_some() || impl_items.iter() .any(|impl_item| { match impl_item.node { ast::ConstImplItem(..) | ast::MethodImplItem(..) 
=> { self.exported_items.contains(&impl_item.id) } ast::TypeImplItem(_) | ast::MacImplItem(_) => false, } }); if !self_contains_private && not_private_trait && trait_or_some_public_method { visit::walk_generics(self, g); match *trait_ref { None => { for impl_item in impl_items { // This is where we choose whether to walk down // further into the impl to check its items. We // should only walk into public items so that we // don't erroneously report errors for private // types in private items. match impl_item.node { ast::ConstImplItem(..) | ast::MethodImplItem(..) if self.item_is_public(&impl_item.id, impl_item.vis) => { visit::walk_impl_item(self, impl_item) } ast::TypeImplItem(..) => { visit::walk_impl_item(self, impl_item) } _ => {} } } } Some(ref tr) => { // Any private types in a trait impl fall into three // categories. // 1. mentioned in the trait definition // 2. mentioned in the type params/generics // 3. mentioned in the associated types of the impl // // Those in 1. can only occur if the trait is in // this crate and will've been warned about on the // trait definition (there's no need to warn twice // so we don't check the methods). // // Those in 2. are warned via walk_generics and this // call here. visit::walk_path(self, &tr.path); // Those in 3. are warned with this call. for impl_item in impl_items { if let ast::TypeImplItem(ref ty) = impl_item.node { self.visit_ty(ty); } } } } } else if trait_ref.is_none() && self_is_public_path { // impl Public<Private> { ... }. Any public static // methods will be visible as `Public::foo`. let mut found_pub_static = false; for impl_item in impl_items { match impl_item.node { ast::ConstImplItem(..) 
=> { if self.item_is_public(&impl_item.id, impl_item.vis) { found_pub_static = true; visit::walk_impl_item(self, impl_item); } } ast::MethodImplItem(ref sig, _) => { if sig.explicit_self.node == ast::SelfStatic && self.item_is_public(&impl_item.id, impl_item.vis) { found_pub_static = true; visit::walk_impl_item(self, impl_item); } } _ => {} } } if found_pub_static { visit::walk_generics(self, g) } } return } // `type ... = ...;` can contain private types, because // we're introducing a new name. ast::ItemTy(..) => return, // not at all public, so we don't care _ if !self.item_is_public(&item.id, item.vis) => { return; } _ => {} } // We've carefully constructed it so that if we're here, then // any `visit_ty`'s will be called on things that are in // public signatures, i.e. things that we're interested in for // this visitor. debug!("VisiblePrivateTypesVisitor entering item {:?}", item); visit::walk_item(self, item); } fn visit_generics(&mut self, generics: &ast::Generics) { for ty_param in &*generics.ty_params { for bound in &*ty_param.bounds { self.check_ty_param_bound(bound) } } for predicate in &generics.where_clause.predicates { match predicate { &ast::WherePredicate::BoundPredicate(ref bound_pred) => { for bound in &*bound_pred.bounds { self.check_ty_param_bound(bound) } } &ast::WherePredicate::RegionPredicate(_) => {} &ast::WherePredicate::EqPredicate(ref eq_pred) => { self.visit_ty(&*eq_pred.ty); } } } } fn visit_foreign_item(&mut self, item: &ast::ForeignItem) { if self.exported_items.contains(&item.id) { visit::walk_foreign_item(self, item) } } fn visit_ty(&mut self, t: &ast::Ty) { debug!("VisiblePrivateTypesVisitor checking ty {:?}", t); if let ast::TyPath(_, ref p) = t.node { if !self.tcx.sess.features.borrow().visible_private_types && self.path_is_private_type(t.id) { self.tcx.sess.span_err(p.span, "private type in exported type signature"); } } visit::walk_ty(self, t) } fn visit_variant(&mut self, v: &ast::Variant, g: &ast::Generics) { if 
self.exported_items.contains(&v.node.id) { self.in_variant = true; visit::walk_variant(self, v, g); self.in_variant = false; } } fn visit_struct_field(&mut self, s: &ast::StructField) { match s.node.kind { ast::NamedField(_, vis) if vis == ast::Public || self.in_variant => { visit::walk_struct_field(self, s); } _ => {} } } // we don't need to introspect into these at all: an // expression/block context can't possibly contain exported things. // (Making them no-ops stops us from traversing the whole AST without // having to be super careful about our `walk_...` calls above.) fn visit_block(&mut self, _: &ast::Block) {} fn visit_expr(&mut self, _: &ast::Expr) {} } pub fn check_crate(tcx: &ty::ctxt, export_map: &def::ExportMap, external_exports: ExternalExports) -> (ExportedItems, PublicItems) { let krate = tcx.map.krate(); // Figure out who everyone's parent is let mut visitor = ParentVisitor { parents: NodeMap(), curparent: ast::DUMMY_NODE_ID, }; visit::walk_crate(&mut visitor, krate); // Use the parent map to check the privacy of everything let mut visitor = PrivacyVisitor { curitem: ast::DUMMY_NODE_ID, in_foreign: false, tcx: tcx, parents: visitor.parents, external_exports: external_exports, }; visit::walk_crate(&mut visitor, krate); // Sanity check to make sure that all privacy usage and controls are // reasonable. let mut visitor = SanePrivacyVisitor { in_fn: false, tcx: tcx, }; visit::walk_crate(&mut visitor, krate); tcx.sess.abort_if_errors(); // Build up a set of all exported items in the AST. This is a set of all // items which are reachable from external crates based on visibility. 
let mut visitor = EmbargoVisitor { tcx: tcx, exported_items: NodeSet(), public_items: NodeSet(), reexports: NodeSet(), export_map: export_map, prev_exported: true, prev_public: true, }; loop { let before = visitor.exported_items.len(); visit::walk_crate(&mut visitor, krate); if before == visitor.exported_items.len() { break } } let EmbargoVisitor { exported_items, public_items, .. } = visitor; { let mut visitor = VisiblePrivateTypesVisitor { tcx: tcx, exported_items: &exported_items, public_items: &public_items, in_variant: false, }; visit::walk_crate(&mut visitor, krate); } return (exported_items, public_items); }
42.583594
96
0.460783
165dc53b139ff11db55c89554da86a4314449aa7
2,593
use csv;
use regex::bytes::RegexBuilder;

use config::{Config, Delimiter};
use select::SelectColumns;
use util;
use CliResult;

static USAGE: &str = "
Filters CSV data by whether the given regex matches a row.

The regex is applied to each field in each row, and if any field matches,
then the row is written to the output. The columns to search can be limited
with the '--select' flag (but the full row is still written to the output
if there is a match).

Usage:
    xsv search [options] <regex> [<input>]
    xsv search --help

search options:
    -i, --ignore-case      Case insensitive search. This is equivalent to
                           prefixing the regex with '(?i)'.
    -s, --select <arg>     Select the columns to search. See 'xsv select -h'
                           for the full syntax.
    -v, --invert-match     Select only rows that did not match

Common options:
    -h, --help             Display this message
    -o, --output <file>    Write output to <file> instead of stdout.
    -n, --no-headers       When set, the first row will not be interpreted
                           as headers. (i.e., They are not searched, analyzed,
                           sliced, etc.)
    -d, --delimiter <arg>  The field delimiter for reading CSV data.
                           Must be a single character. (default: ,)
";

// Command-line arguments, deserialized from USAGE by docopt:
// `arg_*` fields map to positional arguments, `flag_*` fields to options.
#[derive(Deserialize)]
struct Args {
    arg_input: Option<String>,   // input path; presumably stdin when absent — per Config
    arg_regex: String,           // the pattern to search for
    flag_select: SelectColumns,  // restricts which columns are searched
    flag_output: Option<String>, // output path; presumably stdout when absent
    flag_no_headers: bool,
    flag_delimiter: Option<Delimiter>,
    flag_invert_match: bool,
    flag_ignore_case: bool,
}

/// Entry point for `xsv search`: streams the input CSV and writes every row
/// with at least one field matching the regex (or, with --invert-match,
/// every row with none) to the output.
pub fn run(argv: &[&str]) -> CliResult<()> {
    let args: Args = util::get_args(USAGE, argv)?;
    // Compile the pattern once up front. A bytes regex is used so fields
    // that are not valid UTF-8 can still be searched.
    let pattern = RegexBuilder::new(&*args.arg_regex)
        .case_insensitive(args.flag_ignore_case)
        .build()?;
    let rconfig = Config::new(&args.arg_input)
        .delimiter(args.flag_delimiter)
        .no_headers(args.flag_no_headers)
        .select(args.flag_select);

    let mut rdr = rconfig.reader()?;
    let mut wtr = Config::new(&args.flag_output).writer()?;

    // Resolve the --select expression against the actual header row.
    let headers = rdr.byte_headers()?.clone();
    let sel = rconfig.selection(&headers)?;

    // Headers themselves are never searched, but are echoed to the output.
    if !rconfig.no_headers {
        wtr.write_record(&headers)?;
    }

    // One record buffer is reused for the whole stream to avoid
    // per-row allocations.
    let mut record = csv::ByteRecord::new();
    while rdr.read_byte_record(&mut record)? {
        // A row matches when any *selected* field matches; the full row is
        // still written, not just the selected columns.
        let mut m = sel.select(&record).any(|f| pattern.is_match(f));
        if args.flag_invert_match {
            m = !m;
        }
        if m {
            wtr.write_byte_record(&record)?;
        }
    }
    Ok(wtr.flush()?)
}
32.012346
78
0.615889
ab89f6aff4b50798aadf5191f823e4200110472b
1,946
// Compiler: // // Run-time: // status: 0 // stdout: 10 // 14 // 1 // 12 // 12 // 1 #![feature(auto_traits, lang_items, no_core, start, intrinsics)] #![no_std] #![no_core] /* * Core */ // Because we don't have core yet. #[lang = "sized"] pub trait Sized {} #[lang = "copy"] trait Copy { } impl Copy for isize {} #[lang = "receiver"] trait Receiver { } #[lang = "freeze"] pub(crate) unsafe auto trait Freeze {} mod intrinsics { use super::Sized; extern "rust-intrinsic" { pub fn abort() -> !; } } mod libc { #[link(name = "c")] extern "C" { pub fn printf(format: *const i8, ...) -> i32; } } #[lang = "structural_peq"] pub trait StructuralPartialEq {} #[lang = "structural_teq"] pub trait StructuralEq {} #[lang = "drop_in_place"] #[allow(unconditional_recursion)] pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) { // Code here does not matter - this is replaced by the // real drop glue by the compiler. drop_in_place(to_drop); } /* * Code */ struct Test { field: isize, } struct WithRef { refe: &'static Test, } static mut CONSTANT: isize = 10; static mut TEST: Test = Test { field: 12, }; static mut TEST2: Test = Test { field: 14, }; static mut WITH_REF: WithRef = WithRef { refe: unsafe { &TEST }, }; #[start] fn main(mut argc: isize, _argv: *const *const u8) -> isize { unsafe { libc::printf(b"%ld\n\0" as *const u8 as *const i8, CONSTANT); libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST2.field); TEST2.field = argc; libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST2.field); libc::printf(b"%ld\n\0" as *const u8 as *const i8, WITH_REF.refe.field); WITH_REF.refe = &TEST2; libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST.field); libc::printf(b"%ld\n\0" as *const u8 as *const i8, WITH_REF.refe.field); } 0 }
18.186916
80
0.583762
28e5dd5ac961e032a524638bb6e9af5722c29ac7
1,650
use std::error::Error;
use std::fmt::{self, Display};
use std::num::ParseIntError;
use std::str::FromStr;

/// A point on the integer grid.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct Point {
    pub x: i32,
    pub y: i32,
}

impl Point {
    /// Builds a point from its coordinates.
    pub fn new(x: i32, y: i32) -> Point {
        Point { x, y }
    }

    /// Manhattan (taxicab) distance between `self` and `other`.
    pub fn distance(self, other: Point) -> i32 {
        (self.x - other.x).abs() + (self.y - other.y).abs()
    }
}

/// Why a textual point failed to parse.
#[derive(Debug)]
pub enum ParsePointError {
    /// A coordinate was not a valid `i32`.
    Integer(ParseIntError),
    /// Structural problem (wrong number of comma-separated parts).
    Other(&'static str),
}

impl From<ParseIntError> for ParsePointError {
    fn from(e: ParseIntError) -> ParsePointError {
        ParsePointError::Integer(e)
    }
}

impl FromStr for Point {
    type Err = ParsePointError;

    /// Parses `"x, y"` (surrounding whitespace is allowed) into a `Point`.
    /// Exactly two `", "`-separated coordinates are required.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let bad = || ParsePointError::Other("Bad format");
        let mut pieces = s.trim().split(", ");
        let x = pieces.next().ok_or_else(bad)?.parse()?;
        let y = pieces.next().ok_or_else(bad)?.parse()?;
        // Anything left over means there were more than two coordinates.
        match pieces.next() {
            Some(_) => Err(bad()),
            None => Ok(Point { x, y }),
        }
    }
}

impl Display for ParsePointError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            ParsePointError::Integer(err) => write!(f, "Failed to parse integer: {}", err),
            ParsePointError::Other(msg) => f.write_str(msg),
        }
    }
}

impl Error for ParsePointError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        // Only the integer variant wraps an underlying error.
        if let ParsePointError::Integer(err) = self {
            Some(err)
        } else {
            None
        }
    }
}
24.264706
87
0.567879
0a3d327643eef2e492dd1605627e802821a52f66
169
//! Tests auto-converted from "sass-spec/spec/libsass-closed-issues/issue_1681" #[allow(unused)] use super::runner; mod calc; mod element; mod expression; mod url;
13
79
0.733728
14433c03a6d6370ae40c8e2724d1486168266895
1,275
use super::create_handle::create_handle;
use crate::trampoline::StoreInstanceHandle;
use crate::Store;
use crate::{TableType, ValType};
use anyhow::{bail, Result};
use wasmtime_environ::entity::PrimaryMap;
use wasmtime_environ::{wasm, EntityIndex, Module};

/// Builds a minimal instance whose single export is a table described by
/// `table`, allocated within `store`. Fails for element types other than
/// `funcref`/`externref`.
pub fn create_handle_with_table(store: &Store, table: &TableType) -> Result<StoreInstanceHandle> {
    let mut module = Module::new();

    // Translate the public element type into the internal representation.
    let element_ty = match table.element() {
        ValType::FuncRef => wasm::TableElementType::Func,
        ValType::ExternRef => wasm::TableElementType::Val(wasmtime_runtime::ref_type()),
        _ => bail!("cannot support {:?} as a table element", table.element()),
    };
    let wasm_table = wasm::Table {
        wasm_ty: table.element().to_wasm_type(),
        minimum: table.limits().min(),
        maximum: table.limits().max(),
        ty: element_ty,
    };

    // Plan the table with default tunables and register it in the
    // otherwise-empty module.
    let tunables = Default::default();
    let plan = wasmtime_environ::TablePlan::for_table(wasm_table, &tunables);
    let table_index = module.table_plans.push(plan);

    // Expose the table under the fixed export name "table".
    module
        .exports
        .insert("table".to_string(), EntityIndex::Table(table_index));

    create_handle(
        module,
        store,
        PrimaryMap::new(),
        Default::default(),
        Box::new(()),
        PrimaryMap::new(),
    )
}
32.692308
98
0.633725
62a3c3745ee67af099067c6265e569bae995f506
3,873
use crate::bitvector::BITVECTOR_NIBBLE_SIZE; use log::info; // GAMEINPUT_MAX_BYTES * GAMEINPUT_MAX_PLAYERS * 8 must be less than // 2^BITVECTOR_NIBBLE_SIZE (see bitvector.h) pub const GAMEINPUT_MAX_BYTES: usize = 9; pub const GAMEINPUT_MAX_PLAYERS: usize = 2; pub const INPUT_BUFFER_SIZE: usize = GAMEINPUT_MAX_BYTES * GAMEINPUT_MAX_PLAYERS; pub type Input = [u8; GAMEINPUT_MAX_BYTES]; pub type InputBuffer = [Input; GAMEINPUT_MAX_PLAYERS]; // pub type InputBuffer = [u8; INPUT_BUFFER_SIZE]; pub type FrameNum = u32; pub type Frame = Option<FrameNum>; #[derive(Debug, Eq, PartialEq, Copy, Clone, Default)] pub struct GameInput { pub frame: Frame, pub size: usize, // Number of u8 in the InputBuffer array. pub bits: InputBuffer, } impl GameInput { pub const fn new() -> Self { GameInput { frame: None, size: 0, bits: [[b'0'; GAMEINPUT_MAX_BYTES]; GAMEINPUT_MAX_PLAYERS], } } pub fn init(frame: Frame, bits: Option<&InputBuffer>, size: usize) -> GameInput { assert!(size <= GAMEINPUT_MAX_BYTES); match bits { Some(i_bits) => GameInput { frame, size, bits: i_bits.clone(), }, None => GameInput { frame, size, bits: [[b'0'; GAMEINPUT_MAX_BYTES]; GAMEINPUT_MAX_PLAYERS], }, } } // TODO: Document what the actual hell these do. 
pub const fn value(&self, i: usize) -> bool { (self.bits[i / GAMEINPUT_MAX_BYTES][i % GAMEINPUT_MAX_PLAYERS] & (1 << (i % BITVECTOR_NIBBLE_SIZE))) != 0 } pub fn set(&mut self, i: usize) { self.bits[i / GAMEINPUT_MAX_BYTES][i % GAMEINPUT_MAX_PLAYERS] |= 1 << (i % BITVECTOR_NIBBLE_SIZE); } pub fn clear(&mut self, i: usize) { self.bits[i / GAMEINPUT_MAX_BYTES][i % GAMEINPUT_MAX_PLAYERS] &= !(1 << (i % BITVECTOR_NIBBLE_SIZE)); } pub fn erase(&mut self) { self.bits = [[b'0'; GAMEINPUT_MAX_BYTES]; GAMEINPUT_MAX_PLAYERS]; } pub fn describe(&self, show_frame: bool) -> String { let mut buf: String = String::from(""); if let Some(frame) = self.frame { if show_frame { buf = format!("(frame:{} size:{}", frame, self.size); } else { buf = format!("(size:{}", self.size); } } for i in 0..self.size { if self.value(i) { buf.push_str(&format!("{:2}", i)); } } buf.push(')'); buf } // fn log(prefix: &String, show_frame: bool) {} pub fn equal(&self, other: &GameInput, bitsonly: bool) -> bool { if !bitsonly { match (self.frame, other.frame) { (Some(self_frame), Some(other_frame)) => { if self_frame != other_frame { info!("frames don't match: {}, {}\n", self_frame, other_frame,); } } (Some(self_frame), None) => { info!("frames don't match: {}, {}\n", self_frame, "None",); } (None, Some(other_frame)) => { info!("frames don't match: {}, {}\n", "None", other_frame,); } (None, None) => { info!("frames don't match: {}, {}\n", "None", "None",); } } } if self.size != other.size { info!("sizes don't match: {}, {}\n", self.size, other.size); } let bits_equality = self.bits != other.bits; if !bits_equality { info!("bits don't match\n"); } return (bitsonly || self.frame == other.frame) && self.size == other.size && bits_equality; } }
33.678261
99
0.51717
ebfd92869429c6c59c96b2072826fcc271418d25
313
// Platform device driver modules and their initialization entry points.
pub mod serial;
pub mod pic;
pub mod rtc;
pub mod pit;
pub mod high_precision_timer;
pub mod rand;

/// Early device initialization: brings up only the interrupt controller.
///
/// # Safety
/// Touches hardware directly via `pic::init`. NOTE(review): assumed to be
/// called exactly once during early boot, before interrupts are enabled —
/// confirm against the boot sequence.
pub unsafe fn init() {
    pic::init();
}

/// Second-stage device initialization: timers, RTC, optional serial, and the
/// high-precision timer, in that order.
///
/// # Safety
/// Same caveats as `init`; NOTE(review): presumably must run after `init`
/// (the PIC is set up there) — confirm.
pub unsafe fn init_noncore() {
    pit::init();
    rtc::init();
    // Serial is only compiled in when the "serial" feature is enabled.
    #[cfg(feature = "serial")]
    serial::init();
    high_precision_timer::init();
}
16.473684
34
0.587859
613a982dcc161ea2de3411f21f5590d9c89aa676
837
use clap::Clap;
use readability::extractor::extract;
use std::fs::File;
use std::io::{self, BufReader, Write};
use std::path::PathBuf;

/// HTML to Readability CLI
// NOTE(review): the author string below is missing its closing '>' (likely a
// redaction artifact); left untouched since it is runtime metadata.
#[derive(Clap)]
#[clap(version = "1.0", author = "Dustin K <[email protected]")]
struct Args {
    file: PathBuf,  // local HTML file to extract from
    url: url::Url,  // page URL; presumably used to resolve relative links — confirm with readability docs
}

/// Reads the HTML file, runs readability extraction against it, and writes
/// the article title, content, and a source link to stdout as HTML.
fn main() -> Result<(), anyhow::Error> {
    let opts: Args = Args::parse();
    let file = File::open(&opts.file)?;
    let mut reader = BufReader::new(file);
    let product = extract(&mut reader, &opts.url)?;
    // Lock stdout once for the whole output instead of per write.
    let stdout = io::stdout();
    let mut handle = stdout.lock();
    handle.write_all(format!("<h1>{}</h1>", product.title).as_bytes())?;
    handle.write_all(product.content.as_bytes())?;
    handle.write_all(
        format!(r#"<p>Source: <a href="{}">{}</a>"#, opts.url, product.title).as_bytes(),
    )?;
    Ok(())
}
28.862069
89
0.609319
dd0e03e74af5558b81876353f728372529eafe0e
131
//! kdirect types pub mod direct; pub mod handle; pub mod kdagent; pub mod kdentry; pub mod kdhash; pub mod persist; pub mod srv;
13.1
17
0.732824
1ca631054bc3dc642ced379193e794c8917aead5
463
use gtk::prelude::*;

/// Widget handles for the application's header bar.
#[derive(Clone)]
pub struct GUIHeader {
    pub button_settings: gtk::Button,
    pub button_app_info: gtk::Button,
}

impl GUIHeader {
    /// Looks up the header-bar buttons in `builder` and bundles them.
    ///
    /// Panics if either widget id is absent from the loaded UI definition.
    pub fn create_from_builder(builder: &gtk::Builder) -> Self {
        Self {
            button_settings: builder.get_object("button_settings").unwrap(),
            button_app_info: builder.get_object("button_app_info").unwrap(),
        }
    }
}
28.9375
90
0.686825
0871d8aab39f4119bb85905dfbf4b6214c6d1a5f
5,582
use llvm_sys::*;
use llvm_sys::prelude::*;
use llvm_sys::core::*;
use llvm_sys::transforms::vectorize::*;
use llvm_sys::transforms::scalar::*;
use llvm_sys::transforms::ipo::*;

use std::default::Default;

// Declares a zero-argument wrapper method that registers the given llvm-sys
// pass-creation function on this manager.
macro_rules! pass {
    ($name: ident, $pass: ident) => {
        pub fn $name(&mut self) {
            unsafe{
                $pass(self.data);
            }
        }
    }
}

/// Manages code gen passes
pub struct PassManager {
    data: LLVMPassManagerRef
}

impl Drop for PassManager {
    // Releases the underlying LLVM pass manager when this wrapper is dropped.
    fn drop(&mut self) {
        unsafe {
            LLVMDisposePassManager(self.data);
        }
    }
}

impl PassManager {
    /// Creates a new empty Pass Manager
    pub fn new() -> PassManager {
        unsafe {
            PassManager {
                data: LLVMCreatePassManager()
            }
        }
    }

    /// Gives the option to the end developer to
    /// handle some optimizations more eloquently
    pub fn apply_opt<T: ApplyOpt>(&mut self, opt: T) {
        opt.add_pass(self);
    }

    // Scalar transform passes.
    // NOTE(review): `constant_propigation` ("propagation") and `verifer`
    // ("verifier", further down) are typo'd; renaming them would break the
    // public API, so they are left as-is.
    pass!(aggressive_dce,LLVMAddAggressiveDCEPass);
    pass!(alignment_from_assumptions,LLVMAddAlignmentFromAssumptionsPass);
    pass!(cfg_simplification,LLVMAddCFGSimplificationPass);
    pass!(constant_propigation,LLVMAddConstantPropagationPass);
    pass!(correlated_value_propagation,LLVMAddCorrelatedValuePropagationPass);
    pass!(dead_store_elimination,LLVMAddDeadStoreEliminationPass);
    pass!(demote_memory_to_register,LLVMAddDemoteMemoryToRegisterPass);
    pass!(early_cse,LLVMAddEarlyCSEPass);
    pass!(gvn,LLVMAddGVNPass);
    pass!(ind_var_simplify,LLVMAddIndVarSimplifyPass);
    pass!(instruction_combining,LLVMAddInstructionCombiningPass);
    pass!(jump_threading,LLVMAddJumpThreadingPass);
    pass!(licm,LLVMAddLICMPass);
    pass!(loop_deletion,LLVMAddLoopDeletionPass);
    pass!(loop_reroll,LLVMAddLoopRerollPass);
    pass!(loop_rotate,LLVMAddLoopRotatePass);
    pass!(loop_unswitch,LLVMAddLoopUnswitchPass);
    pass!(lower_expect_intrinsic,LLVMAddLowerExpectIntrinsicPass);
    pass!(lower_switch,LLVMAddLowerSwitchPass);
    pass!(memcpy_opt,LLVMAddMemCpyOptPass);
    pass!(merge_load_store_motion,LLVMAddMergedLoadStoreMotionPass);
    pass!(partially_inline_lib_calls,LLVMAddPartiallyInlineLibCallsPass);
    pass!(promote_memory_to_register,LLVMAddPromoteMemoryToRegisterPass);
    pass!(reassociate,LLVMAddReassociatePass);
    pass!(sccp,LLVMAddSCCPPass);
    pass!(scalarizer,LLVMAddScalarizerPass);
    pass!(scoped_no_alias_aa,LLVMAddScopedNoAliasAAPass);
    pass!(simplify_lib_calls,LLVMAddSimplifyLibCallsPass);
    pass!(tail_call_elimination,LLVMAddTailCallEliminationPass);
    pass!(verifer,LLVMAddVerifierPass);

    // Interprocedural (IPO) passes.
    pass!(always_inliner,LLVMAddAlwaysInlinerPass);
    pass!(argument_promotion,LLVMAddArgumentPromotionPass);
    pass!(constant_merge,LLVMAddConstantMergePass);
    pass!(dead_arg_elimination,LLVMAddDeadArgEliminationPass);
    pass!(function_attrs,LLVMAddFunctionAttrsPass);
    pass!(global_dce,LLVMAddGlobalDCEPass);
    pass!(global_optimizer,LLVMAddGlobalOptimizerPass);
    pass!(ip_constant_propagation,LLVMAddIPConstantPropagationPass);
    pass!(ipsccp,LLVMAddIPSCCPPass);
    pass!(prune,LLVMAddPruneEHPass);
    pass!(strip_dead_prototypes,LLVMAddStripDeadPrototypesPass);
    pass!(strip_symbols,LLVMAddStripSymbolsPass);

    /// Adds the internalize pass, forwarding `all_but_main` to
    /// `LLVMAddInternalizePass`. NOTE(review): per the llvm-sys/LLVM-C API a
    /// nonzero value keeps `main` externally visible — confirm against the
    /// llvm-sys docs for the bound LLVM version.
    pub fn internalize_pass(&mut self, all_but_main: u32) {
        unsafe{
            LLVMAddInternalizePass(self.data, all_but_main);
        }
    }

    /// Allows access to inner data field
    /// for within library functions
    pub unsafe fn inner(&mut self) -> LLVMPassManagerRef {
        self.data
    }
}

/// For enums that can apply optimizations
pub trait ApplyOpt {
    fn add_pass(&self, mngr: &mut PassManager);
}

/// Controls which `ScalarReplAggregatesPass`
/// is performed
///
/// you can re-call the method to apply more
/// than one type.
#[derive(Clone,Copy,Debug)]
pub enum ScalarReplAggregates {
    /// applies no optimization pass
    None,
    /// equivalent of `LLVMAddScalarReplAggregatesPass`
    Default,
    /// equivalent of `LLVMAddScalarReplAggregatesPassSSA`
    SSA,
    /// equivalent of `LLVMAddScalarReplAggregatesPassWithThreshold`
    Threshold(i32)
}

impl ApplyOpt for ScalarReplAggregates {
    fn add_pass(&self, mngr: &mut PassManager) {
        unsafe {
            match self.clone() {
                ScalarReplAggregates::None => { },
                ScalarReplAggregates::Default => LLVMAddScalarReplAggregatesPass(mngr.inner()),
                ScalarReplAggregates::SSA => LLVMAddScalarReplAggregatesPassSSA(mngr.inner()),
                ScalarReplAggregates::Threshold(val) => LLVMAddScalarReplAggregatesPassWithThreshold(mngr.inner(),val)
            };
        }
    }
}

/// Vectorization Passes
///
/// you can add multiple
/// by re-calling the `appy_opt`
/// value multiple times.
#[derive(Clone,Copy,Debug)]
pub enum Vectorize {
    /// applies no optimization pass
    None,
    /// equivalent of `LLVMAddBBVectorizePass`
    BB,
    /// equivalent of `LLVMAddLoopVectorizePass`
    Loop,
    /// equivalent of `LLVMAddSLPVectorizePass`
    SLP
}

impl Default for Vectorize {
    /// Returns `Vectorize::None` which is functionally a no-op
    fn default() -> Vectorize {
        Vectorize::None
    }
}

impl ApplyOpt for Vectorize {
    fn add_pass(&self, mngr: &mut PassManager) {
        unsafe {
            match self.clone() {
                Vectorize::None => { },
                Vectorize::BB => LLVMAddBBVectorizePass(mngr.inner()),
                Vectorize::Loop => LLVMAddLoopVectorizePass(mngr.inner()),
                Vectorize::SLP => LLVMAddSLPVectorizePass(mngr.inner())
            };
        }
    }
}
31.715909
118
0.708886
28e465e8542439d667a7b8f9435f786e24fa8f80
690
// move_semantics4.rs
// Solution: the vector is created inside `fn fill_vec` itself and ownership
// of the freshly built vector is moved out to the caller via the return
// value — `fill_vec` takes no arguments at all.
// Execute `rustlings hint move_semantics4` for hints!

fn main() {
    let mut vec1 = fill_vec();

    println!("{} has length {} content `{:?}`", "vec1", vec1.len(), vec1);

    vec1.push(88);

    println!("{} has length {} content `{:?}`", "vec1", vec1.len(), vec1);
}

// Builds and returns the vector; the caller becomes its owner.
fn fill_vec() -> Vec<i32> {
    vec![22, 44, 66]
}
24.642857
78
0.617391
ef74e9e58260d23acd334a8efaa4142c382b7f5a
8,787
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! Handles negotiating buffer sets with the codec server and sysmem. use crate::{buffer_collection_constraints::*, Result}; use anyhow::Context as _; use fidl::encoding::Decodable; use fidl::endpoints::{create_endpoints, ClientEnd}; use fidl_fuchsia_media::*; use fidl_fuchsia_sysmem::*; use fuchsia_component::client; use fuchsia_stream_processors::*; use fuchsia_zircon as zx; use std::{ convert::TryFrom, fmt, iter::{IntoIterator, StepBy}, ops::RangeFrom, }; use thiserror::Error; #[derive(Debug, Error)] pub enum Error { ReclaimClientTokenChannel, ServerOmittedBufferVmo, PacketReferencesInvalidBuffer, VmoReadFail(zx::Status), } impl fmt::Display for Error { fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&self, w) } } /// The pattern to use when advancing ordinals. #[derive(Debug, Clone, Copy)] pub enum OrdinalPattern { /// Odd ordinal pattern starts at 1 and moves in increments of 2: [1,3,5..] Odd, /// All ordinal pattern starts at 1 and moves in increments of 1: [1,2,3..] All, } impl IntoIterator for OrdinalPattern { type Item = u64; type IntoIter = StepBy<RangeFrom<Self::Item>>; fn into_iter(self) -> Self::IntoIter { let (start, step) = match self { OrdinalPattern::Odd => (1, 2), OrdinalPattern::All => (1, 1), }; (start..).step_by(step) } } pub fn get_ordinal(pattern: &mut <OrdinalPattern as IntoIterator>::IntoIter) -> u64 { pattern.next().expect("Getting next item in infinite pattern") } pub enum BufferSetType { Input, Output, } pub struct BufferSetFactory; // This client only intends to be filling one input buffer or hashing one output buffer at any given // time. 
const MIN_BUFFER_COUNT_FOR_CAMPING: u32 = 1; impl BufferSetFactory { pub async fn buffer_set( buffer_lifetime_ordinal: u64, constraints: ValidStreamBufferConstraints, codec: &mut StreamProcessorProxy, buffer_set_type: BufferSetType, buffer_collection_constraints: Option<BufferCollectionConstraints>, ) -> Result<BufferSet> { let (collection_client, settings) = Self::settings(buffer_lifetime_ordinal, constraints, buffer_collection_constraints) .await?; vlog!(2, "Got settings; waiting for buffers. {:?}", settings); match buffer_set_type { BufferSetType::Input => codec .set_input_buffer_partial_settings(settings) .context("Sending input partial settings to codec")?, BufferSetType::Output => codec .set_output_buffer_partial_settings(settings) .context("Sending output partial settings to codec")?, }; let (status, collection_info) = collection_client.wait_for_buffers_allocated().await.context("Waiting for buffers")?; vlog!(2, "Sysmem responded: {:?}", status); let collection_info = zx::Status::ok(status).map(|_| collection_info)?; if let BufferSetType::Output = buffer_set_type { vlog!(2, "Completing settings for output."); codec.complete_output_buffer_partial_settings(buffer_lifetime_ordinal)?; } //collection_client.close()?; vlog!( 2, "Got {} buffers of size {:?}", collection_info.buffer_count, collection_info.settings.buffer_settings.size_bytes ); vlog!(3, "Buffer collection is: {:#?}", collection_info.settings); for (i, buffer) in collection_info.buffers.iter().enumerate() { // We enumerate beyond collection_info.buffer_count just for debugging // purposes at this log level. vlog!(3, "Buffer {} is : {:#?}", i, buffer); } Ok(BufferSet::try_from(BufferSetSpec { proxy: collection_client, buffer_lifetime_ordinal, collection_info, })?) 
} async fn settings( buffer_lifetime_ordinal: u64, constraints: ValidStreamBufferConstraints, buffer_collection_constraints: Option<BufferCollectionConstraints>, ) -> Result<(BufferCollectionProxy, StreamBufferPartialSettings)> { let (client_token, client_token_request) = create_endpoints::<BufferCollectionTokenMarker>()?; let (codec_token, codec_token_request) = create_endpoints::<BufferCollectionTokenMarker>()?; let client_token = client_token.into_proxy()?; let sysmem_client = client::connect_to_service::<AllocatorMarker>().context("Connecting to sysmem")?; sysmem_client .allocate_shared_collection(client_token_request) .context("Allocating shared collection")?; client_token.duplicate(std::u32::MAX, codec_token_request)?; let (collection_client, collection_request) = create_endpoints::<BufferCollectionMarker>()?; sysmem_client.bind_shared_collection( ClientEnd::new( client_token .into_channel() .map_err(|_| Error::ReclaimClientTokenChannel)? .into_zx_channel(), ), collection_request, )?; let collection_client = collection_client.into_proxy()?; collection_client.sync().await.context("Syncing codec_token_request with sysmem")?; let mut collection_constraints = buffer_collection_constraints.unwrap_or(BUFFER_COLLECTION_CONSTRAINTS_DEFAULT); assert_eq!( collection_constraints.min_buffer_count_for_camping, 0, "min_buffer_count_for_camping should default to 0 before we've set it" ); collection_constraints.min_buffer_count_for_camping = MIN_BUFFER_COUNT_FOR_CAMPING; vlog!(3, "Our buffer collection constraints are: {:#?}", collection_constraints); // By design we must say true even if all our fields are left at // default, or sysmem will not give us buffer handles. 
let has_constraints = true; collection_client .set_constraints(has_constraints, &mut collection_constraints) .context("Sending buffer constraints to sysmem")?; Ok(( collection_client, StreamBufferPartialSettings { buffer_lifetime_ordinal: Some(buffer_lifetime_ordinal), buffer_constraints_version_ordinal: Some( constraints.buffer_constraints_version_ordinal, ), sysmem_token: Some(codec_token), ..StreamBufferPartialSettings::new_empty() }, )) } } struct BufferSetSpec { proxy: BufferCollectionProxy, buffer_lifetime_ordinal: u64, collection_info: BufferCollectionInfo2, } #[derive(Debug, PartialEq)] pub struct Buffer { pub data: zx::Vmo, pub start: u64, pub size: u64, } #[derive(Debug)] pub struct BufferSet { pub proxy: BufferCollectionProxy, pub buffers: Vec<Buffer>, pub buffer_lifetime_ordinal: u64, pub buffer_size: usize, } impl TryFrom<BufferSetSpec> for BufferSet { type Error = anyhow::Error; fn try_from(mut src: BufferSetSpec) -> std::result::Result<Self, Self::Error> { let mut buffers = vec![]; for (i, buffer) in src.collection_info.buffers [0..(src.collection_info.buffer_count as usize)] .iter_mut() .enumerate() { buffers.push(Buffer { data: buffer.vmo.take().ok_or(Error::ServerOmittedBufferVmo).context(format!( "Trying to ingest {}th buffer of {}: {:#?}", i, src.collection_info.buffer_count, buffer ))?, start: buffer.vmo_usable_start, size: src.collection_info.settings.buffer_settings.size_bytes as u64, }); } Ok(Self { proxy: src.proxy, buffers, buffer_lifetime_ordinal: src.buffer_lifetime_ordinal, buffer_size: src.collection_info.settings.buffer_settings.size_bytes as usize, }) } } impl BufferSet { pub fn read_packet(&self, packet: &ValidPacket) -> Result<Vec<u8>> { let buffer = self .buffers .get(packet.buffer_index as usize) .ok_or(Error::PacketReferencesInvalidBuffer)?; let mut dest = vec![0; packet.valid_length_bytes as usize]; buffer.data.read(&mut dest, packet.start_offset as u64).map_err(Error::VmoReadFail)?; Ok(dest) } }
35.007968
100
0.639012
29a8200104ac05929d07161e8832d65d354e614f
757
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // compile-pass #![allow(dead_code)] // Tests that impls are allowed to have looser, more permissive bounds // than the traits require. trait A { fn b<C:Sync,D>(&self, x: C) -> C; } struct E { f: isize } impl A for E { fn b<F,G>(&self, _x: F) -> F { panic!() } } pub fn main() {}
25.233333
70
0.686922
720a006564dbbeb8ebd7026a2dc9980658f7fbf3
10,486
mod collect; use nu_test_support::fs::Stub::FileWithContent; use nu_test_support::playground::Playground; use nu_test_support::{nu, pipeline}; #[test] fn trims() { Playground::setup("str_test_1", |dirs, sandbox| { sandbox.with_files(vec![FileWithContent( "sample.toml", r#" [dependency] name = "nu " "#, )]); let actual = nu!( cwd: dirs.test(), "open sample.toml | str trim dependency.name | get dependency.name | echo $it" ); assert_eq!(actual.out, "nu"); }) } #[test] fn error_trim_multiple_chars() { let actual = nu!( cwd: ".", pipeline( r#" echo 'does it work now?!' | str trim -c '?!' "# ) ); assert!(actual.err.contains("char")); } #[test] fn capitalizes() { Playground::setup("str_test_2", |dirs, sandbox| { sandbox.with_files(vec![FileWithContent( "sample.toml", r#" [dependency] name = "nu" "#, )]); let actual = nu!( cwd: dirs.test(), "open sample.toml | str capitalize dependency.name | get dependency.name | echo $it" ); assert_eq!(actual.out, "Nu"); }) } #[test] fn downcases() { Playground::setup("str_test_3", |dirs, sandbox| { sandbox.with_files(vec![FileWithContent( "sample.toml", r#" [dependency] name = "LIGHT" "#, )]); let actual = nu!( cwd: dirs.test(), "open sample.toml | str downcase dependency.name | get dependency.name | echo $it" ); assert_eq!(actual.out, "light"); }) } #[test] fn upcases() { Playground::setup("str_test_4", |dirs, sandbox| { sandbox.with_files(vec![FileWithContent( "sample.toml", r#" [package] name = "nushell" "#, )]); let actual = nu!( cwd: dirs.test(), "open sample.toml | str upcase package.name | get package.name | echo $it" ); assert_eq!(actual.out, "NUSHELL"); }) } #[test] fn camelcases() { Playground::setup("str_test_3", |dirs, sandbox| { sandbox.with_files(vec![FileWithContent( "sample.toml", r#" [dependency] name = "THIS_IS_A_TEST" "#, )]); let actual = nu!( cwd: dirs.test(), "open sample.toml | str camel-case dependency.name | get dependency.name | echo $it" ); assert_eq!(actual.out, "thisIsATest"); }) } #[test] fn 
converts_to_int() { let actual = nu!( cwd: "tests/fixtures/formats", pipeline( r#" echo '{number_as_string: "1"}' | from json | str to-int number_as_string | rename number | where number == 1 | get number | echo $it "# )); assert_eq!(actual.out, "1"); } #[test] fn converts_to_decimal() { let actual = nu!( cwd: "tests/fixtures/formats", pipeline( r#" echo "3.1, 0.0415" | split row "," | str to-decimal | math sum "# )); assert_eq!(actual.out, "3.1415"); } #[test] fn sets() { Playground::setup("str_test_5", |dirs, sandbox| { sandbox.with_files(vec![FileWithContent( "sample.toml", r#" [package] name = "nushell" "#, )]); let actual = nu!( cwd: dirs.test(), pipeline( r#" open sample.toml | str set wykittenshell package.name | get package.name | echo $it "# )); assert_eq!(actual.out, "wykittenshell"); }) } #[test] fn find_and_replaces() { Playground::setup("str_test_6", |dirs, sandbox| { sandbox.with_files(vec![FileWithContent( "sample.toml", r#" [fortune.teller] phone = "1-800-KATZ" "#, )]); let actual = nu!( cwd: dirs.test(), pipeline( r#" open sample.toml | str find-replace KATZ "5289" fortune.teller.phone | get fortune.teller.phone | echo $it "# )); assert_eq!(actual.out, "1-800-5289"); }) } #[test] fn find_and_replaces_without_passing_field() { Playground::setup("str_test_7", |dirs, sandbox| { sandbox.with_files(vec![FileWithContent( "sample.toml", r#" [fortune.teller] phone = "1-800-KATZ" "#, )]); let actual = nu!( cwd: dirs.test(), pipeline( r#" open sample.toml | get fortune.teller.phone | str find-replace KATZ "5289" | echo $it "# )); assert_eq!(actual.out, "1-800-5289"); }) } #[test] fn substrings_the_input() { Playground::setup("str_test_8", |dirs, sandbox| { sandbox.with_files(vec![FileWithContent( "sample.toml", r#" [fortune.teller] phone = "1-800-ROBALINO" "#, )]); let actual = nu!( cwd: dirs.test(), pipeline( r#" open sample.toml | str substring 6,14 fortune.teller.phone | get fortune.teller.phone | echo $it "# )); assert_eq!(actual.out, "ROBALINO"); }) } 
#[test] fn substring_errors_if_start_index_is_greater_than_end_index() { Playground::setup("str_test_9", |dirs, sandbox| { sandbox.with_files(vec![FileWithContent( "sample.toml", r#" [fortune.teller] phone = "1-800-ROBALINO" "#, )]); let actual = nu!( cwd: dirs.test(), pipeline( r#" open sample.toml | str substring 6,5 fortune.teller.phone | echo $it "# )); assert!(actual .err .contains("End must be greater than or equal to Start")) }) } #[test] fn substrings_the_input_and_returns_the_string_if_end_index_exceeds_length() { Playground::setup("str_test_10", |dirs, sandbox| { sandbox.with_files(vec![FileWithContent( "sample.toml", r#" [package] name = "nu-arepas" "#, )]); let actual = nu!( cwd: dirs.test(), pipeline( r#" open sample.toml | str substring 0,999 package.name | get package.name | echo $it "# )); assert_eq!(actual.out, "nu-arepas"); }) } #[test] fn substrings_the_input_and_returns_blank_if_start_index_exceeds_length() { Playground::setup("str_test_11", |dirs, sandbox| { sandbox.with_files(vec![FileWithContent( "sample.toml", r#" [package] name = "nu-arepas" "#, )]); let actual = nu!( cwd: dirs.test(), pipeline( r#" open sample.toml | str substring 50,999 package.name | get package.name | echo $it "# )); assert_eq!(actual.out, ""); }) } #[test] fn substrings_the_input_and_treats_start_index_as_zero_if_blank_start_index_given() { Playground::setup("str_test_12", |dirs, sandbox| { sandbox.with_files(vec![FileWithContent( "sample.toml", r#" [package] name = "nu-arepas" "#, )]); let actual = nu!( cwd: dirs.test(), pipeline( r#" open sample.toml | str substring ,2 package.name | get package.name | echo $it "# )); assert_eq!(actual.out, "nu"); }) } #[test] fn substrings_the_input_and_treats_end_index_as_length_if_blank_end_index_given() { Playground::setup("str_test_13", |dirs, sandbox| { sandbox.with_files(vec![FileWithContent( "sample.toml", r#" [package] name = "nu-arepas" "#, )]); let actual = nu!( cwd: dirs.test(), pipeline( r#" open sample.toml | str 
substring 3, package.name | get package.name | echo $it "# )); assert_eq!(actual.out, "arepas"); }) } #[test] fn from_decimal_correct_trailing_zeros() { let actual = nu!( cwd: ".", pipeline( r#" = 1.23000 | str from -d 3 "# )); assert!(actual.out.contains("1.230")); } #[test] fn from_int_decimal_correct_trailing_zeros() { let actual = nu!( cwd: ".", pipeline( r#" = 1.00000 | str from -d 3 "# )); assert!(actual.out.contains("1.000")); } #[test] fn from_int_decimal_trim_trailing_zeros() { let actual = nu!( cwd: ".", pipeline( r#" = 1.00000 | str from | format "{$it} flat" "# )); assert!(actual.out.contains("1 flat")); // "1" would match "1.0" } #[test] fn from_table() { let actual = nu!( cwd: ".", pipeline( r#" echo '[{"name": "foo", "weight": 32.377}, {"name": "bar", "weight": 15.2}]' | from json | str from weight -d 2 "# )); assert!(actual.out.contains("32.38")); assert!(actual.out.contains("15.20")); } #[test] fn str_reverse() { let actual = nu!( cwd: ".", pipeline( r#" echo "nushell" | str reverse "# )); assert!(actual.out.contains("llehsun")); }
23.777778
96
0.454129
9b53d2457dffdc09ea4789b644e6d2b2a3cfc0f5
154
use substrate_wasm_builder::WasmBuilder;

/// Build script: invokes `substrate-wasm-builder` to produce the Wasm
/// artifact for this crate alongside the native build.
fn main() {
    WasmBuilder::new()
        // Build the crate this build script belongs to.
        .with_current_project()
        // Export the heap base symbol so the embedder can locate the
        // Wasm heap (standard Substrate runtime requirement).
        .export_heap_base()
        // Import memory from the host rather than defining it in the
        // module itself.
        .import_memory()
        .build()
}
15.4
40
0.714286
e8e99b95d5c1f7f354df124e7fd114a4a1669b91
2,109
use std::slice::from_raw_parts_mut; use wasmtime::Store; pub fn write_bytes_to_wasm_memory<T : WasmMemory>(bytes: &[u8], memory: &T, ptr: usize, len: usize){ let mem_array: &mut [u8]; unsafe { mem_array = memory.data_unchecked_mut(); for i in 0..len { // iterate over the serialized struct, copying it to the memory of the target module, // using the ptr provided by caller mem_array[ptr + i as usize] = bytes[i as usize]; } } } #[cfg(feature = "bincode-ffi")] pub fn write_bincode_to_wasm_memory<T : serde::Serialize, M : WasmMemory>(data: T, memory: &M, ptr: usize, len: usize){ let serialized_array = bincode::serialize(&data).expect("Failed to serialize type"); write_bytes_to_wasm_memory(&*serialized_array, memory, ptr, len) } #[cfg(feature = "bytemuck-ffi")] pub fn write_bytemuck_to_wasm_memory<T : bytemuck::Pod, M : WasmMemory>(data: T, memory: &M, ptr: usize, len: usize){ let bytes = bytemuck::bytes_of(&data); //println!("Writing {} bytes using Bytemuck",bytes.len()); write_bytes_to_wasm_memory(bytes, memory, ptr, len) } pub trait WasmMemory { unsafe fn data_unchecked_mut(&self) -> &mut [u8]; } #[cfg(all(feature = "wasmer"))] impl WasmMemory for wasmer::Memory { unsafe fn data_unchecked_mut(&self) -> &mut [u8] { self::data_unchecked_mut() } } #[cfg(all(feature = "wasmtime"))] pub struct WasmtimeMemory<'a,T>{ store : &'a wasmtime::Store<T>, memory : &'a wasmtime::Memory } #[cfg(all(feature = "wasmtime"))] impl WasmtimeMemory<_> { pub fn new<'a,T>( store: &'a wasmtime::Store<T>, memory: &'a wasmtime::Memory ) -> WasmtimeMemory<'a,T>{ WasmtimeMemory{ store, memory } } } #[cfg(all(feature = "wasmtime"))] impl WasmMemory for WasmtimeMemory<_> { unsafe fn data_unchecked_mut(&self) -> &mut [u8] { unsafe { let ptr = self.memory.data_ptr(self.store); let len = self.memory.data_size(self.store); from_raw_parts_mut(ptr,len) } } }
29.291667
119
0.623992
1687061db49214a00ef89cf832308da079b6b54f
5,706
//! Types for the IBC events emitted from Tendermint Websocket by the connection module. use serde_derive::{Deserialize, Serialize}; use crate::events::IbcEvent; use crate::ics02_client::height::Height; use crate::ics24_host::identifier::{ClientId, ConnectionId}; /// The content of the `type` field for the event that a chain produces upon executing a connection handshake transaction. const INIT_EVENT_TYPE: &str = "connection_open_init"; const TRY_EVENT_TYPE: &str = "connection_open_try"; const ACK_EVENT_TYPE: &str = "connection_open_ack"; const CONFIRM_EVENT_TYPE: &str = "connection_open_confirm"; /// The content of the `key` field for the attribute containing the connection identifier. const CONN_ID_ATTRIBUTE_KEY: &str = "connection_id"; const CLIENT_ID_ATTRIBUTE_KEY: &str = "client_id"; const COUNTERPARTY_CONN_ID_ATTRIBUTE_KEY: &str = "counterparty_connection_id"; const COUNTERPARTY_CLIENT_ID_ATTRIBUTE_KEY: &str = "counterparty_client_id"; pub fn try_from_tx(event: &tendermint::abci::Event) -> Option<IbcEvent> { match event.type_str.as_ref() { INIT_EVENT_TYPE => Some(IbcEvent::OpenInitConnection(OpenInit::from( extract_attributes_from_tx(event), ))), TRY_EVENT_TYPE => Some(IbcEvent::OpenTryConnection(OpenTry::from( extract_attributes_from_tx(event), ))), ACK_EVENT_TYPE => Some(IbcEvent::OpenAckConnection(OpenAck::from( extract_attributes_from_tx(event), ))), CONFIRM_EVENT_TYPE => Some(IbcEvent::OpenConfirmConnection(OpenConfirm::from( extract_attributes_from_tx(event), ))), _ => None, } } fn extract_attributes_from_tx(event: &tendermint::abci::Event) -> Attributes { let mut attr = Attributes::default(); for tag in &event.attributes { let key = tag.key.as_ref(); let value = tag.value.as_ref(); match key { CONN_ID_ATTRIBUTE_KEY => attr.connection_id = value.parse().ok(), CLIENT_ID_ATTRIBUTE_KEY => attr.client_id = value.parse().unwrap(), COUNTERPARTY_CONN_ID_ATTRIBUTE_KEY => { attr.counterparty_connection_id = value.parse().ok() } 
COUNTERPARTY_CLIENT_ID_ATTRIBUTE_KEY => { attr.counterparty_client_id = value.parse().unwrap() } // TODO: `Attributes` has 5 fields and we're only parsing 4 _ => {} } } attr } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Attributes { pub height: Height, pub connection_id: Option<ConnectionId>, pub client_id: ClientId, pub counterparty_connection_id: Option<ConnectionId>, pub counterparty_client_id: ClientId, } impl Default for Attributes { fn default() -> Self { Attributes { height: Default::default(), connection_id: Default::default(), client_id: Default::default(), counterparty_connection_id: Default::default(), counterparty_client_id: Default::default(), } } } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct OpenInit(Attributes); impl OpenInit { pub fn attributes(&self) -> &Attributes { &self.0 } pub fn connection_id(&self) -> &Option<ConnectionId> { &self.0.connection_id } pub fn height(&self) -> Height { self.0.height } pub fn set_height(&mut self, height: Height) { self.0.height = height; } } impl From<Attributes> for OpenInit { fn from(attrs: Attributes) -> Self { OpenInit(attrs) } } impl From<OpenInit> for IbcEvent { fn from(v: OpenInit) -> Self { IbcEvent::OpenInitConnection(v) } } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct OpenTry(Attributes); impl OpenTry { pub fn attributes(&self) -> &Attributes { &self.0 } pub fn connection_id(&self) -> &Option<ConnectionId> { &self.0.connection_id } pub fn height(&self) -> Height { self.0.height } pub fn set_height(&mut self, height: Height) { self.0.height = height; } } impl From<Attributes> for OpenTry { fn from(attrs: Attributes) -> Self { OpenTry(attrs) } } impl From<OpenTry> for IbcEvent { fn from(v: OpenTry) -> Self { IbcEvent::OpenTryConnection(v) } } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct OpenAck(Attributes); impl OpenAck { pub fn attributes(&self) -> &Attributes { &self.0 } pub fn connection_id(&self) -> 
&Option<ConnectionId> { &self.0.connection_id } pub fn height(&self) -> Height { self.0.height } pub fn set_height(&mut self, height: Height) { self.0.height = height; } } impl From<Attributes> for OpenAck { fn from(attrs: Attributes) -> Self { OpenAck(attrs) } } impl From<OpenAck> for IbcEvent { fn from(v: OpenAck) -> Self { IbcEvent::OpenAckConnection(v) } } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct OpenConfirm(Attributes); impl OpenConfirm { pub fn attributes(&self) -> &Attributes { &self.0 } pub fn connection_id(&self) -> &Option<ConnectionId> { &self.0.connection_id } pub fn height(&self) -> Height { self.0.height } pub fn set_height(&mut self, height: Height) { self.0.height = height; } } impl From<Attributes> for OpenConfirm { fn from(attrs: Attributes) -> Self { OpenConfirm(attrs) } } impl From<OpenConfirm> for IbcEvent { fn from(v: OpenConfirm) -> Self { IbcEvent::OpenConfirmConnection(v) } }
28.247525
122
0.64143
081a756af69dd47d02a1ec26676c51025e7dd3b5
692
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // aux-build:cci_borrow_lib.rs #![feature(managed_boxes)] extern crate cci_borrow_lib; use cci_borrow_lib::foo; pub fn main() { let p = @22u; let r = foo(p); println!("r={}", r); assert_eq!(r, 22u); }
28.833333
69
0.703757
1c51df054a45f7bd493dc6cae65ed1c76a1544ad
3,929
use pin_project::{pin_project, UnsafeUnpin}; # [pin (__private (UnsafeUnpin , project = EnumProj , project_ref = EnumProjRef))] enum Enum<T, U> { Struct { #[pin] pinned: T, unpinned: U, }, Tuple(#[pin] T, U), Unit, } #[allow(box_pointers)] #[allow(deprecated)] #[allow(explicit_outlives_requirements)] #[allow(single_use_lifetimes)] #[allow(unreachable_pub)] #[allow(clippy::unknown_clippy_lints)] #[allow(clippy::pattern_type_mismatch)] #[allow(clippy::redundant_pub_crate)] #[allow(clippy::type_repetition_in_bounds)] #[allow(dead_code)] #[allow(clippy::mut_mut)] enum EnumProj<'pin, T, U> where Enum<T, U>: 'pin, { Struct { pinned: ::pin_project::__private::Pin<&'pin mut (T)>, unpinned: &'pin mut (U), }, Tuple(::pin_project::__private::Pin<&'pin mut (T)>, &'pin mut (U)), Unit, } #[allow(box_pointers)] #[allow(deprecated)] #[allow(explicit_outlives_requirements)] #[allow(single_use_lifetimes)] #[allow(unreachable_pub)] #[allow(clippy::unknown_clippy_lints)] #[allow(clippy::pattern_type_mismatch)] #[allow(clippy::redundant_pub_crate)] #[allow(clippy::type_repetition_in_bounds)] #[allow(dead_code)] #[allow(clippy::ref_option_ref)] enum EnumProjRef<'pin, T, U> where Enum<T, U>: 'pin, { Struct { pinned: ::pin_project::__private::Pin<&'pin (T)>, unpinned: &'pin (U), }, Tuple(::pin_project::__private::Pin<&'pin (T)>, &'pin (U)), Unit, } #[allow(box_pointers)] #[allow(deprecated)] #[allow(explicit_outlives_requirements)] #[allow(single_use_lifetimes)] #[allow(unreachable_pub)] #[allow(clippy::unknown_clippy_lints)] #[allow(clippy::pattern_type_mismatch)] #[allow(clippy::redundant_pub_crate)] #[allow(clippy::type_repetition_in_bounds)] #[allow(clippy::semicolon_if_nothing_returned)] #[allow(clippy::use_self)] #[allow(clippy::used_underscore_binding)] const _: () = { impl<T, U> Enum<T, U> { fn project<'pin>( self: ::pin_project::__private::Pin<&'pin mut Self>, ) -> EnumProj<'pin, T, U> { unsafe { match self.get_unchecked_mut() { Self::Struct { pinned, unpinned } => 
EnumProj::Struct { pinned: ::pin_project::__private::Pin::new_unchecked(pinned), unpinned, }, Self::Tuple(_0, _1) => { EnumProj::Tuple(::pin_project::__private::Pin::new_unchecked(_0), _1) } Self::Unit => EnumProj::Unit, } } } #[allow(clippy::missing_const_for_fn)] fn project_ref<'pin>( self: ::pin_project::__private::Pin<&'pin Self>, ) -> EnumProjRef<'pin, T, U> { unsafe { match self.get_ref() { Self::Struct { pinned, unpinned } => EnumProjRef::Struct { pinned: ::pin_project::__private::Pin::new_unchecked(pinned), unpinned, }, Self::Tuple(_0, _1) => { EnumProjRef::Tuple(::pin_project::__private::Pin::new_unchecked(_0), _1) } Self::Unit => EnumProjRef::Unit, } } } } impl<'pin, T, U> ::pin_project::__private::Unpin for Enum<T, U> where ::pin_project::__private::Wrapper<'pin, Self>: ::pin_project::UnsafeUnpin { } trait EnumMustNotImplDrop {} #[allow(clippy::drop_bounds, drop_bounds)] impl<T: ::pin_project::__private::Drop> EnumMustNotImplDrop for T {} impl<T, U> EnumMustNotImplDrop for Enum<T, U> {} #[doc(hidden)] impl<T, U> ::pin_project::__private::PinnedDrop for Enum<T, U> { unsafe fn drop(self: ::pin_project::__private::Pin<&mut Self>) {} } }; unsafe impl<T: Unpin, U> UnsafeUnpin for Enum<T, U> {} fn main() {}
33.016807
96
0.580555
75e7e789f9cc24da9343ea48f1b1b703e7813c6e
11,764
// Pokedex UI components for a terminal (tui-rs) Pokedex browser.
// Reviewer notes (documentation-only pass; the code itself is unchanged):
// - `PokedexDetail` wraps the `pdex://pokemon/<species>?pokedex=<dex>` pages,
//   caching one `Page` per dex number in `contents` and updating the selected
//   `number` from `PositionUpdate<Pokedex>` messages.
// - `PokedexSprite` lazily fetches the default front sprite blob and caches the
//   decoded `Png`; the commented-out fields show an earlier attempt to use the
//   "official-artwork" variant.
// - `PokemonBasics` renders name/genus, height/weight (imperial; the in-code
//   TODO notes user-controlled units are wanted), a gender-ratio label mapped
//   from `GenderRatio`, capture rate and base experience.
// - `TypeLink` is a focusable bordered box drawn with heavy box characters when
//   focused and light ones otherwise; Enter or a left-mouse-button release
//   navigates to `pdex://type/<type>`. A spinner is shown until the type
//   resource resolves. Its `layout_hint` is 3 rows vertically, or name width
//   plus padding horizontally.
// - `Pokedex` implements `Listable` over a pokedex's entries; `get_item` does a
//   linear `find` per entry (the existing TODO already flags this as quadratic)
//   and resolves the default variety's first/second types for `format`.
//! Pokedex-related components. use std::collections::HashMap; use std::fmt::Debug; use std::iter; use std::sync::Arc; use pkmn::model::resource::Name; use pkmn::model::species::GenderRatio; use pkmn::model::LanguageName; use pkmn::model::PokedexName; use pkmn::model::Pokemon; use pkmn::model::Species; use pkmn::model::Type; use pkmn::model::TypeName; use crossterm::event::KeyCode; use crossterm::event::KeyEvent; use crossterm::event::MouseButton; use crossterm::event::MouseEvent; use crossterm::event::MouseEventKind; use tui::layout::Constraint; use tui::layout::Direction; use tui::style::Modifier; use tui::style::Style; use tui::text::Span; use tui::text::Spans; use tui::text::Text; use tui::widgets::Paragraph; use crate::dex::Dex; use crate::ui::component::image::Png; use crate::ui::component::list::Listable; use crate::ui::component::list::PositionUpdate; use crate::ui::component::page::Page; use crate::ui::component::Component; use crate::ui::component::Event; use crate::ui::component::EventArgs; use crate::ui::component::LayoutHintArgs; use crate::ui::component::RenderArgs; use crate::ui::widgets::Spinner; /// A component comprising the main window of the Pokedex, which is essentially /// a wrapper over the `pdex://pokedex/<species>` pages #[derive(Clone, Debug)] pub struct PokedexDetail { pokedex: PokedexName, number: u32, contents: HashMap<u32, Page>, } impl PokedexDetail { pub fn new(pokedex: PokedexName, number: u32) -> Self { Self { pokedex, number, contents: HashMap::new(), } } } impl Component for PokedexDetail { fn process_event(&mut self, args: &mut EventArgs) { if let Event::Message(m) = &args.event { if let Some(update) = m.downcast_ref::<PositionUpdate<Pokedex>>() { self.number = update.index as u32 + 1; } } if let Some(page) = self.contents.get_mut(&self.number) { page.process_event(args) } } fn render(&mut self, args: &mut RenderArgs) { if let Some(page) = self.contents.get_mut(&self.number) { page.render(args); return; } let name = (|| { let 
pokedex = args.dex.pokedexes.get_named(self.pokedex)?; let entry = pokedex.entries.iter().find(|e| e.number == self.number)?; entry.species.name().map(String::from) })(); let name = match name { Some(n) => n, None => return, }; let mut page = Page::request( format!("pdex://pokemon/{}?pokedex={}", name, self.pokedex.to_str()), Arc::clone(args.url_handler), ) .hide_chrome(true); page.render(args); self.contents.insert(self.number, page); } fn wants_focus(&self) -> bool { true } } /// Displays the sprite of a Pokemon. // TODO: Make this display *all* available sprites, maybe with some kind of // scroller? #[derive(Clone, Debug)] pub struct PokedexSprite { name: String, png: Option<Png>, } impl PokedexSprite { pub fn new(name: String) -> Self { Self { name, png: None } } } impl Component for PokedexSprite { fn render(&mut self, args: &mut RenderArgs) { if let Some(png) = &mut self.png { png.render(args); return; } let blob = (|| { let pokemon = args.dex.pokemon.get(&self.name)?; pokemon .sprites //.other //.get("official-artwork")? .defaults .front_default .clone() })(); let blob = match blob { Some(b) => b, None => return, }; let mut png = Png::new(blob); png.render(args); self.png = Some(png); } } /// A component that displays basic Pokemon information, including name, /// number, genus, height, and weight. 
#[derive(Clone, Debug)] pub struct PokemonBasics { species: Arc<Species>, pokemon: Arc<Pokemon>, number: u32, } impl PokemonBasics { pub fn new( species: Arc<Species>, pokemon: Arc<Pokemon>, number: u32, ) -> Self { Self { species, pokemon, number, } } } impl Component for PokemonBasics { fn render(&mut self, args: &mut RenderArgs) { let name = self .species .localized_names .get(LanguageName::English) .unwrap_or("???"); let genus = self .species .genus .get(LanguageName::English) .unwrap_or("???"); let (feet, inches) = self.pokemon.height.feet_inches(); let pounds = self.pokemon.weight.pounds(); let gender_ratio = match self.species.gender_ratio { GenderRatio::AllMale => "All Males", GenderRatio::FewFemales => "7:1 M:F", GenderRatio::SomeFemales => "3:1 M:F", GenderRatio::Even => "1:1 M:F", GenderRatio::SomeMales => "1:3 M:F", GenderRatio::FewMales => "1:7 M:F", GenderRatio::AllFemale => "All Females", GenderRatio::Genderless => "Genderless", }; let text = Text::from(vec![ Spans(vec![Span::styled( format!("#{:03} {} - {}", self.number, name, genus), args.style_sheet.unfocused, )]), // TODO: user-controled units. Spans(vec![Span::styled( format!( "H: {}'{}\", W: {:.1} lb, {}", feet, inches, pounds, gender_ratio ), args.style_sheet.unfocused, )]), Spans(vec![Span::styled( format!( "Catch: {}, Exp: {}", self.species.capture_rate, self.pokemon.base_experience ), args.style_sheet.unfocused, )]), ]); Paragraph::new(text).render(args); } } /// A hyperlinked box that displays a given type, which redirects to /// `pdex://type/<type>`. #[derive(Clone, Debug)] pub struct TypeLink(pub TypeName); impl Component for TypeLink { fn wants_focus(&self) -> bool { true } fn process_event(&mut self, args: &mut EventArgs) { match args.event { Event::Key(KeyEvent { code: KeyCode::Enter, .. }) => { args.commands.claim(); args .commands .navigate_to(format!("pdex://type/{}", self.0.to_str())); } Event::Mouse(MouseEvent { kind: MouseEventKind::Up(MouseButton::Left), .. 
}) => { args.commands.claim(); args .commands .navigate_to(format!("pdex://type/{}", self.0.to_str())); } _ => {} } } fn render(&mut self, args: &mut RenderArgs) { let color = args.style_sheet.type_colors.get(self.0); let (style, chars) = if args.is_focused { let style = args .style_sheet .unfocused .patch(args.style_sheet.focused) .add_modifier(Modifier::BOLD); let chars = ["━", "┃", "┏", "┓", "┗", "┛"]; (style, chars) } else { let style = args.style_sheet.unfocused; let chars = ["─", "│", "┌", "┐", "└", "┘"]; (style, chars) }; let style = style.fg(color); let name = match args.dex.types.get_named(self.0) { Some(x) => { let name = x .localized_names .get(LanguageName::English) .unwrap_or("???"); Span::styled(format!(" {} ", name.to_uppercase()), style) } None => { Spinner::new(args.frame_number).style(style).into_spans().0[0].clone() } }; let width = name.width(); let text = Text::from(vec![ Spans::from(vec![ Span::styled(chars[2], style), Span::styled( iter::repeat(chars[0]).take(width).collect::<String>(), style, ), Span::styled(chars[3], style), ]), Spans::from(vec![ Span::styled(chars[1], style), name, Span::styled(chars[1], style), ]), Spans::from(vec![ Span::styled(chars[4], style), Span::styled( iter::repeat(chars[0]).take(width).collect::<String>(), style, ), Span::styled(chars[5], style), ]), ]); /*let left = Span::styled(if args.is_focused { "<" } else { " " }, style.bg(color)); let right = Span::styled(if args.is_focused { ">" } else { " " }, style.bg(color)); let top = iter::repeat('▄').take(name.width() + 2).collect::<String>(); let bottom = iter::repeat('▀').take(name.width() + 2).collect::<String>(); let text = Text::from(vec![ Spans::from(Span::styled(top, style.fg(color))), Spans::from(vec![left, name, right]), Spans::from(Span::styled(bottom, style.fg(color))), ]);*/ Paragraph::new(text).render(args) } fn layout_hint(&self, args: &LayoutHintArgs) -> Option<Constraint> { if args.direction == Direction::Vertical { return 
Some(Constraint::Length(3)); } let len = match args.dex.types.get_named(self.0) { Some(x) => { x.localized_names .get(LanguageName::English) .unwrap_or("???") .len() + 5 } None => 3, }; Some(Constraint::Length(len as u16)) } } /// A [`Listable`] that shows all pokemon belonging to a particular Pokedex. #[derive(Clone, Debug)] pub struct Pokedex(pub PokedexName); #[derive(Clone, Debug)] pub struct PokedexItem { number: u32, species: Arc<Species>, first_type: Arc<Type>, second_type: Option<Arc<Type>>, } impl Listable for Pokedex { type Item = PokedexItem; fn count(&mut self, dex: &Dex) -> Option<usize> { Some(dex.pokedexes.get_named(self.0)?.entries.len()) } fn get_item(&mut self, index: usize, dex: &Dex) -> Option<Self::Item> { // TODO: ummm this is quadratic. This should probably be a hashmap or vector // in `pkmn`. let number = index as u32 + 1; let pokedex = dex.pokedexes.get_named(self.0)?; let entry = pokedex.entries.iter().find(|e| e.number == number)?; let species = dex.species.get(entry.species.name()?)?; let default = &species.varieties.iter().find(|v| v.is_default)?.pokemon; let pokemon = dex.pokemon.get(default.name()?)?; let mut types = pokemon .types .iter() .filter_map(|ty| Some((ty.slot, ty.ty.variant()?))) .collect::<Vec<_>>(); types.sort_by_key(|&(i, ..)| i); let (first_type, second_type) = match &*types { &[(_, first)] => (dex.types.get_named(first)?, None), &[(_, first), (_, second)] => ( dex.types.get_named(first)?, Some(dex.types.get_named(second)?), ), _ => return None, }; Some(PokedexItem { number, species, first_type, second_type, }) } fn url_of(&self, _item: &Self::Item) -> Option<String> { None } fn format<'a>(&'a self, item: &'a Self::Item, args: &RenderArgs) -> Text<'a> { let name = item .species .localized_names .get(LanguageName::English) .unwrap_or("???"); let mut spans = Spans::from(vec![Span::raw(format!("#{:03} {:12} ", item.number, name))]); let first_type_name = item .first_type .localized_names .get(LanguageName::English) 
.unwrap_or("???") .chars() .take(3) .collect::<String>(); spans.0.push(Span::styled( first_type_name, Style::default().fg( args .style_sheet .type_colors .get(item.first_type.name.variant().unwrap_or(TypeName::Unknown)), ), )); if let Some(second_type) = &item.second_type { let second_type_name = second_type .localized_names .get(LanguageName::English) .unwrap_or("???") .chars() .take(3) .collect::<String>(); spans.0.push(Span::raw("·")); spans.0.push(Span::styled( second_type_name, Style::default().fg( args .style_sheet .type_colors .get(second_type.name.variant().unwrap_or(TypeName::Unknown)), ), )); } spans.into() } }
25.518438
80
0.57591
39467fe94027324cecd00f7127b262c2782be653
8,622
// Container-engine abstraction: hides whether images are built, pushed, and
// removed via the `docker` CLI or via `buildah` (the latter non-Windows only).
// Reviewer notes (documentation-only pass; the code itself is unchanged):
// - `EngineError` enumerates all engine failure modes, including the explicit
//   buildah + `--multi-layer` incompatibility tied to buildah issue #2215;
//   `fail_if_buildah_and_multilayer` enforces that combination check up front.
// - `cli_arg()` defines the `--engine` CLAP argument; on Windows the option is
//   restricted to `docker` and hidden, since there is no real choice there.
// - `TryFrom<&ArgMatches>` turns the parsed `--engine` value into a boxed
//   `dyn Engine` (DockerEngine or BuildahEngine).
// - The `Engine` trait exposes per-engine command builders plus default
//   helpers (`image_id`, `remove_image`, `push_image`, `build`) that execute
//   them through the shared `run()` helper, mapping non-zero exit statuses to
//   the caller-supplied `EngineError` variant. `image_id` reads the image ID
//   from the first line of the command's stdout.
// - `resolve_engine_binary` locates the engine executable on PATH, failing
//   with `ExecutableNotFound` otherwise.
//! Abstractions for dealing with the main behaviors we need when //! dealing with container images, while remaining unconcerned about //! which underlying tool is actually performing those tasks. //! //! This allows us to swap out the `docker` CLI for `buildah` if we //! want to create containers as a non-root user, for instance. use crate::error::Result; use clap::{Arg, ArgMatches}; use habitat_core::fs::find_command; use std::{convert::TryFrom, path::{Path, PathBuf}, process::{Command, ExitStatus}, result::Result as StdResult, str::FromStr}; #[cfg(not(windows))] mod buildah; mod docker; #[derive(Debug, Fail)] enum EngineError { #[fail(display = "Container image build failed with exit code: {}", _0)] BuildFailed(ExitStatus), #[fail(display = "Could not find the container engine executable '{}' on the PATH", _0)] ExecutableNotFound(String), #[fail(display = "Could not determine container image ID for: {}", _0)] ImageIdNotFound(String), #[fail(display = "Removing local container images failed with exit code: {}", _0)] RemoveFailed(ExitStatus), #[fail(display = "Container image push failed with exit code: {}", _0)] PushFailed(ExitStatus), #[fail(display = "Unknown Container Engine '{}' was specified.", _0)] UnknownEngine(String), #[fail(display = "Cannot use `--engine=buildah` with `--multi-layer` due to https://github.com/containers/buildah/issues/2215. Please use `--engine=docker` or remove `--multi-layer`.")] BuildahIncompatibleWithMultiLayer, #[cfg(not(windows))] #[fail(display = "{}", _0)] EngineSpecificError(failure::Error), } /// Due to a bug in Buildah, any layers that we create in a /// multi-layer build won't get reused, which eliminates any benefit /// we might get from them. /// /// Until that bug is fixed, we'll prevent using Buildah to create /// multi-layer images, as the confusion arising from generating /// multiple layers but not being able to reuse any of them is /// something that's better to avoid. 
/// /// When https://github.com/containers/buildah/issues/2215 is fixed, /// we can update our Buildah dependency and remove this check. pub fn fail_if_buildah_and_multilayer(matches: &ArgMatches) -> Result<()> { if matches.value_of("ENGINE") == Some("buildah") && matches.is_present("MULTI_LAYER") { return Err(EngineError::BuildahIncompatibleWithMultiLayer.into()); } Ok(()) } /// Things that can build containers! #[derive(Clone, Copy, Debug)] enum EngineKind { Docker, #[cfg(not(windows))] Buildah, } impl FromStr for EngineKind { type Err = EngineError; fn from_str(s: &str) -> StdResult<Self, Self::Err> { match s { "docker" => Ok(EngineKind::Docker), #[cfg(not(windows))] "buildah" => Ok(EngineKind::Buildah), _ => Err(EngineError::UnknownEngine(s.to_string())), } } } /// Define the CLAP CLI argument for specifying a container build /// engine to use. #[rustfmt::skip] // otherwise the long_help formatting goes crazy pub fn cli_arg<'a, 'b>() -> Arg<'a, 'b> { let arg = Arg::with_name("ENGINE").value_name("ENGINE") .long("engine") .required(true) .env("HAB_PKG_EXPORT_CONTAINER_ENGINE") .takes_value(true) .multiple(false) .default_value("docker") .help("The name of the container creation engine to use."); // TODO (CM): Find a way to tie this more closely to the // Engine enum values. if cfg!(windows) { // Since there is effectively no choice of engine for // Windows, we hide the CLI option and don't document it // any further. arg.possible_values(&["docker"]).hidden(true) } else { arg.long_help( "Using the `docker` engine allows you to use Docker to create your container images. You must ensure that a Docker daemon is running on the host where this command is executed, and that the user executing the command has permission to access the Docker socket. Using the `buildah` engine allows you to create container images as an unprivileged user, and without having to use a Docker daemon. 
This is the recommended engine for use in CI systems and other environments where security is of particular concern. Please see https://buildah.io for more details. Both engines create equivalent container images. ", ) .possible_values(&["docker", "buildah"]) } } impl TryFrom<&ArgMatches<'_>> for Box<dyn Engine> { type Error = failure::Error; fn try_from(value: &ArgMatches) -> StdResult<Self, Self::Error> { let engine_kind = clap::value_t!(value, "ENGINE", EngineKind).expect("ENGINE is a required option"); match engine_kind { EngineKind::Docker => Ok(Box::new(docker::DockerEngine::new()?)), #[cfg(not(windows))] EngineKind::Buildah => Ok(Box::new(buildah::BuildahEngine::new()?)), } } } pub trait Engine { /// A command that takes a container image reference and returns /// the ID of that image on the first line of standard output. fn image_id_command(&self, image_reference: &str) -> Command; /// A command that removes the specified local container image; fn image_removal_command(&self, image_reference: &str) -> Command; /// A command that pushes the specified container image, using /// configuration stored in `config_dir`. // TODO (CM): accept repository URL information // TODO (CM): worth taking credential / repo information and // handling the config directory stuff internally? fn image_push_command(&self, image_reference: &str, config_dir: &Path) -> Command; fn build_command(&self, build_context: &Path, tags: &[String], memory: Option<&str>) -> Command; /// Retrieve the ID of the given image, which is expected to exist. fn image_id(&self, image_reference: &str) -> Result<String> { let mut cmd = self.image_id_command(image_reference); debug!("Running: {:?}", cmd); let output = cmd.output()?; let stdout = String::from_utf8_lossy(&output.stdout); match stdout.lines().next() { Some(id) => Ok(id.to_string()), None => Err(EngineError::ImageIdNotFound(image_reference.to_string()).into()), } } /// Delete the referenced image in the local image store. 
fn remove_image(&self, image_reference: &str) -> Result<()> { run(self.image_removal_command(image_reference), EngineError::RemoveFailed) } /// Pushes the specified container image to a remote repository, using /// configuration stored in `config_dir`. // TODO (CM): accept repository URL information // TODO (CM): worth taking credential / repo information and // handling the config directory stuff internally? fn push_image(&self, image_reference: &str, config_dir: &Path) -> Result<()> { run(self.image_push_command(image_reference, config_dir), EngineError::PushFailed) } /// Actually create the image. /// /// `build_context` will serve as the build context directory, and /// a suitable `Dockerfile` is expected to be present in it. The /// image will be tagged with each of `tags`. /// /// `memory` governs how much memory is provided to the build /// process. /// /// Returns the ID of the image that was built. fn build(&self, build_context: &Path, tags: &[String], memory: Option<&str>) -> Result<String> { run(self.build_command(build_context, tags, memory), EngineError::BuildFailed)?; let identifier = tags.first() .expect("There should always be at least one tag"); self.image_id(identifier) } } /// General helper function for actually executing all these commands. /// /// Not part of the trait because nobody need to be calling this from /// outside. fn run<F>(mut cmd: Command, err_fn: F) -> Result<()> where F: Fn(ExitStatus) -> EngineError { debug!("Running: {:?}", &cmd); let exit_status = cmd.spawn()?.wait()?; if !exit_status.success() { return Err(err_fn(exit_status).into()); } Ok(()) } fn resolve_engine_binary(binary_name: &str) -> StdResult<PathBuf, EngineError> { find_command(binary_name).ok_or_else(|| { EngineError::ExecutableNotFound(binary_name.to_string()) }) }
37.815789
189
0.648341
4b631b9fff709938f9737a93e2096a70fe986758
1,526
use crate::{ claim::Claim, crypto::PublicKey, id::AccountId, proto::{self, ToProto, Transaction::TransactionBody_oneof_data}, transaction::Transaction, Client, }; use failure::Error; use query_interface::{interfaces, vtable_for}; use std::any::Any; #[derive(Debug)] pub struct TransactionCryptoAddClaim { account: AccountId, hash: Vec<u8>, keys: Vec<PublicKey>, } interfaces!( TransactionCryptoAddClaim: dyn Any, dyn ToProto<TransactionBody_oneof_data> ); impl TransactionCryptoAddClaim { pub fn new(client: &Client, account: AccountId, hash: Vec<u8>) -> Transaction<Self> { Transaction::new( client, Self { account, hash, keys: Vec::new(), }, ) } } impl Transaction<TransactionCryptoAddClaim> { #[inline] pub fn key(&mut self, key: PublicKey) -> &mut Self { self.inner().keys.push(key); self } } impl ToProto<TransactionBody_oneof_data> for TransactionCryptoAddClaim { fn to_proto(&self) -> Result<TransactionBody_oneof_data, Error> { let mut data = proto::CryptoAddClaim::CryptoAddClaimTransactionBody::new(); data.set_accountID(self.account.to_proto()?); let claim = Claim { account: self.account, hash: self.hash.clone(), keys: self.keys.clone(), }; data.set_claim(claim.to_proto()?); Ok(TransactionBody_oneof_data::cryptoAddClaim(data)) } }
24.612903
89
0.616645
7231d3c3ba59d645af4e85b8f57ccb40f961823f
7,984
// Layer-label generation for the Stacked-DRG proof-of-replication.
// Reviewer notes (documentation-only pass; the code itself is unchanged):
// - `create_labels_for_encoding` and `create_labels_for_decoding` walk layers
//   1..=layers, producing each layer's labels node-by-node into `layer_labels`
//   while `exp_labels` holds the previous layer's labels for expander parents;
//   each finished layer is flushed to disk and the two buffers are swapped
//   (the in-code comment notes this keeps ~2x sector size in memory for speed).
//   The encoding variant can skip layers already marked `generated` by
//   reloading them into `exp_labels`; the decoding variant additionally
//   collects a per-layer `DiskStore` into `LabelsCache`.
// - `create_label` (layer 1: no expander parents) and `create_label_exp`
//   (layers > 1) hash a SHA-512 preimage consisting of the expanded replica id
//   plus a 64-byte buffer carrying (layer_index as u32 BE, node as u64 BE);
//   for node > 0 parent data is folded in via the graph's
//   `copy_parents_data` / `copy_parents_data_exp`. The first 32 bytes of the
//   digest become the label, and the top two bits of the last byte are masked
//   (`&= 0b0011_1111`) so the value fits in the field Fr, as the comment says.
// - The `prefetch!` of node-1's label is a cache warm-up; node-1 is always a
//   parent of node for node > 0.
use sha2raw::{ Sha512, utils as sha2utils }; use std::marker::PhantomData; use anyhow::{Context, Result}; use generic_array::typenum::Unsigned; use log::*; use merkletree::store::{DiskStore, StoreConfig}; use storage_proofs_core::{ drgraph::Graph, hasher::Hasher, merkle::*, util::{data_at_node_offset, NODE_SIZE}, }; use super::super::{ cache::ParentCache, proof::LayerState, Labels, LabelsCache, StackedBucketGraph, }; #[allow(clippy::type_complexity)] pub fn create_labels_for_encoding<Tree: 'static + MerkleTreeTrait, T: AsRef<[u8]>>( graph: &StackedBucketGraph<Tree::Hasher>, parents_cache: &mut ParentCache, layers: usize, replica_id: T, config: StoreConfig, ) -> Result<(Labels<Tree>, Vec<LayerState>)> { info!("generate labels"); let layer_states = super::prepare_layers::<Tree>(graph, &config, layers); let layer_size = graph.size() * NODE_SIZE; // NOTE: this means we currently keep 2x sector size around, to improve speed. let mut layer_labels = vec![0u8; layer_size]; // Buffer for labels of the current layer let mut exp_labels = vec![0u8; layer_size]; // Buffer for labels of the previous layer, needed for expander parents for (layer, layer_state) in (1..=layers).zip(layer_states.iter()) { info!("generating layer: {}", layer); if layer_state.generated { info!("skipping layer {}, already generated", layer); // load the already generated layer into exp_labels super::read_layer(&layer_state.config, &mut exp_labels)?; continue; } parents_cache.reset()?; if layer == 1 { for node in 0..graph.size() { create_label( graph, Some(parents_cache), &replica_id, &mut layer_labels, layer, node, )?; } } else { for node in 0..graph.size() { create_label_exp( graph, Some(parents_cache), &replica_id, &exp_labels, &mut layer_labels, layer, node, )?; } } // Write the result to disk to avoid keeping it in memory all the time. 
let layer_config = &layer_state.config; info!(" storing labels on disk"); super::write_layer(&layer_labels, layer_config).context("failed to store labels")?; info!( " generated layer {} store with id {}", layer, layer_config.id ); info!(" setting exp parents"); std::mem::swap(&mut layer_labels, &mut exp_labels); } Ok(( Labels::<Tree> { labels: layer_states.iter().map(|s| s.config.clone()).collect(), _h: PhantomData, }, layer_states, )) } #[allow(clippy::type_complexity)] pub fn create_labels_for_decoding<Tree: 'static + MerkleTreeTrait, T: AsRef<[u8]>>( graph: &StackedBucketGraph<Tree::Hasher>, parents_cache: &mut ParentCache, layers: usize, replica_id: T, config: StoreConfig, ) -> Result<LabelsCache<Tree>> { info!("generate labels"); // For now, we require it due to changes in encodings structure. let mut labels: Vec<DiskStore<<Tree::Hasher as Hasher>::Domain>> = Vec::with_capacity(layers); let layer_size = graph.size() * NODE_SIZE; // NOTE: this means we currently keep 2x sector size around, to improve speed. let mut layer_labels = vec![0u8; layer_size]; // Buffer for labels of the current layer let mut exp_labels = vec![0u8; layer_size]; // Buffer for labels of the previous layer, needed for expander parents for layer in 1..=layers { info!("generating layer: {}", layer); parents_cache.reset()?; if layer == 1 { for node in 0..graph.size() { create_label( graph, Some(parents_cache), &replica_id, &mut layer_labels, layer, node, )?; } } else { for node in 0..graph.size() { create_label_exp( graph, Some(parents_cache), &replica_id, &exp_labels, &mut layer_labels, layer, node, )?; } } // Write the result to disk to avoid keeping it in memory all the time. 
info!(" storing labels on disk"); super::write_layer(&layer_labels, &config)?; let layer_store: DiskStore<<Tree::Hasher as Hasher>::Domain> = DiskStore::new_from_disk(graph.size(), Tree::Arity::to_usize(), &config)?; info!(" generated layer {} store with id {}", layer, config.id); info!(" setting exp parents"); std::mem::swap(&mut layer_labels, &mut exp_labels); // Track the layer specific store and StoreConfig for later retrieval. labels.push(layer_store); } assert_eq!( labels.len(), layers, "Invalid amount of layers encoded expected" ); Ok(LabelsCache::<Tree> { labels }) } pub fn create_label<H: Hasher, T: AsRef<[u8]>>( graph: &StackedBucketGraph<H>, cache: Option<&mut ParentCache>, replica_id: T, layer_labels: &mut [u8], layer_index: usize, node: usize, ) -> Result<()> { let mut hasher = Sha512::new(); let mut buffer = [0u8; 64]; let replica_id_expand = sha2utils::bits256_expand_to_bits512(AsRef::<[u8]>::as_ref(&replica_id)); buffer[..4].copy_from_slice(&(layer_index as u32).to_be_bytes()); buffer[4..12].copy_from_slice(&(node as u64).to_be_bytes()); hasher.input(&[&replica_id_expand[..], &buffer[..]][..]); // hash parents for all non 0 nodes let hash = if node > 0 { // prefetch previous node, which is always a parent let prev = &layer_labels[(node - 1) * NODE_SIZE..node * NODE_SIZE]; prefetch!(prev.as_ptr() as *const i8); graph.copy_parents_data(node as u32, &*layer_labels, hasher, cache)? } else { hasher.finish() }; // store the newly generated key let start = data_at_node_offset(node); let end = start + NODE_SIZE; layer_labels[start..end].copy_from_slice(&hash[..32]); // strip last two bits, to ensure result is in Fr. 
layer_labels[end - 1] &= 0b0011_1111; Ok(()) } pub fn create_label_exp<H: Hasher, T: AsRef<[u8]>>( graph: &StackedBucketGraph<H>, cache: Option<&mut ParentCache>, replica_id: T, exp_parents_data: &[u8], layer_labels: &mut [u8], layer_index: usize, node: usize, ) -> Result<()> { let mut hasher = Sha512::new(); let mut buffer = [0u8; 64]; let replica_id_expand = sha2utils::bits256_expand_to_bits512(AsRef::<[u8]>::as_ref(&replica_id)); buffer[0..4].copy_from_slice(&(layer_index as u32).to_be_bytes()); buffer[4..12].copy_from_slice(&(node as u64).to_be_bytes()); hasher.input(&[&replica_id_expand[..], &buffer[..]][..]); // hash parents for all non 0 nodes let hash = if node > 0 { // prefetch previous node, which is always a parent let prev = &layer_labels[(node - 1) * NODE_SIZE..node * NODE_SIZE]; prefetch!(prev.as_ptr() as *const i8); graph.copy_parents_data_exp(node as u32, &*layer_labels, exp_parents_data, hasher, cache)? } else { hasher.finish() }; // store the newly generated key let start = data_at_node_offset(node); let end = start + NODE_SIZE; layer_labels[start..end].copy_from_slice(&hash[..32]); // strip last two bits, to ensure result is in Fr. layer_labels[end - 1] &= 0b0011_1111; Ok(()) }
32.193548
119
0.576528
1ed6b947b99b14f9b73723349566af11d90278d1
231
/* * Library Name: gunrs * * Information: * Gunrs is in work in progress. * * Notes: * - * */ pub struct Radisk{ } impl Radisk { pub fn r (){ } pub fn encode(){ } pub fn decode(){ } }
7.451613
33
0.467532
79d19fbb39f9f69f3800124a22302d503ef3d605
2,204
// hashmap2.rs // A basket of fruits in the form of a hash map is given. The key // represents the name of the fruit and the value represents how many // of that particular fruit is in the basket. You have to put *MORE // THAN 11* fruits in the basket. Three types of fruits - Apple (4), // Mango (2) and Lychee (5) are already given in the basket. You are // not allowed to insert any more of these fruits! // // Make me pass the tests! // // Execute the command `rustlings hint hashmap2` if you need // hints. use std::collections::HashMap; #[derive(Hash, PartialEq, Eq)] enum Fruit { Apple, Banana, Mango, Lychee, Pineapple, } fn fruit_basket(basket: &mut HashMap<Fruit, u32>) { let fruit_kinds = vec![ Fruit::Apple, Fruit::Banana, Fruit::Mango, Fruit::Lychee, Fruit::Pineapple, ]; for fruit in fruit_kinds { // TODO: Put new fruits if not already present. Note that you // are not allowed to put any type of fruit that's already // present! basket.insert(Fruit::Banana, 4); basket.insert(Fruit::Pineapple, 1); } } #[cfg(test)] mod tests { use super::*; fn get_fruit_basket() -> HashMap<Fruit, u32> { let mut basket = HashMap::<Fruit, u32>::new(); basket.insert(Fruit::Apple, 4); basket.insert(Fruit::Mango, 2); basket.insert(Fruit::Lychee, 5); basket } #[test] fn test_given_fruits_are_not_modified() { let mut basket = get_fruit_basket(); fruit_basket(&mut basket); assert_eq!(*basket.get(&Fruit::Apple).unwrap(), 4); assert_eq!(*basket.get(&Fruit::Mango).unwrap(), 2); assert_eq!(*basket.get(&Fruit::Lychee).unwrap(), 5); } #[test] fn at_least_five_types_of_fruits() { let mut basket = get_fruit_basket(); fruit_basket(&mut basket); let count_fruit_kinds = basket.len(); assert!(count_fruit_kinds >= 5); } #[test] fn greater_than_eleven_fruits() { let mut basket = get_fruit_basket(); fruit_basket(&mut basket); let count = basket.values().sum::<u32>(); assert!(count > 11); } }
26.878049
69
0.6098
1d806fa8d6eb4784f0fa9997cde595f0424647cf
8,883
// solana-wallet CLI entry point.
// Reviewer notes (documentation-only pass; the code itself is unchanged):
// - `parse_args` converts the matched CLAP arguments into a `WalletConfig`:
//   the gossip entry point (default 127.0.0.1:8001 via `socketaddr!`),
//   optional gossip timeout and TLS proxy, and an `id` keypair read from
//   `--keypair` or from `~/.config/solana/id.json` — generated on first use by
//   `gen_keypair_file`. The parsed subcommand comes from `parse_command`.
//   NOTE(review): `.parse().expect("integer")` on `--timeout` panics on
//   non-numeric input rather than returning a `WalletError`.
// - `main` declares the full subcommand tree (address, airdrop, balance,
//   cancel, confirm, deploy, get-transaction-count, pay, send-signature,
//   send-timestamp) with their positional and flag arguments, then parses the
//   CLI, builds the config, runs `process_command`, and prints its result.
//   `--proxy` conflicts with `rpc-port`; `pay` supports conditional transfers
//   via `--after`, `--require-timestamp-from`, `--require-signature-from`,
//   and `--cancelable`.
#[macro_use] extern crate clap; extern crate dirs; #[macro_use] extern crate solana; use clap::{App, Arg, ArgMatches, SubCommand}; use solana::logger; use solana::signature::{read_keypair, KeypairUtil}; use solana::wallet::{gen_keypair_file, parse_command, process_command, WalletConfig, WalletError}; use std::error; use std::net::SocketAddr; pub fn parse_args(matches: &ArgMatches) -> Result<WalletConfig, Box<error::Error>> { let network = if let Some(addr) = matches.value_of("network") { addr.parse().or_else(|_| { Err(WalletError::BadParameter( "Invalid network location".to_string(), )) })? } else { socketaddr!("127.0.0.1:8001") }; let timeout = if let Some(secs) = matches.value_of("timeout") { Some(secs.to_string().parse().expect("integer")) } else { None }; let proxy = matches.value_of("proxy").map(|proxy| proxy.to_string()); let mut path = dirs::home_dir().expect("home directory"); let id_path = if matches.is_present("keypair") { matches.value_of("keypair").unwrap() } else { path.extend(&[".config", "solana", "id.json"]); if !path.exists() { gen_keypair_file(path.to_str().unwrap().to_string())?; println!("New keypair generated at: {:?}", path.to_str().unwrap()); } path.to_str().unwrap() }; let id = read_keypair(id_path).or_else(|err| { Err(WalletError::BadParameter(format!( "{}: Unable to open keypair file: {}", err, id_path ))) })?; let command = parse_command(id.pubkey(), &matches)?; Ok(WalletConfig { id, command, network, timeout, proxy, drone_port: None, }) } fn main() -> Result<(), Box<error::Error>> { logger::setup(); let matches = App::new("solana-wallet") .version(crate_version!()) .arg( Arg::with_name("network") .short("n") .long("network") .value_name("HOST:PORT") .takes_value(true) .help("Rendezvous with the network at this gossip entry point; defaults to 127.0.0.1:8001"), ).arg( Arg::with_name("keypair") .short("k") .long("keypair") .value_name("PATH") .takes_value(true) .help("/path/to/id.json"), ).arg( Arg::with_name("timeout") .long("timeout") 
.value_name("SECS") .takes_value(true) .help("Max seconds to wait to get necessary gossip from the network"), ).arg( Arg::with_name("proxy") .long("proxy") .takes_value(true) .value_name("URL") .help("Address of TLS proxy") .conflicts_with("rpc-port") ).subcommand(SubCommand::with_name("address").about("Get your public key")) .subcommand( SubCommand::with_name("airdrop") .about("Request a batch of tokens") .arg( Arg::with_name("tokens") .index(1) .value_name("NUM") .takes_value(true) .required(true) .help("The number of tokens to request"), ), ).subcommand(SubCommand::with_name("balance").about("Get your balance")) .subcommand( SubCommand::with_name("cancel") .about("Cancel a transfer") .arg( Arg::with_name("process-id") .index(1) .value_name("PROCESS_ID") .takes_value(true) .required(true) .help("The process id of the transfer to cancel"), ), ).subcommand( SubCommand::with_name("confirm") .about("Confirm transaction by signature") .arg( Arg::with_name("signature") .index(1) .value_name("SIGNATURE") .takes_value(true) .required(true) .help("The transaction signature to confirm"), ), ).subcommand( SubCommand::with_name("deploy") .about("Deploy a program") .arg( Arg::with_name("program-location") .index(1) .value_name("PATH") .takes_value(true) .required(true) .help("/path/to/program.o"), ) // TODO: Add "loader" argument; current default is bpf_loader ).subcommand( SubCommand::with_name("get-transaction-count") .about("Get current transaction count") ).subcommand( SubCommand::with_name("pay") .about("Send a payment") .arg( Arg::with_name("to") .index(1) .value_name("PUBKEY") .takes_value(true) .required(true) .help("The pubkey of recipient"), ).arg( Arg::with_name("tokens") .index(2) .value_name("NUM") .takes_value(true) .required(true) .help("The number of tokens to send"), ).arg( Arg::with_name("timestamp") .long("after") .value_name("DATETIME") .takes_value(true) .help("A timestamp after which transaction will execute"), ).arg( Arg::with_name("timestamp-pubkey") 
.long("require-timestamp-from") .value_name("PUBKEY") .takes_value(true) .requires("timestamp") .help("Require timestamp from this third party"), ).arg( Arg::with_name("witness") .long("require-signature-from") .value_name("PUBKEY") .takes_value(true) .multiple(true) .use_delimiter(true) .help("Any third party signatures required to unlock the tokens"), ).arg( Arg::with_name("cancelable") .long("cancelable") .takes_value(false), ), ).subcommand( SubCommand::with_name("send-signature") .about("Send a signature to authorize a transfer") .arg( Arg::with_name("to") .index(1) .value_name("PUBKEY") .takes_value(true) .required(true) .help("The pubkey of recipient"), ).arg( Arg::with_name("process-id") .index(2) .value_name("PROCESS_ID") .takes_value(true) .required(true) .help("The process id of the transfer to authorize") ) ).subcommand( SubCommand::with_name("send-timestamp") .about("Send a timestamp to unlock a transfer") .arg( Arg::with_name("to") .index(1) .value_name("PUBKEY") .takes_value(true) .required(true) .help("The pubkey of recipient"), ).arg( Arg::with_name("process-id") .index(2) .value_name("PROCESS_ID") .takes_value(true) .required(true) .help("The process id of the transfer to unlock") ).arg( Arg::with_name("datetime") .long("date") .value_name("DATETIME") .takes_value(true) .help("Optional arbitrary timestamp to apply") ) ).get_matches(); let config = parse_args(&matches)?; let result = process_command(&config)?; println!("{}", result); Ok(()) }
37.639831
108
0.441968
0af57bbe24f2474c75ad7866fba741629eb48a5a
1,556
use crate::entities::traits::user::UserStore; use crate::entities::models::{Folder, User}; use crate::entities::error::DataStoreError; use crate::entities::builders::{Builder, FolderBuilder, UserBuilder}; pub struct UserStoreMock; impl UserStoreMock { pub fn new() -> Self { Self } } impl UserStore for UserStoreMock { fn all(&self) -> Result<Vec<User>, DataStoreError> { let users = vec![ factory!(User), factory!(User), factory!(User), factory!(User), factory!(User) ]; Ok(users) } fn find_by_user_id(&self, user_id: i32) -> Result<User, DataStoreError> { let mut user = factory!(User); user.set_id(user_id); Ok(user) } fn save(&self, user: &User) -> Result<User, DataStoreError> { Ok(user.clone()) } fn update(&self, user: &User) -> Result<User, DataStoreError> { Ok(user.clone()) } fn delete(&self, user: &User) -> Result<User, DataStoreError> { Ok(user.clone()) } fn update_password(&self, user: &User) -> Result<User, DataStoreError> { Ok(user.clone()) } fn folders(&self, user: &User) -> Result<Vec<Folder>, DataStoreError> { let folders = vec![ factory!(Folder, user.id(), None), factory!(Folder, user.id(), None), factory!(Folder, user.id(), None), factory!(Folder, user.id(), None), factory!(Folder, user.id(), None), ]; Ok(folders) } }
24.698413
77
0.554627
09e74a9db64c42d7f1ce16a9d051d6fe5bf3125d
52,347
//! Process window events. use std::borrow::Cow; use std::cmp::{max, min}; use std::collections::{HashMap, VecDeque}; use std::error::Error; use std::fmt::Debug; #[cfg(not(any(target_os = "macos", windows)))] use std::fs; use std::path::PathBuf; use std::time::{Duration, Instant}; use std::{env, f32, mem}; use glutin::dpi::PhysicalSize; use glutin::event::{ElementState, Event as GlutinEvent, ModifiersState, MouseButton, WindowEvent}; use glutin::event_loop::{ControlFlow, EventLoop, EventLoopProxy, EventLoopWindowTarget}; use glutin::platform::run_return::EventLoopExtRunReturn; #[cfg(all(feature = "wayland", not(any(target_os = "macos", windows))))] use glutin::platform::unix::EventLoopWindowTargetExtUnix; use glutin::window::WindowId; use log::{error, info}; #[cfg(all(feature = "wayland", not(any(target_os = "macos", windows))))] use wayland_client::{Display as WaylandDisplay, EventQueue}; use crossfont::{self, Size}; use alacritty_terminal::config::LOG_TARGET_CONFIG; use alacritty_terminal::event::{Event as TerminalEvent, EventListener, Notify}; use alacritty_terminal::event_loop::Notifier; use alacritty_terminal::grid::{Dimensions, Scroll}; use alacritty_terminal::index::{Boundary, Column, Direction, Line, Point, Side}; use alacritty_terminal::selection::{Selection, SelectionType}; use alacritty_terminal::term::search::{Match, RegexSearch}; use alacritty_terminal::term::{ClipboardType, SizeInfo, Term, TermMode}; #[cfg(not(windows))] use alacritty_terminal::tty; use crate::cli::Options as CliOptions; use crate::clipboard::Clipboard; use crate::config::ui_config::{HintAction, HintInternalAction}; use crate::config::{self, Config}; use crate::daemon::start_daemon; use crate::display::hint::HintMatch; use crate::display::window::Window; use crate::display::{self, Display}; use crate::input::{self, ActionContext as _, FONT_SIZE_STEP}; #[cfg(target_os = "macos")] use crate::macos; use crate::message_bar::{Message, MessageBuffer}; use crate::scheduler::{Scheduler, 
TimerId, Topic}; use crate::window_context::WindowContext; /// Duration after the last user input until an unlimited search is performed. pub const TYPING_SEARCH_DELAY: Duration = Duration::from_millis(500); /// Maximum number of lines for the blocking search while still typing the search regex. const MAX_SEARCH_WHILE_TYPING: Option<usize> = Some(1000); /// Maximum number of search terms stored in the history. const MAX_SEARCH_HISTORY_SIZE: usize = 255; /// Alacritty events. #[derive(Debug, Clone)] pub struct Event { /// Limit event to a specific window. window_id: Option<WindowId>, /// Event payload. payload: EventType, } impl Event { pub fn new<I: Into<Option<WindowId>>>(payload: EventType, window_id: I) -> Self { Self { window_id: window_id.into(), payload } } } impl From<Event> for GlutinEvent<'_, Event> { fn from(event: Event) -> Self { GlutinEvent::UserEvent(event) } } /// Alacritty events. #[derive(Debug, Clone)] pub enum EventType { DprChanged(f64, (u32, u32)), Terminal(TerminalEvent), ConfigReload(PathBuf), Message(Message), Scroll(Scroll), CreateWindow, BlinkCursor, SearchNext, } impl From<TerminalEvent> for EventType { fn from(event: TerminalEvent) -> Self { Self::Terminal(event) } } /// Regex search state. pub struct SearchState { /// Search direction. pub direction: Direction, /// Current position in the search history. pub history_index: Option<usize>, /// Change in display offset since the beginning of the search. display_offset_delta: i32, /// Search origin in viewport coordinates relative to original display offset. origin: Point, /// Focused match during active search. focused_match: Option<Match>, /// Search regex and history. /// /// During an active search, the first element is the user's current input. /// /// While going through history, the [`SearchState::history_index`] will point to the element /// in history which is currently being previewed. history: VecDeque<String>, /// Compiled search automatons. 
dfas: Option<RegexSearch>, } impl SearchState { /// Search regex text if a search is active. pub fn regex(&self) -> Option<&String> { self.history_index.and_then(|index| self.history.get(index)) } /// Direction of the search from the search origin. pub fn direction(&self) -> Direction { self.direction } /// Focused match during vi-less search. pub fn focused_match(&self) -> Option<&Match> { self.focused_match.as_ref() } /// Active search dfas. pub fn dfas(&self) -> Option<&RegexSearch> { self.dfas.as_ref() } /// Search regex text if a search is active. fn regex_mut(&mut self) -> Option<&mut String> { self.history_index.and_then(move |index| self.history.get_mut(index)) } } impl Default for SearchState { fn default() -> Self { Self { direction: Direction::Right, display_offset_delta: Default::default(), focused_match: Default::default(), history_index: Default::default(), history: Default::default(), origin: Default::default(), dfas: Default::default(), } } } pub struct ActionContext<'a, N, T> { pub notifier: &'a mut N, pub terminal: &'a mut Term<T>, pub clipboard: &'a mut Clipboard, pub mouse: &'a mut Mouse, pub received_count: &'a mut usize, pub suppress_chars: &'a mut bool, pub modifiers: &'a mut ModifiersState, pub display: &'a mut Display, pub message_buffer: &'a mut MessageBuffer, pub config: &'a mut Config, pub event_loop: &'a EventLoopWindowTarget<Event>, pub event_proxy: &'a EventLoopProxy<Event>, pub scheduler: &'a mut Scheduler, pub search_state: &'a mut SearchState, pub font_size: &'a mut Size, pub dirty: &'a mut bool, } impl<'a, N: Notify + 'a, T: EventListener> input::ActionContext<T> for ActionContext<'a, N, T> { #[inline] fn write_to_pty<B: Into<Cow<'static, [u8]>>>(&self, val: B) { self.notifier.notify(val); } /// Request a redraw. 
#[inline] fn mark_dirty(&mut self) { *self.dirty = true; } #[inline] fn size_info(&self) -> SizeInfo { self.display.size_info } fn scroll(&mut self, scroll: Scroll) { let old_offset = self.terminal.grid().display_offset() as i32; self.terminal.scroll_display(scroll); // Keep track of manual display offset changes during search. if self.search_active() { let display_offset = self.terminal.grid().display_offset(); self.search_state.display_offset_delta += old_offset - display_offset as i32; } // Update selection. if self.terminal.mode().contains(TermMode::VI) && self.terminal.selection.as_ref().map_or(true, |s| !s.is_empty()) { self.update_selection(self.terminal.vi_mode_cursor.point, Side::Right); } else if self.mouse.left_button_state == ElementState::Pressed || self.mouse.right_button_state == ElementState::Pressed { let display_offset = self.terminal.grid().display_offset(); let point = self.mouse.point(&self.size_info(), display_offset); self.update_selection(point, self.mouse.cell_side); } self.copy_selection(ClipboardType::Selection); *self.dirty = true; } // Copy text selection. fn copy_selection(&mut self, ty: ClipboardType) { let text = match self.terminal.selection_to_string().filter(|s| !s.is_empty()) { Some(text) => text, None => return, }; if ty == ClipboardType::Selection && self.config.selection.save_to_clipboard { self.clipboard.store(ClipboardType::Clipboard, text.clone()); } self.clipboard.store(ty, text); } fn selection_is_empty(&self) -> bool { self.terminal.selection.as_ref().map(Selection::is_empty).unwrap_or(true) } fn clear_selection(&mut self) { self.terminal.selection = None; *self.dirty = true; } fn update_selection(&mut self, mut point: Point, side: Side) { let mut selection = match self.terminal.selection.take() { Some(selection) => selection, None => return, }; // Treat motion over message bar like motion over the last line. point.line = min(point.line, self.terminal.bottommost_line()); // Update selection. 
selection.update(point, side); // Move vi cursor and expand selection. if self.terminal.mode().contains(TermMode::VI) && !self.search_active() { self.terminal.vi_mode_cursor.point = point; selection.include_all(); } self.terminal.selection = Some(selection); *self.dirty = true; } fn start_selection(&mut self, ty: SelectionType, point: Point, side: Side) { self.terminal.selection = Some(Selection::new(ty, point, side)); *self.dirty = true; self.copy_selection(ClipboardType::Selection); } fn toggle_selection(&mut self, ty: SelectionType, point: Point, side: Side) { match &mut self.terminal.selection { Some(selection) if selection.ty == ty && !selection.is_empty() => { self.clear_selection(); }, Some(selection) if !selection.is_empty() => { selection.ty = ty; *self.dirty = true; self.copy_selection(ClipboardType::Selection); }, _ => self.start_selection(ty, point, side), } } #[inline] fn mouse_mode(&self) -> bool { self.terminal.mode().intersects(TermMode::MOUSE_MODE) && !self.terminal.mode().contains(TermMode::VI) } #[inline] fn mouse_mut(&mut self) -> &mut Mouse { self.mouse } #[inline] fn mouse(&self) -> &Mouse { self.mouse } #[inline] fn received_count(&mut self) -> &mut usize { &mut self.received_count } #[inline] fn suppress_chars(&mut self) -> &mut bool { &mut self.suppress_chars } #[inline] fn modifiers(&mut self) -> &mut ModifiersState { &mut self.modifiers } #[inline] fn window(&mut self) -> &mut Window { &mut self.display.window } #[inline] fn display(&mut self) -> &mut Display { &mut self.display } #[inline] fn terminal(&self) -> &Term<T> { self.terminal } #[inline] fn terminal_mut(&mut self) -> &mut Term<T> { self.terminal } fn spawn_new_instance(&mut self) { let mut env_args = env::args(); let alacritty = env_args.next().unwrap(); #[cfg(unix)] let mut args = { // Use working directory of controlling process, or fallback to initial shell. 
let mut pid = unsafe { libc::tcgetpgrp(tty::master_fd()) }; if pid < 0 { pid = tty::child_pid(); } #[cfg(not(any(target_os = "macos", target_os = "freebsd")))] let link_path = format!("/proc/{}/cwd", pid); #[cfg(target_os = "freebsd")] let link_path = format!("/compat/linux/proc/{}/cwd", pid); #[cfg(not(target_os = "macos"))] let cwd = fs::read_link(link_path); #[cfg(target_os = "macos")] let cwd = macos::proc::cwd(pid); // Add the current working directory as parameter. cwd.map(|path| vec!["--working-directory".into(), path]).unwrap_or_default() }; #[cfg(not(unix))] let mut args: Vec<PathBuf> = Vec::new(); let working_directory_set = !args.is_empty(); // Reuse the arguments passed to Alacritty for the new instance. while let Some(arg) = env_args.next() { // Drop working directory from existing parameters. if working_directory_set && arg == "--working-directory" { let _ = env_args.next(); continue; } args.push(arg.into()); } start_daemon(&alacritty, &args); } fn create_new_window(&mut self) { let _ = self.event_proxy.send_event(Event::new(EventType::CreateWindow, None)); } fn change_font_size(&mut self, delta: f32) { *self.font_size = max(*self.font_size + delta, Size::new(FONT_SIZE_STEP)); let font = self.config.ui_config.font.clone().with_size(*self.font_size); self.display.pending_update.set_font(font); *self.dirty = true; } fn reset_font_size(&mut self) { *self.font_size = self.config.ui_config.font.size(); self.display.pending_update.set_font(self.config.ui_config.font.clone()); *self.dirty = true; } #[inline] fn pop_message(&mut self) { if !self.message_buffer.is_empty() { self.display.pending_update.dirty = true; self.message_buffer.pop(); *self.dirty = true; } } #[inline] fn start_search(&mut self, direction: Direction) { // Only create new history entry if the previous regex wasn't empty. 
if self.search_state.history.get(0).map_or(true, |regex| !regex.is_empty()) { self.search_state.history.push_front(String::new()); self.search_state.history.truncate(MAX_SEARCH_HISTORY_SIZE); } self.search_state.history_index = Some(0); self.search_state.direction = direction; self.search_state.focused_match = None; // Store original search position as origin and reset location. if self.terminal.mode().contains(TermMode::VI) { self.search_state.origin = self.terminal.vi_mode_cursor.point; self.search_state.display_offset_delta = 0; // Adjust origin for content moving upward on search start. if self.terminal.grid().cursor.point.line + 1 == self.terminal.screen_lines() { self.search_state.origin.line -= 1; } } else { let viewport_top = Line(-(self.terminal.grid().display_offset() as i32)) - 1; let viewport_bottom = viewport_top + self.terminal.bottommost_line(); let last_column = self.terminal.last_column(); self.search_state.origin = match direction { Direction::Right => Point::new(viewport_top, Column(0)), Direction::Left => Point::new(viewport_bottom, last_column), }; } self.display.pending_update.dirty = true; *self.dirty = true; } #[inline] fn confirm_search(&mut self) { // Just cancel search when not in vi mode. if !self.terminal.mode().contains(TermMode::VI) { self.cancel_search(); return; } // Force unlimited search if the previous one was interrupted. let timer_id = TimerId::new(Topic::DelayedSearch, self.display.window.id()); if self.scheduler.scheduled(timer_id) { self.goto_match(None); } self.exit_search(); } #[inline] fn cancel_search(&mut self) { if self.terminal.mode().contains(TermMode::VI) { // Recover pre-search state in vi mode. self.search_reset_state(); } else if let Some(focused_match) = &self.search_state.focused_match { // Create a selection for the focused match. 
let start = *focused_match.start(); let end = *focused_match.end(); self.start_selection(SelectionType::Simple, start, Side::Left); self.update_selection(end, Side::Right); self.copy_selection(ClipboardType::Selection); } self.search_state.dfas = None; self.exit_search(); } #[inline] fn search_input(&mut self, c: char) { match self.search_state.history_index { Some(0) => (), // When currently in history, replace active regex with history on change. Some(index) => { self.search_state.history[0] = self.search_state.history[index].clone(); self.search_state.history_index = Some(0); }, None => return, } let regex = &mut self.search_state.history[0]; match c { // Handle backspace/ctrl+h. '\x08' | '\x7f' => { let _ = regex.pop(); }, // Add ascii and unicode text. ' '..='~' | '\u{a0}'..='\u{10ffff}' => regex.push(c), // Ignore non-printable characters. _ => return, } if !self.terminal.mode().contains(TermMode::VI) { // Clear selection so we do not obstruct any matches. self.terminal.selection = None; } self.update_search(); } #[inline] fn search_pop_word(&mut self) { if let Some(regex) = self.search_state.regex_mut() { *regex = regex.trim_end().to_owned(); regex.truncate(regex.rfind(' ').map(|i| i + 1).unwrap_or(0)); self.update_search(); } } /// Go to the previous regex in the search history. #[inline] fn search_history_previous(&mut self) { let index = match &mut self.search_state.history_index { None => return, Some(index) if *index + 1 >= self.search_state.history.len() => return, Some(index) => index, }; *index += 1; self.update_search(); } /// Go to the previous regex in the search history. #[inline] fn search_history_next(&mut self) { let index = match &mut self.search_state.history_index { Some(0) | None => return, Some(index) => index, }; *index -= 1; self.update_search(); } #[inline] fn advance_search_origin(&mut self, direction: Direction) { // Use focused match as new search origin if available. 
if let Some(focused_match) = &self.search_state.focused_match { let new_origin = match direction { Direction::Right => focused_match.end().add(self.terminal, Boundary::None, 1), Direction::Left => focused_match.start().sub(self.terminal, Boundary::None, 1), }; self.terminal.scroll_to_point(new_origin); self.search_state.display_offset_delta = 0; self.search_state.origin = new_origin; } // Search for the next match using the supplied direction. let search_direction = mem::replace(&mut self.search_state.direction, direction); self.goto_match(None); self.search_state.direction = search_direction; // If we found a match, we set the search origin right in front of it to make sure that // after modifications to the regex the search is started without moving the focused match // around. let focused_match = match &self.search_state.focused_match { Some(focused_match) => focused_match, None => return, }; // Set new origin to the left/right of the match, depending on search direction. let new_origin = match self.search_state.direction { Direction::Right => *focused_match.start(), Direction::Left => *focused_match.end(), }; // Store the search origin with display offset by checking how far we need to scroll to it. let old_display_offset = self.terminal.grid().display_offset() as i32; self.terminal.scroll_to_point(new_origin); let new_display_offset = self.terminal.grid().display_offset() as i32; self.search_state.display_offset_delta = new_display_offset - old_display_offset; // Store origin and scroll back to the match. self.terminal.scroll_display(Scroll::Delta(-self.search_state.display_offset_delta)); self.search_state.origin = new_origin; } /// Find the next search match. 
fn search_next(&mut self, origin: Point, direction: Direction, side: Side) -> Option<Match> { self.search_state .dfas .as_ref() .and_then(|dfas| self.terminal.search_next(dfas, origin, direction, side, None)) } #[inline] fn search_direction(&self) -> Direction { self.search_state.direction } #[inline] fn search_active(&self) -> bool { self.search_state.history_index.is_some() } /// Handle keyboard typing start. /// /// This will temporarily disable some features like terminal cursor blinking or the mouse /// cursor. /// /// All features are re-enabled again automatically. #[inline] fn on_typing_start(&mut self) { // Disable cursor blinking. let timer_id = TimerId::new(Topic::BlinkCursor, self.display.window.id()); if let Some(timer) = self.scheduler.unschedule(timer_id) { let interval = Duration::from_millis(self.config.cursor.blink_interval()); self.scheduler.schedule(timer.event, interval, true, timer.id); self.display.cursor_hidden = false; *self.dirty = true; } // Hide mouse cursor. if self.config.ui_config.mouse.hide_when_typing { self.display.window.set_mouse_visible(false); } } /// Process a new character for keyboard hints. fn hint_input(&mut self, c: char) { if let Some(hint) = self.display.hint_state.keyboard_input(self.terminal, c) { self.mouse.block_hint_launcher = false; self.trigger_hint(&hint); } *self.dirty = true; } /// Trigger a hint action. fn trigger_hint(&mut self, hint: &HintMatch) { if self.mouse.block_hint_launcher { return; } match &hint.action { // Launch an external program. HintAction::Command(command) => { let text = self.terminal.bounds_to_string(*hint.bounds.start(), *hint.bounds.end()); let mut args = command.args().to_vec(); args.push(text); start_daemon(command.program(), &args); }, // Copy the text to the clipboard. 
HintAction::Action(HintInternalAction::Copy) => { let text = self.terminal.bounds_to_string(*hint.bounds.start(), *hint.bounds.end()); self.clipboard.store(ClipboardType::Clipboard, text); }, // Write the text to the PTY/search. HintAction::Action(HintInternalAction::Paste) => { let text = self.terminal.bounds_to_string(*hint.bounds.start(), *hint.bounds.end()); self.paste(&text); }, // Select the text. HintAction::Action(HintInternalAction::Select) => { self.start_selection(SelectionType::Simple, *hint.bounds.start(), Side::Left); self.update_selection(*hint.bounds.end(), Side::Right); self.copy_selection(ClipboardType::Selection); }, // Move the vi mode cursor. HintAction::Action(HintInternalAction::MoveViModeCursor) => { // Enter vi mode if we're not in it already. if !self.terminal.mode().contains(TermMode::VI) { self.terminal.toggle_vi_mode(); } self.terminal.vi_goto_point(*hint.bounds.start()); }, } } /// Expand the selection to the current mouse cursor position. #[inline] fn expand_selection(&mut self) { let selection_type = match self.mouse().click_state { ClickState::Click => { if self.modifiers().ctrl() { SelectionType::Block } else { SelectionType::Simple } }, ClickState::DoubleClick => SelectionType::Semantic, ClickState::TripleClick => SelectionType::Lines, ClickState::None => return, }; // Load mouse point, treating message bar and padding as the closest cell. let display_offset = self.terminal().grid().display_offset(); let point = self.mouse().point(&self.size_info(), display_offset); let cell_side = self.mouse().cell_side; let selection = match &mut self.terminal_mut().selection { Some(selection) => selection, None => return, }; selection.ty = selection_type; self.update_selection(point, cell_side); // Move vi mode cursor to mouse click position. if self.terminal().mode().contains(TermMode::VI) && !self.search_active() { self.terminal_mut().vi_mode_cursor.point = point; } } /// Paste a text into the terminal. 
fn paste(&mut self, text: &str) { if self.search_active() { for c in text.chars() { self.search_input(c); } } else if self.terminal().mode().contains(TermMode::BRACKETED_PASTE) { self.write_to_pty(&b"\x1b[200~"[..]); self.write_to_pty(text.replace("\x1b", "").into_bytes()); self.write_to_pty(&b"\x1b[201~"[..]); } else { // In non-bracketed (ie: normal) mode, terminal applications cannot distinguish // pasted data from keystrokes. // In theory, we should construct the keystrokes needed to produce the data we are // pasting... since that's neither practical nor sensible (and probably an impossible // task to solve in a general way), we'll just replace line breaks (windows and unix // style) with a single carriage return (\r, which is what the Enter key produces). self.write_to_pty(text.replace("\r\n", "\r").replace("\n", "\r").into_bytes()); } } /// Toggle the vi mode status. #[inline] fn toggle_vi_mode(&mut self) { if !self.terminal.mode().contains(TermMode::VI) { self.clear_selection(); } self.cancel_search(); self.terminal.toggle_vi_mode(); *self.dirty = true; } fn message(&self) -> Option<&Message> { self.message_buffer.message() } fn config(&self) -> &Config { self.config } fn event_loop(&self) -> &EventLoopWindowTarget<Event> { self.event_loop } fn clipboard_mut(&mut self) -> &mut Clipboard { self.clipboard } fn scheduler_mut(&mut self) -> &mut Scheduler { self.scheduler } } impl<'a, N: Notify + 'a, T: EventListener> ActionContext<'a, N, T> { fn update_search(&mut self) { let regex = match self.search_state.regex() { Some(regex) => regex, None => return, }; // Hide cursor while typing into the search bar. if self.config.ui_config.mouse.hide_when_typing { self.display.window.set_mouse_visible(false); } if regex.is_empty() { // Stop search if there's nothing to search for. self.search_reset_state(); self.search_state.dfas = None; } else { // Create search dfas for the new regex string. 
self.search_state.dfas = RegexSearch::new(regex).ok(); // Update search highlighting. self.goto_match(MAX_SEARCH_WHILE_TYPING); } *self.dirty = true; } /// Reset terminal to the state before search was started. fn search_reset_state(&mut self) { // Unschedule pending timers. let timer_id = TimerId::new(Topic::DelayedSearch, self.display.window.id()); self.scheduler.unschedule(timer_id); // Clear focused match. self.search_state.focused_match = None; // The viewport reset logic is only needed for vi mode, since without it our origin is // always at the current display offset instead of at the vi cursor position which we need // to recover to. if !self.terminal.mode().contains(TermMode::VI) { return; } // Reset display offset and cursor position. self.terminal.scroll_display(Scroll::Delta(self.search_state.display_offset_delta)); self.search_state.display_offset_delta = 0; self.terminal.vi_mode_cursor.point = self.search_state.origin.grid_clamp(self.terminal, Boundary::Grid); *self.dirty = true; } /// Jump to the first regex match from the search origin. fn goto_match(&mut self, mut limit: Option<usize>) { let dfas = match &self.search_state.dfas { Some(dfas) => dfas, None => return, }; // Limit search only when enough lines are available to run into the limit. limit = limit.filter(|&limit| limit <= self.terminal.total_lines()); // Jump to the next match. let direction = self.search_state.direction; let clamped_origin = self.search_state.origin.grid_clamp(self.terminal, Boundary::Grid); match self.terminal.search_next(dfas, clamped_origin, direction, Side::Left, limit) { Some(regex_match) => { let old_offset = self.terminal.grid().display_offset() as i32; if self.terminal.mode().contains(TermMode::VI) { // Move vi cursor to the start of the match. self.terminal.vi_goto_point(*regex_match.start()); } else { // Select the match when vi mode is not active. self.terminal.scroll_to_point(*regex_match.start()); } // Update the focused match. 
self.search_state.focused_match = Some(regex_match); // Store number of lines the viewport had to be moved. let display_offset = self.terminal.grid().display_offset(); self.search_state.display_offset_delta += old_offset - display_offset as i32; // Since we found a result, we require no delayed re-search. let timer_id = TimerId::new(Topic::DelayedSearch, self.display.window.id()); self.scheduler.unschedule(timer_id); }, // Reset viewport only when we know there is no match, to prevent unnecessary jumping. None if limit.is_none() => self.search_reset_state(), None => { // Schedule delayed search if we ran into our search limit. let timer_id = TimerId::new(Topic::DelayedSearch, self.display.window.id()); if !self.scheduler.scheduled(timer_id) { let event = Event::new(EventType::SearchNext, self.display.window.id()); self.scheduler.schedule(event, TYPING_SEARCH_DELAY, false, timer_id); } // Clear focused match. self.search_state.focused_match = None; }, } *self.dirty = true; } /// Cleanup the search state. fn exit_search(&mut self) { self.display.pending_update.dirty = true; self.search_state.history_index = None; *self.dirty = true; // Clear focused match. self.search_state.focused_match = None; } /// Update the cursor blinking state. fn update_cursor_blinking(&mut self) { // Get config cursor style. let mut cursor_style = self.config.cursor.style; if self.terminal.mode().contains(TermMode::VI) { cursor_style = self.config.cursor.vi_mode_style.unwrap_or(cursor_style); }; // Check terminal cursor style. let terminal_blinking = self.terminal.cursor_style().blinking; let blinking = cursor_style.blinking_override().unwrap_or(terminal_blinking); // Update cursor blinking state. 
let timer_id = TimerId::new(Topic::BlinkCursor, self.display.window.id()); self.scheduler.unschedule(timer_id); if blinking && self.terminal.is_focused { let event = Event::new(EventType::BlinkCursor, self.display.window.id()); let interval = Duration::from_millis(self.config.cursor.blink_interval()); self.scheduler.schedule(event, interval, true, timer_id); } else { self.display.cursor_hidden = false; *self.dirty = true; } } } #[derive(Debug, Eq, PartialEq)] pub enum ClickState { None, Click, DoubleClick, TripleClick, } /// State of the mouse. #[derive(Debug)] pub struct Mouse { pub left_button_state: ElementState, pub middle_button_state: ElementState, pub right_button_state: ElementState, pub last_click_timestamp: Instant, pub last_click_button: MouseButton, pub click_state: ClickState, pub scroll_px: f64, pub cell_side: Side, pub lines_scrolled: f32, pub block_hint_launcher: bool, pub hint_highlight_dirty: bool, pub inside_text_area: bool, pub x: usize, pub y: usize, } impl Default for Mouse { fn default() -> Mouse { Mouse { last_click_timestamp: Instant::now(), last_click_button: MouseButton::Left, left_button_state: ElementState::Released, middle_button_state: ElementState::Released, right_button_state: ElementState::Released, click_state: ClickState::None, cell_side: Side::Left, hint_highlight_dirty: Default::default(), block_hint_launcher: Default::default(), inside_text_area: Default::default(), lines_scrolled: Default::default(), scroll_px: Default::default(), x: Default::default(), y: Default::default(), } } } impl Mouse { /// Convert mouse pixel coordinates to viewport point. /// /// If the coordinates are outside of the terminal grid, like positions inside the padding, the /// coordinates will be clamped to the closest grid coordinates. 
#[inline] pub fn point(&self, size: &SizeInfo, display_offset: usize) -> Point { let col = self.x.saturating_sub(size.padding_x() as usize) / (size.cell_width() as usize); let col = min(Column(col), size.last_column()); let line = self.y.saturating_sub(size.padding_y() as usize) / (size.cell_height() as usize); let line = min(line, size.bottommost_line().0 as usize); display::viewport_to_point(display_offset, Point::new(line, col)) } } impl input::Processor<EventProxy, ActionContext<'_, Notifier, EventProxy>> { /// Handle events from glutin. /// /// Doesn't take self mutably due to borrow checking. pub fn handle_event(&mut self, event: GlutinEvent<'_, Event>) { match event { GlutinEvent::UserEvent(Event { payload, .. }) => match payload { EventType::DprChanged(scale_factor, (width, height)) => { let display_update_pending = &mut self.ctx.display.pending_update; // Push current font to update its DPR. let font = self.ctx.config.ui_config.font.clone(); display_update_pending.set_font(font.with_size(*self.ctx.font_size)); // Resize to event's dimensions, since no resize event is emitted on Wayland. 
display_update_pending.set_dimensions(PhysicalSize::new(width, height)); self.ctx.window().dpr = scale_factor; *self.ctx.dirty = true; }, EventType::SearchNext => self.ctx.goto_match(None), EventType::Scroll(scroll) => self.ctx.scroll(scroll), EventType::BlinkCursor => { self.ctx.display.cursor_hidden ^= true; *self.ctx.dirty = true; }, EventType::Message(message) => { self.ctx.message_buffer.push(message); self.ctx.display.pending_update.dirty = true; *self.ctx.dirty = true; }, EventType::Terminal(event) => match event { TerminalEvent::Title(title) => { let ui_config = &self.ctx.config.ui_config; if ui_config.window.dynamic_title { self.ctx.window().set_title(&title); } }, TerminalEvent::ResetTitle => { let ui_config = &self.ctx.config.ui_config; if ui_config.window.dynamic_title { self.ctx.display.window.set_title(&ui_config.window.title); } }, TerminalEvent::Wakeup => *self.ctx.dirty = true, TerminalEvent::Bell => { // Set window urgency. if self.ctx.terminal.mode().contains(TermMode::URGENCY_HINTS) { let focused = self.ctx.terminal.is_focused; self.ctx.window().set_urgent(!focused); } // Ring visual bell. self.ctx.display.visual_bell.ring(); // Execute bell command. 
if let Some(bell_command) = &self.ctx.config.ui_config.bell.command { start_daemon(bell_command.program(), bell_command.args()); } }, TerminalEvent::ClipboardStore(clipboard_type, content) => { self.ctx.clipboard.store(clipboard_type, content); }, TerminalEvent::ClipboardLoad(clipboard_type, format) => { let text = format(self.ctx.clipboard.load(clipboard_type).as_str()); self.ctx.write_to_pty(text.into_bytes()); }, TerminalEvent::ColorRequest(index, format) => { let text = format(self.ctx.display.colors[index]); self.ctx.write_to_pty(text.into_bytes()); }, TerminalEvent::PtyWrite(text) => self.ctx.write_to_pty(text.into_bytes()), TerminalEvent::MouseCursorDirty => self.reset_mouse_cursor(), TerminalEvent::Exit => (), TerminalEvent::CursorBlinkingChange => self.ctx.update_cursor_blinking(), }, EventType::ConfigReload(_) | EventType::CreateWindow => (), }, GlutinEvent::RedrawRequested(_) => *self.ctx.dirty = true, GlutinEvent::WindowEvent { event, .. } => { match event { WindowEvent::CloseRequested => self.ctx.terminal.exit(), WindowEvent::Resized(size) => { // Minimizing the window sends a Resize event with zero width and // height. But there's no need to ever actually resize to this. // ConPTY has issues when resizing down to zero size and back. #[cfg(windows)] if size.width == 0 && size.height == 0 { return; } self.ctx.display.pending_update.set_dimensions(size); *self.ctx.dirty = true; }, WindowEvent::KeyboardInput { input, is_synthetic: false, .. } => { self.key_input(input); }, WindowEvent::ModifiersChanged(modifiers) => self.modifiers_input(modifiers), WindowEvent::ReceivedCharacter(c) => self.received_char(c), WindowEvent::MouseInput { state, button, .. } => { self.ctx.window().set_mouse_visible(true); self.mouse_input(state, button); *self.ctx.dirty = true; }, WindowEvent::CursorMoved { position, .. } => { self.ctx.window().set_mouse_visible(true); self.mouse_moved(position); }, WindowEvent::MouseWheel { delta, phase, .. 
} => { self.ctx.window().set_mouse_visible(true); self.mouse_wheel_input(delta, phase); }, WindowEvent::Focused(is_focused) => { self.ctx.terminal.is_focused = is_focused; *self.ctx.dirty = true; if is_focused { self.ctx.window().set_urgent(false); } else { self.ctx.window().set_mouse_visible(true); } self.ctx.update_cursor_blinking(); self.on_focus_change(is_focused); }, WindowEvent::DroppedFile(path) => { let path: String = path.to_string_lossy().into(); self.ctx.write_to_pty((path + " ").into_bytes()); }, WindowEvent::CursorLeft { .. } => { self.ctx.mouse.inside_text_area = false; if self.ctx.display().highlighted_hint.is_some() { *self.ctx.dirty = true; } }, WindowEvent::KeyboardInput { is_synthetic: true, .. } | WindowEvent::TouchpadPressure { .. } | WindowEvent::ScaleFactorChanged { .. } | WindowEvent::CursorEntered { .. } | WindowEvent::AxisMotion { .. } | WindowEvent::HoveredFileCancelled | WindowEvent::Destroyed | WindowEvent::ThemeChanged(_) | WindowEvent::HoveredFile(_) | WindowEvent::Touch(_) | WindowEvent::Moved(_) => (), } }, GlutinEvent::Suspended { .. } | GlutinEvent::NewEvents { .. } | GlutinEvent::DeviceEvent { .. } | GlutinEvent::MainEventsCleared | GlutinEvent::RedrawEventsCleared | GlutinEvent::Resumed | GlutinEvent::LoopDestroyed => (), } } } /// The event processor. /// /// Stores some state from received events and dispatches actions when they are /// triggered. pub struct Processor { #[cfg(all(feature = "wayland", not(any(target_os = "macos", windows))))] wayland_event_queue: Option<EventQueue>, windows: HashMap<WindowId, WindowContext>, cli_options: CliOptions, config: Config, } impl Processor { /// Create a new event processor. /// /// Takes a writer which is expected to be hooked up to the write end of a PTY. pub fn new( config: Config, cli_options: CliOptions, _event_loop: &EventLoop<Event>, ) -> Processor { // Initialize Wayland event queue, to handle Wayland callbacks. 
#[cfg(all(feature = "wayland", not(any(target_os = "macos", windows))))] let wayland_event_queue = _event_loop.wayland_display().map(|display| { let display = unsafe { WaylandDisplay::from_external_display(display as _) }; display.create_event_queue() }); Processor { windows: HashMap::new(), cli_options, config, #[cfg(all(feature = "wayland", not(any(target_os = "macos", windows))))] wayland_event_queue, } } /// Create a new terminal window. pub fn create_window( &mut self, event_loop: &EventLoopWindowTarget<Event>, proxy: EventLoopProxy<Event>, ) -> Result<(), Box<dyn Error>> { let window_context = WindowContext::new( &self.config, event_loop, proxy, #[cfg(all(feature = "wayland", not(any(target_os = "macos", windows))))] self.wayland_event_queue.as_ref(), )?; self.windows.insert(window_context.id(), window_context); Ok(()) } /// Run the event loop. pub fn run(&mut self, mut event_loop: EventLoop<Event>) { let proxy = event_loop.create_proxy(); let mut scheduler = Scheduler::new(proxy.clone()); // NOTE: Since this takes a pointer to the winit event loop, it MUST be dropped first. #[cfg(all(feature = "wayland", not(any(target_os = "macos", windows))))] let mut clipboard = unsafe { Clipboard::new(event_loop.wayland_display()) }; #[cfg(any(not(feature = "wayland"), target_os = "macos", windows))] let mut clipboard = Clipboard::new(); event_loop.run_return(|event, event_loop, control_flow| { if self.config.ui_config.debug.print_events { info!("glutin event: {:?}", event); } // Ignore all events we do not care about. if Self::skip_event(&event) { return; } match event { // Check for shutdown. GlutinEvent::UserEvent(Event { window_id: Some(window_id), payload: EventType::Terminal(TerminalEvent::Exit), }) => { // Remove the closed terminal. let window_context = match self.windows.remove(&window_id) { Some(window_context) => window_context, None => return, }; // Unschedule pending events. 
scheduler.unschedule_window(window_context.id()); // Shutdown if no more terminals are open. if self.windows.is_empty() { // Write ref tests of last window to disk. if self.config.ui_config.debug.ref_test { window_context.write_ref_test_results(); } *control_flow = ControlFlow::Exit; } }, // Process all pending events. GlutinEvent::RedrawEventsCleared => { *control_flow = match scheduler.update() { Some(instant) => ControlFlow::WaitUntil(instant), None => ControlFlow::Wait, }; // Check for pending frame callbacks on Wayland. #[cfg(all(feature = "wayland", not(any(target_os = "macos", windows))))] if let Some(wayland_event_queue) = self.wayland_event_queue.as_mut() { wayland_event_queue .dispatch_pending(&mut (), |_, _, _| {}) .expect("failed to dispatch wayland event queue"); } // Dispatch event to all windows. for window_context in self.windows.values_mut() { window_context.handle_event( event_loop, &proxy, &mut self.config, &mut clipboard, &mut scheduler, GlutinEvent::RedrawEventsCleared, ); } }, // Process config update. GlutinEvent::UserEvent(Event { payload: EventType::ConfigReload(path), .. }) => { // Clear config logs from message bar for all terminals. for window_context in self.windows.values_mut() { if !window_context.message_buffer.is_empty() { window_context.message_buffer.remove_target(LOG_TARGET_CONFIG); window_context.display.pending_update.dirty = true; } } // Load config and update each terminal. if let Ok(config) = config::reload(&path, &self.cli_options) { let old_config = mem::replace(&mut self.config, config); for window_context in self.windows.values_mut() { window_context.update_config(&old_config, &self.config); } } }, // Create a new terminal window. GlutinEvent::UserEvent(Event { payload: EventType::CreateWindow, .. }) => { if let Err(err) = self.create_window(event_loop, proxy.clone()) { error!("Could not open window: {:?}", err); } }, // Process events affecting all windows. GlutinEvent::UserEvent(event @ Event { window_id: None, .. 
}) => { for window_context in self.windows.values_mut() { window_context.handle_event( event_loop, &proxy, &mut self.config, &mut clipboard, &mut scheduler, event.clone().into(), ); } }, // Process window-specific events. GlutinEvent::WindowEvent { window_id, .. } | GlutinEvent::UserEvent(Event { window_id: Some(window_id), .. }) | GlutinEvent::RedrawRequested(window_id) => { if let Some(window_context) = self.windows.get_mut(&window_id) { window_context.handle_event( event_loop, &proxy, &mut self.config, &mut clipboard, &mut scheduler, event, ); } }, _ => (), } }); } /// Check if an event is irrelevant and can be skipped. fn skip_event(event: &GlutinEvent<'_, Event>) -> bool { match event { GlutinEvent::WindowEvent { event, .. } => matches!( event, WindowEvent::KeyboardInput { is_synthetic: true, .. } | WindowEvent::TouchpadPressure { .. } | WindowEvent::CursorEntered { .. } | WindowEvent::AxisMotion { .. } | WindowEvent::HoveredFileCancelled | WindowEvent::Destroyed | WindowEvent::HoveredFile(_) | WindowEvent::Touch(_) | WindowEvent::Moved(_) ), GlutinEvent::Suspended { .. } | GlutinEvent::NewEvents { .. } | GlutinEvent::MainEventsCleared | GlutinEvent::LoopDestroyed => true, _ => false, } } } #[derive(Debug, Clone)] pub struct EventProxy { proxy: EventLoopProxy<Event>, window_id: WindowId, } impl EventProxy { pub fn new(proxy: EventLoopProxy<Event>, window_id: WindowId) -> Self { Self { proxy, window_id } } /// Send an event to the event loop. pub fn send_event(&self, event: EventType) { let _ = self.proxy.send_event(Event::new(event, self.window_id)); } } impl EventListener for EventProxy { fn send_event(&self, event: TerminalEvent) { let _ = self.proxy.send_event(Event::new(event.into(), self.window_id)); } }
37.686825
100
0.562668
90c0bc870ad5029976067f1359dd70353d5a0199
1,269
use std::env::current_dir; use std::fs::create_dir_all; use cosmwasm_schema::{export_schema, export_schema_with_title, remove_schemas, schema_for}; pub use tg4::{AdminResponse, MemberListResponse, MemberResponse, TotalPointsResponse}; pub use tg4_engagement::msg::{ DelegatedResponse, ExecuteMsg, InstantiateMsg, PreauthResponse, QueryMsg, RewardsResponse, SudoMsg, }; fn main() { let mut out_dir = current_dir().unwrap(); out_dir.push("schema"); create_dir_all(&out_dir).unwrap(); remove_schemas(&out_dir).unwrap(); export_schema_with_title(&schema_for!(InstantiateMsg), &out_dir, "InstantiateMsg"); export_schema_with_title(&schema_for!(ExecuteMsg), &out_dir, "ExecuteMsg"); export_schema_with_title(&schema_for!(QueryMsg), &out_dir, "QueryMsg"); export_schema(&schema_for!(AdminResponse), &out_dir); export_schema(&schema_for!(MemberListResponse), &out_dir); export_schema(&schema_for!(MemberResponse), &out_dir); export_schema(&schema_for!(TotalPointsResponse), &out_dir); export_schema(&schema_for!(PreauthResponse), &out_dir); export_schema(&schema_for!(SudoMsg), &out_dir); export_schema(&schema_for!(RewardsResponse), &out_dir); export_schema(&schema_for!(DelegatedResponse), &out_dir); }
42.3
94
0.752561
cc5bda2d4104f5488505e720ae891d59857a8dda
1,090
use crate::metadata::{Metadata, MetadataState}; /// The result of requesting type metadata. /// /// This is generally the return value of a function. /// /// For performance and ABI matching across Swift/C++, functions returning /// this type must use `SWIFT_CC` so that the components are returned as /// separate values. /// /// Note that Rust currently does not support the Swift calling convention /// (`swiftcall`), so care must be taken to ensure such functions return this /// value correctly. #[repr(C)] #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct MetadataResponse { /// The requested metadata. pub value: *const Metadata, /// The current state of the metadata returned. /// /// Always use this instead of trying to inspect the metadata directly to /// see if it satisfies the request. An incomplete metadata may be getting /// initialized concurrently. But this can generally be ignored if the /// metadata request was for abstract metadata or if the request is /// blocking. pub state: MetadataState, }
37.586207
78
0.712844
ebc1ed5587fe30a9b8f6675e6ac51b79f1a9725f
3,615
#[warn( clippy::cast_precision_loss, clippy::cast_possible_truncation, clippy::cast_sign_loss, clippy::cast_possible_wrap )] #[allow(clippy::no_effect, clippy::unnecessary_operation)] fn main() { // Test clippy::cast_precision_loss let x0 = 1i32; x0 as f32; let x1 = 1i64; x1 as f32; x1 as f64; let x2 = 1u32; x2 as f32; let x3 = 1u64; x3 as f32; x3 as f64; // Test clippy::cast_possible_truncation 1f32 as i32; 1f32 as u32; 1f64 as f32; 1i32 as i8; 1i32 as u8; 1f64 as isize; 1f64 as usize; // Test clippy::cast_possible_wrap 1u8 as i8; 1u16 as i16; 1u32 as i32; 1u64 as i64; 1usize as isize; // Test clippy::cast_sign_loss 1i32 as u32; -1i32 as u32; 1isize as usize; -1isize as usize; 0i8 as u8; i8::MAX as u8; i16::MAX as u16; i32::MAX as u32; i64::MAX as u64; i128::MAX as u128; (-1i8).abs() as u8; (-1i16).abs() as u16; (-1i32).abs() as u32; (-1i64).abs() as u64; (-1isize).abs() as usize; (-1i8).checked_abs().unwrap() as u8; (-1i16).checked_abs().unwrap() as u16; (-1i32).checked_abs().unwrap() as u32; (-1i64).checked_abs().unwrap() as u64; (-1isize).checked_abs().unwrap() as usize; (-1i8).rem_euclid(1i8) as u8; (-1i8).rem_euclid(1i8) as u16; (-1i16).rem_euclid(1i16) as u16; (-1i16).rem_euclid(1i16) as u32; (-1i32).rem_euclid(1i32) as u32; (-1i32).rem_euclid(1i32) as u64; (-1i64).rem_euclid(1i64) as u64; (-1i64).rem_euclid(1i64) as u128; (-1isize).rem_euclid(1isize) as usize; (1i8).rem_euclid(-1i8) as u8; (1i8).rem_euclid(-1i8) as u16; (1i16).rem_euclid(-1i16) as u16; (1i16).rem_euclid(-1i16) as u32; (1i32).rem_euclid(-1i32) as u32; (1i32).rem_euclid(-1i32) as u64; (1i64).rem_euclid(-1i64) as u64; (1i64).rem_euclid(-1i64) as u128; (1isize).rem_euclid(-1isize) as usize; (-1i8).checked_rem_euclid(1i8).unwrap() as u8; (-1i8).checked_rem_euclid(1i8).unwrap() as u16; (-1i16).checked_rem_euclid(1i16).unwrap() as u16; (-1i16).checked_rem_euclid(1i16).unwrap() as u32; (-1i32).checked_rem_euclid(1i32).unwrap() as u32; (-1i32).checked_rem_euclid(1i32).unwrap() as u64; 
(-1i64).checked_rem_euclid(1i64).unwrap() as u64; (-1i64).checked_rem_euclid(1i64).unwrap() as u128; (-1isize).checked_rem_euclid(1isize).unwrap() as usize; (1i8).checked_rem_euclid(-1i8).unwrap() as u8; (1i8).checked_rem_euclid(-1i8).unwrap() as u16; (1i16).checked_rem_euclid(-1i16).unwrap() as u16; (1i16).checked_rem_euclid(-1i16).unwrap() as u32; (1i32).checked_rem_euclid(-1i32).unwrap() as u32; (1i32).checked_rem_euclid(-1i32).unwrap() as u64; (1i64).checked_rem_euclid(-1i64).unwrap() as u64; (1i64).checked_rem_euclid(-1i64).unwrap() as u128; (1isize).checked_rem_euclid(-1isize).unwrap() as usize; // no lint for `cast_possible_truncation` // with `signum` method call (see issue #5395) let x: i64 = 5; let _ = x.signum() as i32; let s = x.signum(); let _ = s as i32; // Test for signed min (-99999999999i64).min(1) as i8; // should be linted because signed // Test for various operations that remove enough bits for the result to fit (999999u64 & 1) as u8; (999999u64 % 15) as u8; (999999u64 / 0x1_0000_0000_0000) as u16; ({ 999999u64 >> 56 }) as u8; ({ let x = 999999u64; x.min(1) }) as u8; 999999u64.clamp(0, 255) as u8; 999999u64.clamp(0, 256) as u8; // should still be linted }
30.378151
80
0.6213
674b4be641715f409c00c9942fe953082c49f53c
10,737
// Copyright 2021 TiKV Project Authors. Licensed under Apache-2.0. // #[PerformanceCriticalPath] use crate::{util, RocksEngine, RocksWriteBatch}; use engine_traits::{ Error, Iterable, KvEngine, MiscExt, Mutable, Peekable, RaftEngine, RaftEngineReadOnly, RaftLogBatch, RaftLogGCTask, Result, SyncMutable, WriteBatch, WriteBatchExt, WriteOptions, CF_DEFAULT, RAFT_LOG_MULTI_GET_CNT, }; use kvproto::raft_serverpb::RaftLocalState; use protobuf::Message; use raft::eraftpb::Entry; use tikv_util::{box_err, box_try}; impl RaftEngineReadOnly for RocksEngine { fn get_raft_state(&self, raft_group_id: u64) -> Result<Option<RaftLocalState>> { let key = keys::raft_state_key(raft_group_id); self.get_msg_cf(CF_DEFAULT, &key) } fn get_entry(&self, raft_group_id: u64, index: u64) -> Result<Option<Entry>> { let key = keys::raft_log_key(raft_group_id, index); self.get_msg_cf(CF_DEFAULT, &key) } fn fetch_entries_to( &self, region_id: u64, low: u64, high: u64, max_size: Option<usize>, buf: &mut Vec<Entry>, ) -> Result<usize> { let (max_size, mut total_size, mut count) = (max_size.unwrap_or(usize::MAX), 0, 0); if high - low <= RAFT_LOG_MULTI_GET_CNT { // If election happens in inactive regions, they will just try to fetch one empty log. 
for i in low..high { if total_size > 0 && total_size >= max_size { break; } let key = keys::raft_log_key(region_id, i); match self.get_value(&key) { Ok(None) => return Err(Error::EntriesCompacted), Ok(Some(v)) => { let mut entry = Entry::default(); entry.merge_from_bytes(&v)?; assert_eq!(entry.get_index(), i); buf.push(entry); total_size += v.len(); count += 1; } Err(e) => return Err(box_err!(e)), } } return Ok(count); } let (mut check_compacted, mut compacted, mut next_index) = (true, false, low); let start_key = keys::raft_log_key(region_id, low); let end_key = keys::raft_log_key(region_id, high); self.scan( &start_key, &end_key, true, // fill_cache |_, value| { let mut entry = Entry::default(); entry.merge_from_bytes(value)?; if check_compacted { if entry.get_index() != low { compacted = true; // May meet gap or has been compacted. return Ok(false); } check_compacted = false; } else { assert_eq!(entry.get_index(), next_index); } next_index += 1; buf.push(entry); total_size += value.len(); count += 1; Ok(total_size < max_size) }, )?; // If we get the correct number of entries, returns. // Or the total size almost exceeds max_size, returns. if count == (high - low) as usize || total_size >= max_size { return Ok(count); } if compacted { return Err(Error::EntriesCompacted); } // Here means we don't fetch enough entries. 
Err(Error::EntriesUnavailable) } fn get_all_entries_to(&self, region_id: u64, buf: &mut Vec<Entry>) -> Result<()> { let start_key = keys::raft_log_key(region_id, 0); let end_key = keys::raft_log_key(region_id, u64::MAX); self.scan( &start_key, &end_key, false, // fill_cache |_, value| { let mut entry = Entry::default(); entry.merge_from_bytes(value)?; buf.push(entry); Ok(true) }, )?; Ok(()) } } impl RocksEngine { fn gc_impl( &self, raft_group_id: u64, mut from: u64, to: u64, raft_wb: &mut RocksWriteBatch, ) -> Result<usize> { if from == 0 { let start_key = keys::raft_log_key(raft_group_id, 0); let prefix = keys::raft_log_prefix(raft_group_id); match self.seek(&start_key)? { Some((k, _)) if k.starts_with(&prefix) => from = box_try!(keys::raft_log_index(&k)), // No need to gc. _ => return Ok(0), } } if from >= to { return Ok(0); } for idx in from..to { let key = keys::raft_log_key(raft_group_id, idx); raft_wb.delete(&key)?; if raft_wb.count() >= Self::WRITE_BATCH_MAX_KEYS * 2 { raft_wb.write()?; raft_wb.clear(); } } Ok((to - from) as usize) } } // FIXME: RaftEngine should probably be implemented generically // for all KvEngines, but is currently implemented separately for // every engine. 
impl RaftEngine for RocksEngine { type LogBatch = RocksWriteBatch; fn log_batch(&self, capacity: usize) -> Self::LogBatch { RocksWriteBatch::with_capacity(self.as_inner().clone(), capacity) } fn sync(&self) -> Result<()> { self.sync_wal() } fn consume(&self, batch: &mut Self::LogBatch, sync_log: bool) -> Result<usize> { let bytes = batch.data_size(); let mut opts = WriteOptions::default(); opts.set_sync(sync_log); batch.write_opt(&opts)?; batch.clear(); Ok(bytes) } fn consume_and_shrink( &self, batch: &mut Self::LogBatch, sync_log: bool, max_capacity: usize, shrink_to: usize, ) -> Result<usize> { let data_size = self.consume(batch, sync_log)?; if data_size > max_capacity { *batch = self.write_batch_with_cap(shrink_to); } Ok(data_size) } fn clean( &self, raft_group_id: u64, mut first_index: u64, state: &RaftLocalState, batch: &mut Self::LogBatch, ) -> Result<()> { batch.delete(&keys::raft_state_key(raft_group_id))?; if first_index == 0 { let seek_key = keys::raft_log_key(raft_group_id, 0); let prefix = keys::raft_log_prefix(raft_group_id); fail::fail_point!("engine_rocks_raft_engine_clean_seek", |_| Ok(())); if let Some((key, _)) = self.seek(&seek_key)? { if !key.starts_with(&prefix) { // No raft logs for the raft group. 
return Ok(()); } first_index = match keys::raft_log_index(&key) { Ok(index) => index, Err(_) => return Ok(()), }; } else { return Ok(()); } } if first_index <= state.last_index { for index in first_index..=state.last_index { let key = keys::raft_log_key(raft_group_id, index); batch.delete(&key)?; } } Ok(()) } fn append(&self, raft_group_id: u64, entries: Vec<Entry>) -> Result<usize> { let mut wb = RocksWriteBatch::new(self.as_inner().clone()); let buf = Vec::with_capacity(1024); wb.append_impl(raft_group_id, &entries, buf)?; self.consume(&mut wb, false) } fn put_raft_state(&self, raft_group_id: u64, state: &RaftLocalState) -> Result<()> { self.put_msg(&keys::raft_state_key(raft_group_id), state) } fn batch_gc(&self, groups: Vec<RaftLogGCTask>) -> Result<usize> { let mut total = 0; let mut raft_wb = self.write_batch_with_cap(4 * 1024); for task in groups { total += self.gc_impl(task.raft_group_id, task.from, task.to, &mut raft_wb)?; } // TODO: disable WAL here. if !WriteBatch::is_empty(&raft_wb) { raft_wb.write()?; } Ok(total) } fn gc(&self, raft_group_id: u64, from: u64, to: u64) -> Result<usize> { let mut raft_wb = self.write_batch_with_cap(1024); let total = self.gc_impl(raft_group_id, from, to, &mut raft_wb)?; // TODO: disable WAL here. 
if !WriteBatch::is_empty(&raft_wb) { raft_wb.write()?; } Ok(total) } fn purge_expired_files(&self) -> Result<Vec<u64>> { Ok(vec![]) } fn has_builtin_entry_cache(&self) -> bool { false } fn flush_metrics(&self, instance: &str) { KvEngine::flush_metrics(self, instance) } fn reset_statistics(&self) { KvEngine::reset_statistics(self) } fn dump_stats(&self) -> Result<String> { MiscExt::dump_stats(self) } fn get_engine_size(&self) -> Result<u64> { let handle = util::get_cf_handle(self.as_inner(), CF_DEFAULT)?; let used_size = util::get_engine_cf_used_size(self.as_inner(), handle); Ok(used_size) } } impl RaftLogBatch for RocksWriteBatch { fn append(&mut self, raft_group_id: u64, entries: Vec<Entry>) -> Result<()> { if let Some(max_size) = entries.iter().map(|e| e.compute_size()).max() { let ser_buf = Vec::with_capacity(max_size as usize); return self.append_impl(raft_group_id, &entries, ser_buf); } Ok(()) } fn cut_logs(&mut self, raft_group_id: u64, from: u64, to: u64) { for index in from..to { let key = keys::raft_log_key(raft_group_id, index); self.delete(&key).unwrap(); } } fn put_raft_state(&mut self, raft_group_id: u64, state: &RaftLocalState) -> Result<()> { self.put_msg(&keys::raft_state_key(raft_group_id), state) } fn persist_size(&self) -> usize { self.data_size() } fn is_empty(&self) -> bool { WriteBatch::is_empty(self) } fn merge(&mut self, src: Self) { WriteBatch::<RocksEngine>::merge(self, src); } } impl RocksWriteBatch { fn append_impl( &mut self, raft_group_id: u64, entries: &[Entry], mut ser_buf: Vec<u8>, ) -> Result<()> { for entry in entries { let key = keys::raft_log_key(raft_group_id, entry.get_index()); ser_buf.clear(); entry.write_to_vec(&mut ser_buf).unwrap(); self.put(&key, &ser_buf)?; } Ok(()) } }
31.955357
100
0.533482
23517107d1f7e7725785ba3f9434129d25ffa6cf
1,160
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0. #![recursion_limit = "200"] #[macro_use] extern crate slog_global; #[macro_use] extern crate prometheus; #[macro_use] extern crate lazy_static; #[macro_use] extern crate quick_error; #[macro_use] extern crate serde_derive; #[allow(unused_extern_crates)] extern crate tikv_alloc; use std::sync::Arc; pub mod rocks; pub use crate::rocks::{CFHandle, DBIterator, Range, ReadOptions, WriteOptions, DB}; mod errors; pub use crate::errors::*; mod iterable; pub use crate::iterable::*; pub const DATA_KEY_PREFIX_LEN: usize = 1; #[derive(Clone, Debug)] pub struct Engines { pub kv: Arc<DB>, pub raft: Arc<DB>, pub shared_block_cache: bool, } impl Engines { pub fn new(kv_engine: Arc<DB>, raft_engine: Arc<DB>, shared_block_cache: bool) -> Engines { Engines { kv: kv_engine, raft: raft_engine, shared_block_cache, } } pub fn sync_kv(&self) -> Result<()> { self.kv.sync_wal().map_err(Error::RocksDb) } pub fn sync_raft(&self) -> Result<()> { self.raft.sync_wal().map_err(Error::RocksDb) } }
21.886792
95
0.663793
1e50610ea61096566abb4d429a0e3293e1616902
16,442
// Miniscript // Written in 2020 by rust-miniscript developers // // To the extent possible under law, the author(s) have dedicated all // copyright and related and neighboring rights to this software to // the public domain worldwide. This software is distributed without // any warranty. // // You should have received a copy of the CC0 Public Domain Dedication // along with this software. // If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. // //! # Segwit Output Descriptors //! //! Implementation of Segwit Descriptors. Contains the implementation //! of wsh, wpkh and sortedmulti inside wsh. use std::{fmt, str::FromStr}; use bitcoin::{self, Script}; use crate::expression::{self, FromTree}; use crate::miniscript::context::{ScriptContext, ScriptContextError}; use crate::policy::{semantic, Liftable}; use crate::util::varint_len; use crate::{ Error, ForEach, ForEachKey, Miniscript, MiniscriptKey, Satisfier, Segwitv0, ToPublicKey, TranslatePk, }; use super::{ checksum::{desc_checksum, verify_checksum}, DescriptorTrait, SortedMultiVec, }; /// A Segwitv0 wsh descriptor #[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] pub struct Wsh<Pk: MiniscriptKey> { /// underlying miniscript inner: WshInner<Pk>, } impl<Pk: MiniscriptKey> Wsh<Pk> { /// Get the Inner pub fn into_inner(self) -> WshInner<Pk> { self.inner } /// Get a reference to inner pub fn as_inner(&self) -> &WshInner<Pk> { &self.inner } /// Create a new wsh descriptor pub fn new(ms: Miniscript<Pk, Segwitv0>) -> Result<Self, Error> { // do the top-level checks Segwitv0::top_level_checks(&ms)?; Ok(Self { inner: WshInner::Ms(ms), }) } /// Create a new sortedmulti wsh descriptor pub fn new_sortedmulti(k: usize, pks: Vec<Pk>) -> Result<Self, Error> { // The context checks will be carried out inside new function for // sortedMultiVec Ok(Self { inner: WshInner::SortedMulti(SortedMultiVec::new(k, pks)?), }) } /// Get the descriptor without the checksum pub fn to_string_no_checksum(&self) -> String { match 
self.inner { WshInner::SortedMulti(ref smv) => format!("wsh({})", smv), WshInner::Ms(ref ms) => format!("wsh({})", ms), } } } impl<Pk: MiniscriptKey + ToPublicKey> Wsh<Pk> { /// Obtain the corresponding script pubkey for this descriptor /// Non failing verion of [`DescriptorTrait::script_pubkey`] for this descriptor pub fn spk(&self) -> Script { self.inner_script().to_v0_p2wsh() } /// Obtain the corresponding script pubkey for this descriptor /// Non failing verion of [`DescriptorTrait::address`] for this descriptor pub fn addr(&self, network: bitcoin::Network) -> bitcoin::Address { match self.inner { WshInner::SortedMulti(ref smv) => bitcoin::Address::p2wsh(&smv.encode(), network), WshInner::Ms(ref ms) => bitcoin::Address::p2wsh(&ms.encode(), network), } } /// Obtain the underlying miniscript for this descriptor /// Non failing verion of [`DescriptorTrait::explicit_script`] for this descriptor pub fn inner_script(&self) -> Script { match self.inner { WshInner::SortedMulti(ref smv) => smv.encode(), WshInner::Ms(ref ms) => ms.encode(), } } /// Obtain the pre bip-340 signature script code for this descriptor /// Non failing verion of [`DescriptorTrait::script_code`] for this descriptor pub fn ecdsa_sighash_script_code(&self) -> Script { self.inner_script() } } /// Wsh Inner #[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] pub enum WshInner<Pk: MiniscriptKey> { /// Sorted Multi SortedMulti(SortedMultiVec<Pk, Segwitv0>), /// Wsh Miniscript Ms(Miniscript<Pk, Segwitv0>), } impl<Pk: MiniscriptKey> Liftable<Pk> for Wsh<Pk> { fn lift(&self) -> Result<semantic::Policy<Pk>, Error> { match self.inner { WshInner::SortedMulti(ref smv) => smv.lift(), WshInner::Ms(ref ms) => ms.lift(), } } } impl<Pk> FromTree for Wsh<Pk> where Pk: MiniscriptKey + FromStr, Pk::Hash: FromStr, <Pk as FromStr>::Err: ToString, <<Pk as MiniscriptKey>::Hash as FromStr>::Err: ToString, { fn from_tree(top: &expression::Tree) -> Result<Self, Error> { if top.name == "wsh" && top.args.len() == 1 { let 
top = &top.args[0]; if top.name == "sortedmulti" { return Ok(Wsh { inner: WshInner::SortedMulti(SortedMultiVec::from_tree(&top)?), }); } let sub = Miniscript::from_tree(&top)?; Segwitv0::top_level_checks(&sub)?; Ok(Wsh { inner: WshInner::Ms(sub), }) } else { Err(Error::Unexpected(format!( "{}({} args) while parsing wsh descriptor", top.name, top.args.len(), ))) } } } impl<Pk: MiniscriptKey> fmt::Debug for Wsh<Pk> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self.inner { WshInner::SortedMulti(ref smv) => write!(f, "wsh({:?})", smv), WshInner::Ms(ref ms) => write!(f, "wsh({:?})", ms), } } } impl<Pk: MiniscriptKey> fmt::Display for Wsh<Pk> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let desc = self.to_string_no_checksum(); let checksum = desc_checksum(&desc).map_err(|_| fmt::Error)?; write!(f, "{}#{}", &desc, &checksum) } } impl<Pk> FromStr for Wsh<Pk> where Pk: MiniscriptKey + FromStr, Pk::Hash: FromStr, <Pk as FromStr>::Err: ToString, <<Pk as MiniscriptKey>::Hash as FromStr>::Err: ToString, { type Err = Error; fn from_str(s: &str) -> Result<Self, Self::Err> { let desc_str = verify_checksum(s)?; let top = expression::Tree::from_str(desc_str)?; Wsh::<Pk>::from_tree(&top) } } impl<Pk: MiniscriptKey> DescriptorTrait<Pk> for Wsh<Pk> { fn sanity_check(&self) -> Result<(), Error> { match self.inner { WshInner::SortedMulti(ref smv) => smv.sanity_check()?, WshInner::Ms(ref ms) => ms.sanity_check()?, } Ok(()) } fn address(&self, network: bitcoin::Network) -> Result<bitcoin::Address, Error> where Pk: ToPublicKey, { Ok(self.addr(network)) } fn script_pubkey(&self) -> Script where Pk: ToPublicKey, { self.spk() } fn unsigned_script_sig(&self) -> Script where Pk: ToPublicKey, { Script::new() } fn explicit_script(&self) -> Result<Script, Error> where Pk: ToPublicKey, { Ok(self.inner_script()) } fn get_satisfaction<S>(&self, satisfier: S) -> Result<(Vec<Vec<u8>>, Script), Error> where Pk: ToPublicKey, S: Satisfier<Pk>, { let mut witness = match 
self.inner { WshInner::SortedMulti(ref smv) => smv.satisfy(satisfier)?, WshInner::Ms(ref ms) => ms.satisfy(satisfier)?, }; let witness_script = self.inner_script(); witness.push(witness_script.into_bytes()); let script_sig = Script::new(); Ok((witness, script_sig)) } fn get_satisfaction_mall<S>(&self, satisfier: S) -> Result<(Vec<Vec<u8>>, Script), Error> where Pk: ToPublicKey, S: Satisfier<Pk>, { let mut witness = match self.inner { WshInner::SortedMulti(ref smv) => smv.satisfy(satisfier)?, WshInner::Ms(ref ms) => ms.satisfy_malleable(satisfier)?, }; witness.push(self.inner_script().into_bytes()); let script_sig = Script::new(); Ok((witness, script_sig)) } fn max_satisfaction_weight(&self) -> Result<usize, Error> { let (script_size, max_sat_elems, max_sat_size) = match self.inner { WshInner::SortedMulti(ref smv) => ( smv.script_size(), smv.max_satisfaction_witness_elements(), smv.max_satisfaction_size(), ), WshInner::Ms(ref ms) => ( ms.script_size(), ms.max_satisfaction_witness_elements()?, ms.max_satisfaction_size()?, ), }; Ok(4 + // scriptSig length byte varint_len(script_size) + script_size + varint_len(max_sat_elems) + max_sat_size) } fn script_code(&self) -> Result<Script, Error> where Pk: ToPublicKey, { Ok(self.ecdsa_sighash_script_code()) } } impl<Pk: MiniscriptKey> ForEachKey<Pk> for Wsh<Pk> { fn for_each_key<'a, F: FnMut(ForEach<'a, Pk>) -> bool>(&'a self, pred: F) -> bool where Pk: 'a, Pk::Hash: 'a, { match self.inner { WshInner::SortedMulti(ref smv) => smv.for_each_key(pred), WshInner::Ms(ref ms) => ms.for_each_key(pred), } } } impl<P, Q> TranslatePk<P, Q> for Wsh<P> where P: MiniscriptKey, Q: MiniscriptKey, { type Output = Wsh<Q>; fn translate_pk<Fpk, Fpkh, E>(&self, mut fpk: Fpk, mut fpkh: Fpkh) -> Result<Self::Output, E> where Fpk: FnMut(&P) -> Result<Q, E>, Fpkh: FnMut(&P::Hash) -> Result<Q::Hash, E>, { let inner = match self.inner { WshInner::SortedMulti(ref smv) => WshInner::SortedMulti(smv.translate_pk(&mut fpk)?), WshInner::Ms(ref ms) => 
WshInner::Ms(ms.translate_pk(&mut fpk, &mut fpkh)?), }; Ok(Wsh { inner: inner }) } } /// A bare Wpkh descriptor at top level #[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] pub struct Wpkh<Pk: MiniscriptKey> { /// underlying publickey pk: Pk, } impl<Pk: MiniscriptKey> Wpkh<Pk> { /// Create a new Wpkh descriptor pub fn new(pk: Pk) -> Result<Self, Error> { // do the top-level checks if pk.is_uncompressed() { Err(Error::ContextError(ScriptContextError::CompressedOnly( pk.to_string(), ))) } else { Ok(Self { pk: pk }) } } /// Get the inner key pub fn into_inner(self) -> Pk { self.pk } /// Get the inner key pub fn as_inner(&self) -> &Pk { &self.pk } /// Get the descriptor without the checksum pub fn to_string_no_checksum(&self) -> String { format!("wpkh({})", self.pk) } } impl<Pk: MiniscriptKey + ToPublicKey> Wpkh<Pk> { /// Obtain the corresponding script pubkey for this descriptor /// Non failing verion of [`DescriptorTrait::script_pubkey`] for this descriptor pub fn spk(&self) -> Script { let addr = bitcoin::Address::p2wpkh(&self.pk.to_public_key(), bitcoin::Network::Bitcoin) .expect("wpkh descriptors have compressed keys"); addr.script_pubkey() } /// Obtain the corresponding script pubkey for this descriptor /// Non failing verion of [`DescriptorTrait::address`] for this descriptor pub fn addr(&self, network: bitcoin::Network) -> bitcoin::Address { bitcoin::Address::p2wpkh(&self.pk.to_public_key(), network) .expect("Rust Miniscript types don't allow uncompressed pks in segwit descriptors") } /// Obtain the underlying miniscript for this descriptor /// Non failing verion of [`DescriptorTrait::explicit_script`] for this descriptor pub fn inner_script(&self) -> Script { self.spk() } /// Obtain the pre bip-340 signature script code for this descriptor /// Non failing verion of [`DescriptorTrait::script_code`] for this descriptor pub fn ecdsa_sighash_script_code(&self) -> Script { // For SegWit outputs, it is defined by bip-0143 (quoted below) and is different from 
// the previous txo's scriptPubKey. // The item 5: // - For P2WPKH witness program, the scriptCode is `0x1976a914{20-byte-pubkey-hash}88ac`. let addr = bitcoin::Address::p2pkh(&self.pk.to_public_key(), bitcoin::Network::Bitcoin); addr.script_pubkey() } } impl<Pk: MiniscriptKey> fmt::Debug for Wpkh<Pk> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "wpkh({:?})", self.pk) } } impl<Pk: MiniscriptKey> fmt::Display for Wpkh<Pk> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let desc = self.to_string_no_checksum(); let checksum = desc_checksum(&desc).map_err(|_| fmt::Error)?; write!(f, "{}#{}", &desc, &checksum) } } impl<Pk: MiniscriptKey> Liftable<Pk> for Wpkh<Pk> { fn lift(&self) -> Result<semantic::Policy<Pk>, Error> { Ok(semantic::Policy::KeyHash(self.pk.to_pubkeyhash())) } } impl<Pk> FromTree for Wpkh<Pk> where Pk: MiniscriptKey + FromStr, Pk::Hash: FromStr, <Pk as FromStr>::Err: ToString, <<Pk as MiniscriptKey>::Hash as FromStr>::Err: ToString, { fn from_tree(top: &expression::Tree) -> Result<Self, Error> { if top.name == "wpkh" && top.args.len() == 1 { Ok(Wpkh::new(expression::terminal(&top.args[0], |pk| { Pk::from_str(pk) })?)?) 
} else { Err(Error::Unexpected(format!( "{}({} args) while parsing wpkh descriptor", top.name, top.args.len(), ))) } } } impl<Pk> FromStr for Wpkh<Pk> where Pk: MiniscriptKey + FromStr, Pk::Hash: FromStr, <Pk as FromStr>::Err: ToString, <<Pk as MiniscriptKey>::Hash as FromStr>::Err: ToString, { type Err = Error; fn from_str(s: &str) -> Result<Self, Self::Err> { let desc_str = verify_checksum(s)?; let top = expression::Tree::from_str(desc_str)?; Self::from_tree(&top) } } impl<Pk: MiniscriptKey> DescriptorTrait<Pk> for Wpkh<Pk> { fn sanity_check(&self) -> Result<(), Error> { if self.pk.is_uncompressed() { Err(Error::ContextError(ScriptContextError::CompressedOnly( self.pk.to_string(), ))) } else { Ok(()) } } fn address(&self, network: bitcoin::Network) -> Result<bitcoin::Address, Error> where Pk: ToPublicKey, { Ok(self.addr(network)) } fn script_pubkey(&self) -> Script where Pk: ToPublicKey, { self.spk() } fn unsigned_script_sig(&self) -> Script where Pk: ToPublicKey, { Script::new() } fn explicit_script(&self) -> Result<Script, Error> where Pk: ToPublicKey, { Ok(self.inner_script()) } fn get_satisfaction<S>(&self, satisfier: S) -> Result<(Vec<Vec<u8>>, Script), Error> where Pk: ToPublicKey, S: Satisfier<Pk>, { if let Some(sig) = satisfier.lookup_ecdsa_sig(&self.pk) { let sig_vec = sig.to_vec(); let script_sig = Script::new(); let witness = vec![sig_vec, self.pk.to_public_key().to_bytes()]; Ok((witness, script_sig)) } else { Err(Error::MissingSig(self.pk.to_public_key())) } } fn get_satisfaction_mall<S>(&self, satisfier: S) -> Result<(Vec<Vec<u8>>, Script), Error> where Pk: ToPublicKey, S: Satisfier<Pk>, { self.get_satisfaction(satisfier) } fn max_satisfaction_weight(&self) -> Result<usize, Error> { Ok(4 + 1 + 73 + Segwitv0::pk_len(&self.pk)) } fn script_code(&self) -> Result<Script, Error> where Pk: ToPublicKey, { Ok(self.ecdsa_sighash_script_code()) } } impl<Pk: MiniscriptKey> ForEachKey<Pk> for Wpkh<Pk> { fn for_each_key<'a, F: FnMut(ForEach<'a, Pk>) -> bool>(&'a 
self, mut pred: F) -> bool where Pk: 'a, Pk::Hash: 'a, { pred(ForEach::Key(&self.pk)) } } impl<P, Q> TranslatePk<P, Q> for Wpkh<P> where P: MiniscriptKey, Q: MiniscriptKey, { type Output = Wpkh<Q>; fn translate_pk<Fpk, Fpkh, E>(&self, mut fpk: Fpk, _fpkh: Fpkh) -> Result<Self::Output, E> where Fpk: FnMut(&P) -> Result<Q, E>, Fpkh: FnMut(&P::Hash) -> Result<Q::Hash, E>, { Ok(Wpkh::new(fpk(&self.pk)?).expect("Uncompressed keys in Wpkh")) } }
30.058501
101
0.582228
280e6da69166cbf1e80b601b34ceece0a2f1962e
18,229
use crate::{hashmap::NP_HashMap}; use alloc::string::String; use crate::pointer::{NP_Vtable}; use crate::{pointer::{NP_Cursor}, schema::{NP_Parsed_Schema, NP_Schema_Addr}}; use crate::{memory::{NP_Memory}, pointer::{NP_Value}, error::NP_Error, schema::{NP_Schema, NP_TypeKeys}, json_flex::{JSMAP, NP_JSON}}; use alloc::string::ToString; use alloc::vec::Vec; use alloc::boxed::Box; use alloc::borrow::ToOwned; use core::{result::Result, hint::unreachable_unchecked}; /// The data type for tables in NoProto buffers. /// #[doc(hidden)] #[derive(Debug)] pub struct NP_Table<'table> { index: usize, v_table: Option<&'table mut NP_Vtable>, v_table_addr: usize, v_table_index: usize, table: NP_Cursor } #[allow(missing_docs)] impl<'table> NP_Table<'table> { #[inline(always)] pub fn select(mut table_cursor: NP_Cursor, key: &str, make_path: bool, memory: &NP_Memory) -> Result<Option<NP_Cursor>, NP_Error> { match &memory.schema[table_cursor.schema_addr] { NP_Parsed_Schema::Table { columns, columns_mapped, .. 
} => { match columns_mapped.get(key) { Some(x) => { let v_table = *x / 4; // which vtable let v_table_idx = *x % 4; // which index on the selected vtable let mut table_value = table_cursor.get_value(memory); if table_value.get_addr_value() == 0 { if make_path { table_cursor = Self::make_first_vtable(table_cursor, memory)?; } else { return Ok(None); } } table_value = table_cursor.get_value(memory); let mut seek_vtable = 0usize; let mut vtable_address = table_value.get_addr_value() as usize; while seek_vtable < v_table { let this_vtable = Self::get_vtable(vtable_address, memory); let next_vtable = this_vtable.get_next(); if next_vtable == 0 { vtable_address = Self::make_next_vtable(this_vtable, memory)?; } else { vtable_address = next_vtable as usize; } seek_vtable += 1; } let item_address = vtable_address + (v_table_idx * 2); Ok(Some(NP_Cursor::new(item_address, columns[*x].2, table_cursor.schema_addr))) }, None => Ok(None) } }, _ => Err(NP_Error::new("unreachable")) } } #[inline(always)] pub fn make_first_vtable<'make>(table_cursor: NP_Cursor, memory: &'make NP_Memory) -> Result<NP_Cursor, NP_Error> { let first_vtable_addr = memory.malloc_borrow(&[0u8; 10])?; let table_value = table_cursor.get_value(memory); table_value.set_addr_value(first_vtable_addr as u16); Ok(table_cursor) } #[inline(always)] pub fn make_next_vtable<'make>(prev_vtable: &'make mut NP_Vtable, memory: &'make NP_Memory) -> Result<usize, NP_Error> { let vtable_addr = memory.malloc_borrow(&[0u8; 10])?; prev_vtable.set_next(vtable_addr as u16); Ok(vtable_addr) } #[inline(always)] pub fn new_iter(cursor: &NP_Cursor, memory: &'table NP_Memory) -> Self { let table_value = cursor.get_value(memory); let addr_value = table_value.get_addr_value() as usize; Self { table: cursor.clone(), v_table: if addr_value == 0 { None } else { Some(Self::get_vtable(addr_value, memory)) }, v_table_addr: addr_value, v_table_index: 0, index: 0, } } #[inline(always)] pub fn get_vtable<'vtable>(v_table_addr: usize, memory: 
&'vtable NP_Memory) -> &'vtable mut NP_Vtable { if v_table_addr > memory.read_bytes().len() { // attack unsafe { &mut *(memory.write_bytes().as_ptr() as *mut NP_Vtable) } } else { // normal operation unsafe { &mut *(memory.write_bytes().as_ptr().add(v_table_addr) as *mut NP_Vtable) } } } #[inline(always)] pub fn step_iter(&mut self, memory: &'table NP_Memory) -> Option<(usize, &'table str, Option<NP_Cursor>)> { match &memory.schema[self.table.schema_addr] { NP_Parsed_Schema::Table { columns, .. } => { if columns.len() <= self.index { return None; } let v_table = self.index / 4; // which vtable let v_table_idx = self.index % 4; // which index on the selected vtable if self.v_table_index > v_table { self.v_table_index = v_table; match &self.v_table { Some(vtable) => { let next_vtable = vtable.get_next() as usize; if next_vtable > 0 { self.v_table = Some(Self::get_vtable(next_vtable, memory)); self.v_table_addr = next_vtable; } else { self.v_table = None; self.v_table_addr = 0; } }, _ => {} } } let this_index = self.index; self.index += 1; if self.v_table_addr != 0 { let item_address = self.v_table_addr + (v_table_idx * 2); Some((this_index, columns[this_index].1.as_str(), Some(NP_Cursor::new(item_address, columns[this_index].2, self.table.schema_addr)))) } else { Some((this_index, columns[this_index].1.as_str(), None)) } }, _ => None } } } impl<'value> NP_Value<'value> for NP_Table<'value> { fn type_idx() -> (&'value str, NP_TypeKeys) { ("table", NP_TypeKeys::Table) } fn self_type_idx(&self) -> (&'value str, NP_TypeKeys) { ("table", NP_TypeKeys::Table) } fn from_bytes_to_schema(mut schema: Vec<NP_Parsed_Schema>, address: usize, bytes: &Vec<u8>) -> (bool, Vec<NP_Parsed_Schema>) { let column_len = bytes[address + 1]; let mut parsed_columns: Vec<(u8, String, NP_Schema_Addr)> = Vec::new(); let table_schema_addr = schema.len(); schema.push(NP_Parsed_Schema::Table { i: NP_TypeKeys::Table, sortable: false, columns_mapped: NP_HashMap::new(), columns: Vec::new() }); let mut 
schema_parsed = schema; let mut offset = address + 2; let mut hash_map = NP_HashMap::new(); for x in 0..column_len as usize { let col_name_len = bytes[offset] as usize; let col_name_bytes = &bytes[(offset + 1)..(offset + 1 + col_name_len)]; let col_name = unsafe { core::str::from_utf8_unchecked(col_name_bytes) }; offset += 1 + col_name_len; let schema_size = u16::from_be_bytes([ bytes[offset], bytes[offset + 1] ]) as usize; let column_addr = schema_parsed.len(); let (_, schema) = NP_Schema::from_bytes(schema_parsed, offset + 2, bytes); schema_parsed = schema; parsed_columns.push((x as u8, col_name.to_string(), column_addr)); hash_map.insert(col_name, x).unwrap_or_default(); offset += schema_size + 2; } schema_parsed[table_schema_addr] = NP_Parsed_Schema::Table { i: NP_TypeKeys::Table, columns_mapped: hash_map, sortable: false, columns: parsed_columns }; (false, schema_parsed) } fn schema_to_json(schema: &Vec<NP_Parsed_Schema>, address: usize)-> Result<NP_JSON, NP_Error> { let mut schema_json = JSMAP::new(); schema_json.insert("type".to_owned(), NP_JSON::String(Self::type_idx().0.to_string())); let columns: Vec<NP_JSON> = match &schema[address] { NP_Parsed_Schema::Table { columns, .. 
} => { columns.into_iter().map(|column| { let mut cols: Vec<NP_JSON> = Vec::new(); cols.push(NP_JSON::String(column.1.to_string())); cols.push(NP_Schema::_type_to_json(&schema, column.2).unwrap_or(NP_JSON::Null)); NP_JSON::Array(cols) }).collect() }, _ => Vec::new() }; schema_json.insert("columns".to_owned(), NP_JSON::Array(columns)); Ok(NP_JSON::Dictionary(schema_json)) } fn get_size(cursor: &NP_Cursor, memory: &'value NP_Memory<'value>) -> Result<usize, NP_Error> { let c_value = cursor.get_value(memory); if c_value.get_addr_value() == 0 { return Ok(0) } let mut acc_size = 0usize; let mut nex_vtable = c_value.get_addr_value() as usize; while nex_vtable > 0 { acc_size += 10; let vtable = Self::get_vtable(nex_vtable, memory); nex_vtable = vtable.get_next() as usize; } let mut table = Self::new_iter(&cursor, memory); while let Some((_index, _key, item)) = table.step_iter(memory) { if let Some(real) = item { let add_size = NP_Cursor::calc_size(&real, memory)?; if add_size > 2 { // scalar cursor is part of vtable acc_size += add_size - 2; } } } Ok(acc_size) } fn to_json(cursor: &NP_Cursor, memory: &'value NP_Memory) -> NP_JSON { let c_value = cursor.get_value(memory); if c_value.get_addr_value() == 0 { return NP_JSON::Null }; let mut json_map = JSMAP::new(); let mut table = Self::new_iter(&cursor, memory); while let Some((_index, key, item)) = table.step_iter(memory) { if let Some(real) = item { json_map.insert(String::from(key), NP_Cursor::json_encode(&real, memory)); } else { json_map.insert(String::from(key), NP_JSON::Null); } } NP_JSON::Dictionary(json_map) } fn do_compact(from_cursor: NP_Cursor, from_memory: &'value NP_Memory, mut to_cursor: NP_Cursor, to_memory: &'value NP_Memory) -> Result<NP_Cursor, NP_Error> where Self: 'value + Sized { let from_value = from_cursor.get_value(from_memory); if from_value.get_addr_value() == 0 { return Ok(to_cursor) } to_cursor = Self::make_first_vtable(to_cursor, to_memory)?; let to_cursor_value = to_cursor.get_value(to_memory); 
let mut last_real_vtable = to_cursor_value.get_addr_value() as usize; let mut last_vtable_idx = 0usize; let c: Vec<(u8, String, usize)>; let col_schemas = match &from_memory.schema[from_cursor.schema_addr] { NP_Parsed_Schema::Table { columns, .. } => { columns }, _ => { c = Vec::new(); &c } }; let mut table = Self::new_iter(&from_cursor, from_memory); while let Some((idx, _key, item)) = table.step_iter(from_memory) { if let Some(real) = item { let v_table = idx / 4; // which vtable let v_table_idx = idx % 4; // which index on the selected vtable if last_vtable_idx < v_table { let vtable_data = Self::get_vtable(last_real_vtable, to_memory); last_real_vtable = Self::make_next_vtable(vtable_data, to_memory)?; last_vtable_idx += 1; } let item_addr = last_real_vtable + (v_table_idx * 2); NP_Cursor::compact(real.clone(), from_memory, NP_Cursor::new(item_addr, col_schemas[idx].2, to_cursor.schema_addr), to_memory)?; } } Ok(to_cursor) } fn from_json_to_schema(mut schema: Vec<NP_Parsed_Schema>, json_schema: &Box<NP_JSON>) -> Result<(bool, Vec<u8>, Vec<NP_Parsed_Schema>), NP_Error> { let mut schema_bytes: Vec<u8> = Vec::new(); schema_bytes.push(NP_TypeKeys::Table as u8); let schema_table_addr = schema.len(); schema.push(NP_Parsed_Schema::Table { i: NP_TypeKeys::Table, sortable: false, columns: Vec::new(), columns_mapped: NP_HashMap::new() }); let mut columns_mapped = NP_HashMap::new(); let mut columns: Vec<(u8, String, NP_Schema_Addr)> = Vec::new(); let mut column_data: Vec<(String, Vec<u8>)> = Vec::new(); let mut schema_parsed: Vec<NP_Parsed_Schema> = schema; match &json_schema["columns"] { NP_JSON::Array(cols) => { let mut x: u8 = 0; for col in cols { let column_name = match &col[0] { NP_JSON::String(x) => x.clone(), _ => "".to_owned() }; if column_name.len() > 255 { return Err(NP_Error::new("Table column names cannot be longer than 255 characters!")) } let column_schema_addr = schema_parsed.len(); columns.push((x, column_name.clone(), column_schema_addr)); let 
(_is_sortable, column_type, schema_p) = NP_Schema::from_json(schema_parsed, &Box::new(col[1].clone()))?; schema_parsed = schema_p; columns_mapped.insert(column_name.as_str(), x as usize)?; column_data.push((column_name, column_type)); x += 1; } }, _ => { return Err(NP_Error::new("Tables require a 'columns' property that is an array of schemas!")) } } schema_parsed[schema_table_addr] = NP_Parsed_Schema::Table { i: NP_TypeKeys::Table, sortable: false, columns: columns, columns_mapped }; if column_data.len() > 255 { return Err(NP_Error::new("Tables cannot have more than 255 columns!")) } if column_data.len() == 0 { return Err(NP_Error::new("Tables must have at least one column!")) } // number of columns schema_bytes.push(column_data.len() as u8); for col in column_data { // colum name let bytes = col.0.as_bytes().to_vec(); schema_bytes.push(bytes.len() as u8); schema_bytes.extend(bytes); if col.1.len() > u16::max as usize { return Err(NP_Error::new("Schema overflow error!")) } // column type schema_bytes.extend((col.1.len() as u16).to_be_bytes().to_vec()); schema_bytes.extend(col.1); } return Ok((false, schema_bytes, schema_parsed)) } fn schema_default(_schema: &NP_Parsed_Schema) -> Option<Self> { None } } #[test] fn schema_parsing_works() -> Result<(), NP_Error> { let schema = "{\"type\":\"table\",\"columns\":[[\"age\",{\"type\":\"uint8\"}],[\"tags\",{\"type\":\"list\",\"of\":{\"type\":\"string\"}}],[\"name\",{\"type\":\"string\",\"size\":10}]]}"; let factory = crate::NP_Factory::new(schema)?; assert_eq!(schema, factory.schema.to_json()?.stringify()); Ok(()) } #[test] fn set_clear_value_and_compaction_works() -> Result<(), NP_Error> { let schema = "{\"type\":\"table\",\"columns\":[[\"age\",{\"type\":\"uint8\"}],[\"name\",{\"type\":\"string\"}]]}"; let factory = crate::NP_Factory::new(schema)?; // compaction removes cleared values let mut buffer = factory.empty_buffer(None); buffer.set(&["name"], "hello")?; assert_eq!(buffer.get::<&str>(&["name"])?, Some("hello")); 
assert_eq!(buffer.calc_bytes()?.after_compaction, buffer.calc_bytes()?.current_buffer); assert_eq!(buffer.calc_bytes()?.after_compaction, 20usize); buffer.del(&[])?; buffer.compact(None)?; assert_eq!(buffer.calc_bytes()?.current_buffer, 3usize); // good values are preserved through compaction let mut buffer = factory.empty_buffer(None); buffer.set(&["name"], "hello")?; assert_eq!(buffer.get::<&str>(&["name"])?, Some("hello")); assert_eq!(buffer.calc_bytes()?.current_buffer, 20usize); buffer.compact(None)?; assert_eq!(buffer.get::<&str>(&["name"])?, Some("hello")); assert_eq!(buffer.calc_bytes()?.current_buffer, 20usize); Ok(()) } #[test] fn test_vtables() -> Result<(), NP_Error> { let factory = crate::NP_Factory::new(r#"{ "type": "table", "columns": [ ["age", {"type": "u8"}], ["name", {"type": "string"}], ["color", {"type": "string"}], ["car", {"type": "string"}], ["rating", {"type": "u8"}] ] }"#)?; // compaction removes cleared values let mut buffer = factory.empty_buffer(None); buffer.set(&["age"], 20u8)?; buffer.set(&["name"], "hello")?; buffer.set(&["color"], "blue")?; buffer.set(&["car"], "Chevy")?; buffer.set(&["rating"], 98u8)?; let new_buffer = factory.open_buffer(buffer.close()); assert_eq!(new_buffer.get::<u8>(&["age"])?.unwrap(), 20u8); assert_eq!(new_buffer.get::<&str>(&["name"])?.unwrap(), "hello"); assert_eq!(new_buffer.get::<&str>(&["color"])?.unwrap(), "blue"); assert_eq!(new_buffer.get::<&str>(&["car"])?.unwrap(), "Chevy"); assert_eq!(new_buffer.get::<u8>(&["rating"])?.unwrap(), 98u8); Ok(()) }
36.312749
190
0.534259
612747b19eb751da41da52ce8abd172afe0c2058
35,035
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::use_self)] use qlog::QlogStreamer; use neqo_common::{self as common, event::Provider, hex, qlog::NeqoQlog, Datagram, Role}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, init, AuthenticationStatus, Cipher, ResumptionToken, }; use neqo_http3::{ self, Error, Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Output, }; use neqo_qpack::QpackSettings; use neqo_transport::{ stream_id::StreamIndex, Connection, ConnectionId, ConnectionParameters, EmptyConnectionIdGenerator, Error as TransportError, QuicVersion, StreamType, }; use std::cell::RefCell; use std::collections::{HashMap, VecDeque}; use std::fs::{File, OpenOptions}; use std::io::{self, ErrorKind, Write}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs, UdpSocket}; use std::path::PathBuf; use std::process::exit; use std::rc::Rc; use std::time::Instant; use structopt::StructOpt; use url::{Origin, Url}; #[derive(Debug)] pub enum ClientError { Http3Error(neqo_http3::Error), IoError(io::Error), QlogError, TransportError(neqo_transport::Error), } impl From<io::Error> for ClientError { fn from(err: io::Error) -> Self { Self::IoError(err) } } impl From<neqo_http3::Error> for ClientError { fn from(err: neqo_http3::Error) -> Self { Self::Http3Error(err) } } impl From<qlog::Error> for ClientError { fn from(_err: qlog::Error) -> Self { Self::QlogError } } impl From<neqo_transport::Error> for ClientError { fn from(err: neqo_transport::Error) -> Self { Self::TransportError(err) } } type Res<T> = Result<T, ClientError>; /// Track whether a key update is needed. 
#[derive(Debug, PartialEq, Eq)] struct KeyUpdateState(bool); impl KeyUpdateState { pub fn maybe_update<F, E>(&mut self, update_fn: F) -> Res<()> where F: FnOnce() -> Result<(), E>, E: Into<ClientError>, { if self.0 { if let Err(e) = update_fn() { let e = e.into(); match e { ClientError::TransportError(TransportError::KeyUpdateBlocked) | ClientError::Http3Error(Error::TransportError( TransportError::KeyUpdateBlocked, )) => (), _ => return Err(e), } } else { println!("Keys updated"); self.0 = false; } } Ok(()) } fn needed(&self) -> bool { self.0 } } #[derive(Debug, StructOpt)] #[structopt( name = "neqo-client", about = "A basic QUIC HTTP/0.9 and HTTP/3 client." )] pub struct Args { #[structopt(short = "a", long, default_value = "h3-29")] /// ALPN labels to negotiate. /// /// This client still only does HTTP/3 no matter what the ALPN says. alpn: String, urls: Vec<Url>, #[structopt(short = "m", default_value = "GET")] method: String, #[structopt(short = "h", long, number_of_values = 2)] header: Vec<String>, #[structopt(name = "encoder-table-size", long, default_value = "16384")] max_table_size_encoder: u64, #[structopt(name = "decoder-table-size", long, default_value = "16384")] max_table_size_decoder: u64, #[structopt(name = "max-blocked-streams", short = "b", long, default_value = "10")] max_blocked_streams: u16, #[structopt(name = "max-push", short = "p", long, default_value = "10")] max_concurrent_push_streams: u64, #[structopt(name = "use-old-http", short = "o", long)] /// Use http 0.9 instead of HTTP/3 use_old_http: bool, #[structopt(name = "download-in-series", long)] /// Download resources in series using separate connections. /// Only works with old HTTP (that is, `-o`). download_in_series: bool, #[structopt(name = "concurrency", long, default_value = "100")] /// The maximum number of requests to have outstanding at one time. 
concurrency: usize, #[structopt(name = "output-read-data", long)] /// Output received data to stdout output_read_data: bool, #[structopt(name = "qlog-dir", long)] /// Enable QLOG logging and QLOG traces to this directory qlog_dir: Option<PathBuf>, #[structopt(name = "output-dir", long)] /// Save contents of fetched URLs to a directory output_dir: Option<PathBuf>, #[structopt(name = "qns-test", long)] /// Enable special behavior for use with QUIC Network Simulator qns_test: Option<String>, #[structopt(short = "r", long)] /// Client attemps to resume connections when there are multiple connections made. /// Use this for 0-RTT: the stack always attempts 0-RTT on resumption. resume: bool, #[structopt(name = "key-update", long)] /// Attempt to initiate a key update immediately after confirming the connection. key_update: bool, #[structopt(short = "c", long, number_of_values = 1)] /// The set of TLS cipher suites to enable. /// From: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256. ciphers: Vec<String>, #[structopt(subcommand)] quic_parameters: QuicParameters, } impl Args { fn get_ciphers(&self) -> Vec<Cipher> { self.ciphers .iter() .filter_map(|c| match c.as_str() { "TLS_AES_128_GCM_SHA256" => Some(TLS_AES_128_GCM_SHA256), "TLS_AES_256_GCM_SHA384" => Some(TLS_AES_256_GCM_SHA384), "TLS_CHACHA20_POLY1305_SHA256" => Some(TLS_CHACHA20_POLY1305_SHA256), _ => None, }) .collect::<Vec<_>>() } } #[derive(Debug, StructOpt)] struct QuicParameters { #[structopt(long, default_value = "16")] /// Set the MAX_STREAMS_BIDI limit. max_streams_bidi: u64, #[structopt(long, default_value = "16")] /// Set the MAX_STREAMS_UNI limit. 
max_streams_uni: u64, } impl QuicParameters { fn get(&self) -> ConnectionParameters { ConnectionParameters::default() .max_streams(StreamType::BiDi, StreamIndex::new(self.max_streams_bidi)) .max_streams(StreamType::UniDi, StreamIndex::new(self.max_streams_uni)) } } fn emit_datagram(socket: &UdpSocket, d: Datagram) -> io::Result<()> { let sent = socket.send_to(&d[..], d.destination())?; if sent != d.len() { eprintln!("Unable to send all {} bytes of datagram", d.len()); } Ok(()) } fn get_output_file( url: &Url, output_dir: &Option<PathBuf>, all_paths: &mut Vec<PathBuf>, ) -> Option<File> { if let Some(ref dir) = output_dir { let mut out_path = dir.clone(); let url_path = if url.path() == "/" { // If no path is given... call it "root"? "root" } else { // Omit leading slash &url.path()[1..] }; out_path.push(url_path); if all_paths.contains(&out_path) { eprintln!("duplicate path {}", out_path.display()); return None; } eprintln!("Saving {} to {:?}", url.clone().into_string(), out_path); let f = match OpenOptions::new() .write(true) .create(true) .truncate(true) .open(&out_path) { Err(_) => return None, Ok(f) => f, }; all_paths.push(out_path); Some(f) } else { None } } fn process_loop( local_addr: &SocketAddr, socket: &UdpSocket, client: &mut Http3Client, handler: &mut Handler, ) -> Res<neqo_http3::Http3State> { let buf = &mut [0u8; 2048]; loop { if let Http3State::Closed(..) 
= client.state() { return Ok(client.state()); } let mut exiting = !handler.handle(client)?; loop { match client.process_output(Instant::now()) { Output::Datagram(dgram) => { if let Err(e) = emit_datagram(&socket, dgram) { eprintln!("UDP write error: {}", e); client.close(Instant::now(), 0, e.to_string()); exiting = true; break; } } Output::Callback(duration) => { socket.set_read_timeout(Some(duration)).unwrap(); break; } Output::None => { // Not strictly necessary, since we're about to exit socket.set_read_timeout(None).unwrap(); exiting = true; break; } } } if exiting { return Ok(client.state()); } match socket.recv_from(&mut buf[..]) { Err(ref err) if err.kind() == ErrorKind::WouldBlock || err.kind() == ErrorKind::Interrupted => {} Err(err) => { eprintln!("UDP error: {}", err); exit(1) } Ok((sz, remote)) => { if sz == buf.len() { eprintln!("Received more than {} bytes", buf.len()); continue; } if sz > 0 { let d = Datagram::new(remote, *local_addr, &buf[..sz]); client.process_input(d, Instant::now()); handler.maybe_key_update(client)?; } } }; } } struct Handler<'a> { streams: HashMap<u64, Option<File>>, url_queue: VecDeque<Url>, all_paths: Vec<PathBuf>, args: &'a Args, key_update: KeyUpdateState, } impl<'a> Handler<'a> { fn download_urls(&mut self, client: &mut Http3Client) { loop { if self.url_queue.is_empty() { break; } if self.streams.len() >= self.args.concurrency { break; } if !self.download_next(client) { break; } } } fn download_next(&mut self, client: &mut Http3Client) -> bool { if self.key_update.needed() { println!("Deferring requests until first key update"); return false; } let url = self .url_queue .pop_front() .expect("download_next called with empty queue"); match client.fetch( Instant::now(), &self.args.method, &url.scheme(), &url.host_str().unwrap(), &url.path(), &to_headers(&self.args.header), ) { Ok(client_stream_id) => { println!( "Successfully created stream id {} for {}", client_stream_id, url ); let _ = 
client.stream_close_send(client_stream_id); let out_file = get_output_file(&url, &self.args.output_dir, &mut self.all_paths); self.streams.insert(client_stream_id, out_file); true } e @ Err(Error::TransportError(TransportError::StreamLimitError)) | e @ Err(Error::StreamLimitError) | e @ Err(Error::Unavailable) => { println!("Cannot create stream {:?}", e); self.url_queue.push_front(url); false } Err(e) => { panic!("Can't create stream {}", e); } } } fn maybe_key_update(&mut self, c: &mut Http3Client) -> Res<()> { self.key_update.maybe_update(|| c.initiate_key_update())?; self.download_urls(c); Ok(()) } fn done(&mut self) -> bool { self.streams.is_empty() && self.url_queue.is_empty() } fn handle(&mut self, client: &mut Http3Client) -> Res<bool> { while let Some(event) = client.next_event() { match event { Http3ClientEvent::AuthenticationNeeded => { client.authenticated(AuthenticationStatus::Ok, Instant::now()); } Http3ClientEvent::HeaderReady { stream_id, headers, fin, .. } => match self.streams.get(&stream_id) { Some(out_file) => { if out_file.is_none() { println!("READ HEADERS[{}]: fin={} {:?}", stream_id, fin, headers); } } None => { println!("Data on unexpected stream: {}", stream_id); return Ok(false); } }, Http3ClientEvent::DataReadable { stream_id } => { let mut stream_done = false; match self.streams.get_mut(&stream_id) { None => { println!("Data on unexpected stream: {}", stream_id); return Ok(false); } Some(out_file) => loop { let mut data = vec![0; 4096]; let (sz, fin) = client .read_response_data(Instant::now(), stream_id, &mut data) .expect("Read should succeed"); if let Some(out_file) = out_file { if sz > 0 { out_file.write_all(&data[..sz])?; } } else if !self.args.output_read_data { println!("READ[{}]: {} bytes", stream_id, sz); } else if let Ok(txt) = String::from_utf8(data.clone()) { println!("READ[{}]: {}", stream_id, txt); } else { println!("READ[{}]: 0x{}", stream_id, hex(&data)); } if fin { if out_file.is_none() { println!("<FIN[{}]>", 
stream_id); } stream_done = true; break; } if sz == 0 { break; } }, } if stream_done { self.streams.remove(&stream_id); self.download_urls(client); if self.done() { client.close(Instant::now(), 0, "kthxbye!"); return Ok(false); } } } Http3ClientEvent::StateChange(Http3State::Connected) | Http3ClientEvent::RequestsCreatable => { self.download_urls(client); } _ => { println!("Unhandled event {:?}", event); } } } Ok(true) } } fn to_headers(values: &[impl AsRef<str>]) -> Vec<Header> { values .iter() .scan(None, |state, value| { if let Some(name) = state.take() { *state = None; Some((name, value.as_ref().to_string())) // TODO use a real type } else { *state = Some(value.as_ref().to_string()); None } }) .collect() } fn client( args: &Args, socket: UdpSocket, local_addr: SocketAddr, remote_addr: SocketAddr, hostname: &str, urls: &[Url], ) -> Res<()> { let quic_protocol = match args.alpn.as_str() { "h3-27" => QuicVersion::Draft27, "h3-28" => QuicVersion::Draft28, "h3-29" => QuicVersion::Draft29, "h3-30" => QuicVersion::Draft30, "h3-31" => QuicVersion::Draft31, "h3-32" => QuicVersion::Draft32, _ => QuicVersion::default(), }; let mut transport = Connection::new_client( hostname, &[&args.alpn], Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), local_addr, remote_addr, args.quic_parameters.get().quic_version(quic_protocol), )?; let ciphers = args.get_ciphers(); if !ciphers.is_empty() { transport.set_ciphers(&ciphers)?; } let mut client = Http3Client::new_with_conn( transport, &Http3Parameters { qpack_settings: QpackSettings { max_table_size_encoder: args.max_table_size_encoder, max_table_size_decoder: args.max_table_size_decoder, max_blocked_streams: args.max_blocked_streams, }, max_concurrent_push_streams: args.max_concurrent_push_streams, }, ); let qlog = qlog_new(args, hostname, client.connection_id())?; client.set_qlog(qlog); let key_update = KeyUpdateState(args.key_update); let mut h = Handler { streams: HashMap::new(), url_queue: 
VecDeque::from(urls.to_vec()), all_paths: Vec::new(), args: &args, key_update, }; process_loop(&local_addr, &socket, &mut client, &mut h)?; Ok(()) } fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res<NeqoQlog> { if let Some(qlog_dir) = &args.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); let filename = format!("{}-{}.qlog", hostname, cid); qlog_path.push(filename); let f = OpenOptions::new() .write(true) .create(true) .truncate(true) .open(&qlog_path)?; let streamer = QlogStreamer::new( qlog::QLOG_VERSION.to_string(), Some("Example qlog".to_string()), Some("Example qlog description".to_string()), None, std::time::Instant::now(), common::qlog::new_trace(Role::Client), Box::new(f), ); Ok(NeqoQlog::enabled(streamer, qlog_path)?) } else { Ok(NeqoQlog::disabled()) } } fn main() -> Res<()> { init(); let mut args = Args::from_args(); if let Some(testcase) = args.qns_test.as_ref() { match testcase.as_str() { "http3" => {} "handshake" | "transfer" | "retry" => { args.use_old_http = true; } "zerortt" | "resumption" => { if args.urls.len() < 2 { eprintln!("Warning: resumption tests won't work without >1 URL"); exit(127); } args.use_old_http = true; args.resume = true; } "multiconnect" => { args.use_old_http = true; args.download_in_series = true; } "chacha20" => { args.use_old_http = true; args.ciphers.clear(); args.ciphers .extend_from_slice(&[String::from("TLS_CHACHA20_POLY1305_SHA256")]); } "keyupdate" => { args.use_old_http = true; args.key_update = true; } _ => exit(127), } } let mut urls_by_origin: HashMap<Origin, Vec<Url>> = HashMap::new(); for url in &args.urls { let entry = urls_by_origin.entry(url.origin()).or_default(); entry.push(url.clone()); } for ((_scheme, host, port), mut urls) in urls_by_origin.into_iter().filter_map(|(k, v)| match k { Origin::Tuple(s, h, p) => Some(((s, h, p), v)), Origin::Opaque(x) => { eprintln!("Opaque origin {:?}", x); None } }) { let addrs: Vec<_> = format!("{}:{}", host, port).to_socket_addrs()?.collect(); let 
remote_addr = *addrs.first().unwrap(); let local_addr = match remote_addr { SocketAddr::V4(..) => SocketAddr::new(IpAddr::V4(Ipv4Addr::from([0; 4])), 0), SocketAddr::V6(..) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from([0; 16])), 0), }; let socket = match UdpSocket::bind(local_addr) { Err(e) => { eprintln!("Unable to bind UDP socket: {}", e); exit(1) } Ok(s) => s, }; let real_local = socket.local_addr().unwrap(); println!( "{} Client connecting: {:?} -> {:?}", if args.use_old_http { "H9" } else { "H3" }, real_local, remote_addr, ); if !args.use_old_http { client( &args, socket, real_local, remote_addr, &format!("{}", host), &urls, )?; } else if !args.download_in_series { let token = if args.resume { // Download first URL using a separate connection, save the token and use it for // the remaining URLs if urls.len() < 2 { eprintln!("Warning: resumption tests won't work without >1 URL"); exit(127) } let first_url = urls.remove(0); old::old_client( &args, &socket, real_local, remote_addr, &format!("{}", host), &[first_url], None, )? 
} else { None }; old::old_client( &args, &socket, real_local, remote_addr, &format!("{}", host), &urls, token, )?; } else { let mut token: Option<ResumptionToken> = None; for url in urls { token = old::old_client( &args, &socket, real_local, remote_addr, &format!("{}", host), &[url], token, )?; } } } Ok(()) } mod old { use std::cell::RefCell; use std::collections::{HashMap, VecDeque}; use std::fs::File; use std::io::{ErrorKind, Write}; use std::net::{SocketAddr, UdpSocket}; use std::path::PathBuf; use std::process::exit; use std::rc::Rc; use std::time::Instant; use url::Url; use super::{qlog_new, KeyUpdateState, Res}; use neqo_common::{event::Provider, Datagram}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_transport::{ Connection, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, QuicVersion, State, StreamType, }; use super::{emit_datagram, get_output_file, Args}; struct HandlerOld<'b> { streams: HashMap<u64, Option<File>>, url_queue: VecDeque<Url>, all_paths: Vec<PathBuf>, args: &'b Args, token: Option<ResumptionToken>, key_update: KeyUpdateState, } impl<'b> HandlerOld<'b> { fn download_urls(&mut self, client: &mut Connection) { loop { if self.url_queue.is_empty() { break; } if self.streams.len() >= self.args.concurrency { break; } if !self.download_next(client) { break; } } } fn download_next(&mut self, client: &mut Connection) -> bool { if self.key_update.needed() { println!("Deferring requests until after first key update"); return false; } let url = self .url_queue .pop_front() .expect("download_next called with empty queue"); match client.stream_create(StreamType::BiDi) { Ok(client_stream_id) => { println!("Created stream {} for {}", client_stream_id, url); let req = format!("GET {}\r\n", url.path()); client .stream_send(client_stream_id, req.as_bytes()) .unwrap(); let _ = client.stream_close_send(client_stream_id); let out_file = get_output_file(&url, &self.args.output_dir, &mut self.all_paths); 
self.streams.insert(client_stream_id, out_file); true } Err(e @ Error::StreamLimitError) | Err(e @ Error::ConnectionState) => { println!("Cannot create stream {:?}", e); self.url_queue.push_front(url); false } Err(e) => { panic!("Error creating stream {:?}", e); } } } /// Read and maybe print received data from a stream. // Returns bool: was fin received? fn read_from_stream( client: &mut Connection, stream_id: u64, output_read_data: bool, maybe_out_file: &mut Option<File>, ) -> Res<bool> { let mut data = vec![0; 4096]; loop { let (sz, fin) = client.stream_recv(stream_id, &mut data)?; if sz == 0 { return Ok(fin); } if let Some(out_file) = maybe_out_file { out_file.write_all(&data[..sz])?; } else if !output_read_data { println!("READ[{}]: {} bytes", stream_id, sz); } else { println!( "READ[{}]: {}", stream_id, String::from_utf8(data.clone()).unwrap() ) } if fin { return Ok(true); } } } fn maybe_key_update(&mut self, c: &mut Connection) -> Res<()> { self.key_update.maybe_update(|| c.initiate_key_update())?; self.download_urls(c); Ok(()) } fn read(&mut self, client: &mut Connection, stream_id: u64) -> Res<bool> { let mut maybe_maybe_out_file = self.streams.get_mut(&stream_id); match &mut maybe_maybe_out_file { None => { println!("Data on unexpected stream: {}", stream_id); return Ok(false); } Some(maybe_out_file) => { let fin_recvd = Self::read_from_stream( client, stream_id, self.args.output_read_data, maybe_out_file, )?; if fin_recvd { if maybe_out_file.is_none() { println!("<FIN[{}]>", stream_id); } self.streams.remove(&stream_id); self.download_urls(client); if self.streams.is_empty() && self.url_queue.is_empty() { return Ok(false); } } } } Ok(true) } /// Just in case we didn't get a resumption token event, this /// iterates through events until one is found. 
fn get_token(&mut self, client: &mut Connection) { for event in client.events() { if let ConnectionEvent::ResumptionToken(token) = event { self.token = Some(token); } } } fn handle(&mut self, client: &mut Connection) -> Res<bool> { while let Some(event) = client.next_event() { match event { ConnectionEvent::AuthenticationNeeded => { client.authenticated(AuthenticationStatus::Ok, Instant::now()); } ConnectionEvent::RecvStreamReadable { stream_id } => { if !self.read(client, stream_id)? { self.get_token(client); client.close(Instant::now(), 0, "kthxbye!"); return Ok(false); }; } ConnectionEvent::SendStreamWritable { stream_id } => { println!("stream {} writable", stream_id) } ConnectionEvent::SendStreamComplete { stream_id } => { println!("stream {} complete", stream_id); } ConnectionEvent::SendStreamCreatable { stream_type } => { println!("stream {:?} creatable", stream_type); if stream_type == StreamType::BiDi { self.download_urls(client); } } ConnectionEvent::StateChange(State::WaitInitial) | ConnectionEvent::StateChange(State::Handshaking) | ConnectionEvent::StateChange(State::Connected) => { println!("{:?}", event); self.download_urls(client); } ConnectionEvent::StateChange(State::Confirmed) => { self.maybe_key_update(client)?; } ConnectionEvent::ResumptionToken(token) => { self.token = Some(token); } _ => { println!("Unhandled event {:?}", event); } } } Ok(true) } } fn process_loop_old( local_addr: &SocketAddr, socket: &UdpSocket, client: &mut Connection, handler: &mut HandlerOld, ) -> Res<State> { let buf = &mut [0u8; 2048]; loop { if let State::Closed(..) 
= client.state() { return Ok(client.state().clone()); } let mut exiting = !handler.handle(client)?; loop { match client.process_output(Instant::now()) { Output::Datagram(dgram) => { if let Err(e) = emit_datagram(&socket, dgram) { eprintln!("UDP write error: {}", e); client.close(Instant::now(), 0, e.to_string()); exiting = true; break; } } Output::Callback(duration) => { socket.set_read_timeout(Some(duration)).unwrap(); break; } Output::None => { // Not strictly necessary, since we're about to exit socket.set_read_timeout(None).unwrap(); exiting = true; break; } } } if exiting { return Ok(client.state().clone()); } match socket.recv_from(&mut buf[..]) { Err(err) => { if err.kind() != ErrorKind::WouldBlock && err.kind() != ErrorKind::Interrupted { eprintln!("UDP error: {}", err); exit(1); } } Ok((sz, addr)) => { if sz == buf.len() { eprintln!("Received more than {} bytes", buf.len()); continue; } if sz > 0 { let d = Datagram::new(addr, *local_addr, &buf[..sz]); client.process_input(d, Instant::now()); handler.maybe_key_update(client)?; } } } } } pub fn old_client( args: &Args, socket: &UdpSocket, local_addr: SocketAddr, remote_addr: SocketAddr, origin: &str, urls: &[Url], token: Option<ResumptionToken>, ) -> Res<Option<ResumptionToken>> { let (quic_protocol, alpn) = match args.alpn.as_str() { "hq-27" => (QuicVersion::Draft27, "hq-27"), "hq-28" => (QuicVersion::Draft28, "hq-28"), "hq-30" => (QuicVersion::Draft30, "hq-30"), "hq-31" => (QuicVersion::Draft31, "hq-31"), "hq-32" => (QuicVersion::Draft32, "hq-32"), _ => (QuicVersion::Draft29, "hq-29"), }; let mut client = Connection::new_client( origin, &[alpn], Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), local_addr, remote_addr, args.quic_parameters.get().quic_version(quic_protocol), )?; if let Some(tok) = token { client.enable_resumption(Instant::now(), tok)?; } let ciphers = args.get_ciphers(); if !ciphers.is_empty() { client.set_ciphers(&ciphers)?; } client.set_qlog(qlog_new(args, origin, 
&client.odcid().unwrap())?); let key_update = KeyUpdateState(args.key_update); let mut h = HandlerOld { streams: HashMap::new(), url_queue: VecDeque::from(urls.to_vec()), all_paths: Vec::new(), args: &args, token: None, key_update, }; process_loop_old(&local_addr, &socket, &mut client, &mut h)?; let token = if args.resume { // If we haven't received an event, take a token if there is one. // Lots of servers don't provide NEW_TOKEN, but a session ticket // without NEW_TOKEN is better than nothing. h.token .or_else(|| client.take_resumption_token(Instant::now())) } else { None }; Ok(token) } }
33.083097
100
0.475896
9b64d54edd1237ac927dbcf668321bde4997dfec
1,627
/*
 * DMNTK - Decision Model and Notation Toolkit
 *
 * MIT license
 *
 * Copyright (c) 2018-2022 Dariusz Depta Engos Software
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Apache license, Version 2.0
 *
 * Copyright (c) 2018-2022 Dariusz Depta Engos Software
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

use super::super::*;
use crate::model_evaluator::ModelEvaluator;
use std::sync::Arc;

lazy_static! {
    // Model evaluator built once for the whole test module from the
    // DMN 3.0077 example model; shared (via Arc) across test cases.
    static ref MODEL_EVALUATOR: Arc<ModelEvaluator> = build_model_evaluator(dmntk_examples::DMN_3_0077);
}

#[test]
fn _0001() {
    // Empty input context: `decision_001` is expected to fail with a
    // division-by-zero null, and the null's message text is part of the
    // asserted contract.
    let ctx = context(r#"{}"#);
    assert_decision(&MODEL_EVALUATOR, "decision_001", &ctx, r#"null([division] division by zero)"#);
}
35.369565
102
0.736939
26343d3d115c49f0bca77d72216e466d8c924f9c
986
use gdnative::export::hint::*; use gdnative::prelude::*; fn test_hint() -> StringHint { StringHint::File(EnumHint::new(vec![])) } fn test_before_get(_this: &Foo, _owner: TRef<Node>) {} fn test_before_set(_this: &mut Foo, _owner: TRef<Node>) {} fn test_after_get(_this: &Foo, _owner: TRef<Node>) {} fn test_after_set(_this: &mut Foo, _owner: TRef<Node>) {} #[derive(Default, NativeClass)] #[inherit(Node)] struct Foo { #[property] bar: String, // hint #[property(hint = "test_hint")] prop_hint: String, // before get & set #[property(before_get = "test_before_get")] prop_before_get: String, #[property(before_set = "test_before_set")] prop_before_set: String, // after get & set #[property(after_get = "test_after_get")] prop_after_get: String, #[property(after_set = "test_after_set")] prop_after_set: String, } #[methods] impl Foo { fn new(_owner: &Node) -> Self { Foo::default() } } fn main() {}
22.930233
58
0.639959
fe3ef728f43357cb9d8a8a490b1e65a06a7a8c96
20,114
// Copyright (c) 2015-2017 Ivo Wetzel // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // STD Dependencies ----------------------------------------------------------- use std::thread; use std::time::{Duration, Instant}; use std::io::ErrorKind; use std::sync::mpsc::TryRecvError; // Internal Dependencies ------------------------------------------------------ use super::MockSocket; use ::{ BinaryRateLimiter, ConnectionID, Config, MessageKind, NoopPacketModifier, Server, ServerEvent }; // Macros --------------------------------------------------------------------- macro_rules! assert_millis_since { ($start:expr, $target:expr, $difference:expr) => { { let duration = $start.elapsed(); let millis = (duration.subsec_nanos() / 1000000) as u64; let actual = (duration.as_secs() * 1000 + millis) as i64; let min = $target - $difference; let max = $target + $difference; if actual < min || actual > max { panic!(format!("Value {} not in range {} - {}", actual, min, max)); } } } } // Tests ---------------------------------------------------------------------- #[test] fn test_server_configuration() { let config = Config { send_rate: 30, .. Config::default() }; let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(config); assert_eq!(server.config(), config); let config = Config { send_rate: 60, .. Config::default() }; server.set_config(config); assert_eq!(server.config(), config); } #[test] fn test_server_internals() { let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(Config { send_rate: 30, .. 
Config::default() }); assert_eq!(server.socket().unwrap_err().kind(), ErrorKind::NotConnected); } #[test] fn test_server_disconnected() { let conn_id = ConnectionID(0); let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(Config { send_rate: 30, .. Config::default() }); assert_eq!(server.bytes_sent(), 0); assert_eq!(server.bytes_received(), 0); assert_eq!(server.local_addr().unwrap_err().kind(), ErrorKind::AddrNotAvailable); assert_eq!(server.connection(&conn_id).unwrap_err().kind(), ErrorKind::NotConnected); assert_eq!(server.connections().len(), 0); assert_eq!(server.accept_receive(), Err(TryRecvError::Disconnected)); assert_eq!(server.send(false).unwrap_err().kind(), ErrorKind::NotConnected); assert_eq!(server.shutdown().unwrap_err().kind(), ErrorKind::NotConnected); } #[test] fn test_server_listen_shutdown() { let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(Config::default()); assert!(server.listen("127.0.0.1:1234").is_ok()); assert_eq!(server.listen("127.0.0.1:1234").unwrap_err().kind(), ErrorKind::AlreadyExists); assert!(server.socket().is_ok()); assert!(server.shutdown().is_ok()); assert_eq!(server.socket().unwrap_err().kind(), ErrorKind::NotConnected); assert_eq!(server.shutdown().unwrap_err().kind(), ErrorKind::NotConnected); } #[test] fn test_server_flow_without_connections() { let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(Config::default()); assert!(server.listen("127.0.0.1:1234").is_ok()); assert_eq!(server.accept_receive(), Err(TryRecvError::Empty)); assert!(server.send(false).is_ok()); assert!(server.shutdown().is_ok()); } #[test] fn test_server_flush_without_delay() { let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(Config::default()); server.listen("127.0.0.1:1234").ok(); let start = Instant::now(); for _ in 0..5 { server.accept_receive().ok(); server.send(false).ok(); } assert_millis_since!(start, 0, 16); } #[test] fn 
test_server_connection() { let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(Config::default()); server.listen("127.0.0.1:1234").ok(); // Accept a incoming connection server.socket().unwrap().mock_receive(vec![ ("255.1.1.1:1000", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 0, 0, 0, 0, 0, 0 ]) ]); assert_eq!(server_events(&mut server), vec![ ServerEvent::Connection(ConnectionID(151521030)) ]); assert_eq!(server.connections().keys().collect::<Vec<&ConnectionID>>(), vec![&ConnectionID(151521030)]); // Accept another incoming connection server.socket().unwrap().mock_receive(vec![ ("255.1.1.1:2000", vec![ 1, 2, 3, 4, 4, 0, 0, 1, 0, 0, 0, 0, 0, 0 ]) ]); assert_eq!(server_events(&mut server), vec![ ServerEvent::Connection(ConnectionID(67108865)) ]); { let mut keys = server.connections().keys().collect::<Vec<&ConnectionID>>(); keys.sort(); assert_eq!(keys, vec![ &ConnectionID(67108865), &ConnectionID(151521030) ]); } // Switch the first connection to another peer address server.socket().unwrap().mock_receive(vec![ ("255.1.1.2:1003", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 0, 0, 0, 0, 0, 0 ]) ]); // Shutdown and drop all connections server.shutdown().ok(); assert_eq!(server_events(&mut server), vec![]); // All connections should have been removed assert!(server.connections().keys().collect::<Vec<&ConnectionID>>().is_empty()); assert!(server.connection(&ConnectionID(151521030)).is_err()); assert!(server.connection(&ConnectionID(67108865)).is_err()); } #[test] fn test_server_connection_address_remap() { let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(Config::default()); server.listen("127.0.0.1:1234").ok(); // Accept a incoming connection server.socket().unwrap().mock_receive(vec![ ("255.1.1.1:1000", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 0, 0, 0, 0, 0, 0 ]) ]); assert_eq!(server_events(&mut server), vec![ ServerEvent::Connection(ConnectionID(151521030)) ]); // Test send to initial address server.send(false).ok(); 
server.socket().unwrap().assert_sent(vec![ ("255.1.1.1:1000", [ 1, 2, 3, 4, 9, 8, 7, 6, 0, 0, 0, 0, 0, 0 ].to_vec()) ]); // Receive from same connection id but different address server.socket().unwrap().mock_receive(vec![ ("255.1.1.4:2000", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 1, 0, 0, 0, 0, 0 ]) ]); // Trigger receival and address re-map server.accept_receive().ok(); // Check send to new address server.send(false).ok(); server.socket().unwrap().assert_sent(vec![ ("255.1.1.4:2000", [ 1, 2, 3, 4, 9, 8, 7, 6, 1, 1, 0, 0, 0, 1 ].to_vec()) ]); // Re-map should not happen for packets with older sequence numbers server.socket().unwrap().mock_receive(vec![ ("255.1.1.8:4000", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 0, 0, 0, 0, 0, 0 ]) ]); server.accept_receive().ok(); server.send(false).ok(); // Verify send to first re-mapped address server.socket().unwrap().assert_sent(vec![ ("255.1.1.4:2000", [ 1, 2, 3, 4, 9, 8, 7, 6, 2, 1, 0, 0, 0, 1 ].to_vec()) ]); assert_eq!(server_events(&mut server), vec![ // There should be no connection event here since the connection id // got remapped to the new peer address ]); } #[test] fn test_server_reset_events() { let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(Config::default()); server.listen("127.0.0.1:1234").ok(); // Accept a incoming connection server.socket().unwrap().mock_receive(vec![ ("255.1.1.1:1000", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 0, 0, 0, 0, 0, 0 ]) ]); // Fetch events from the connections server.accept_receive().ok(); server.send(false).ok(); // Shutdown should clear events server.shutdown().ok(); // Re-bind and make events accesible again server.listen("127.0.0.1:1234").ok(); assert!(server_events(&mut server).is_empty()); } #[test] fn test_server_send() { let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(Config::default()); server.listen("127.0.0.1:1234").ok(); // Accept a incoming connection server.socket().unwrap().mock_receive(vec![ ("255.1.1.1:1000", vec![ 1, 2, 3, 4, 9, 8, 
7, 6, 0, 0, 0, 0, 0, 0 ]) ]); assert_eq!(server_events(&mut server), vec![ ServerEvent::Connection(ConnectionID(151521030)) ]); assert!(server.connection(&ConnectionID(1)).is_err()); // Send via connection handle server.connection(&ConnectionID(151521030)).unwrap().send(MessageKind::Instant, b"Foo".to_vec()); server.connection(&ConnectionID(151521030)).unwrap().send(MessageKind::Instant, b"Bar".to_vec()); // Check connections map entries assert_eq!(server.connections().keys().collect::<Vec<&ConnectionID>>(), vec![&ConnectionID(151521030)]); // Stats should not be updated before send is called assert_eq!(server.bytes_sent(), 0); assert_eq!(server.bytes_received(), 0); // No messages should be send before send is called server.socket().unwrap().assert_sent_none(); // Both messages should be send after the send call server.send(false).ok(); server.socket().unwrap().assert_sent(vec![("255.1.1.1:1000", [ 1, 2, 3, 4, 9, 8, 7, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 70, 111, 111, 0, 0, 0, 3, 66, 97, 114 ].to_vec())]); // Stats should be updated after send call assert_eq!(server.bytes_sent(), 28); assert_eq!(server.bytes_received(), 14); // Switch connection to new address server.socket().unwrap().mock_receive(vec![ ("255.1.1.2:1001", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 1, 0, 0, 0, 0, 0 ]) ]); server.accept_receive().ok(); // Send to new address server.connection(&ConnectionID(151521030)).unwrap().send(MessageKind::Instant, b"Baz".to_vec()); // Check connections map entries assert_eq!(server.connections().keys().collect::<Vec<&ConnectionID>>(), vec![&ConnectionID(151521030)]); // Message should be send to the new address of the connection server.send(false).ok(); server.socket().unwrap().assert_sent(vec![ ("255.1.1.2:1001", [ 1, 2, 3, 4, 9, 8, 7, 6, 1, 1, 0, 0, 0, 1, 0, 0, 0, 3, 66, 97, 122 ].to_vec()) ]); assert_eq!(server.bytes_sent(), 49); assert_eq!(server.bytes_received(), 28); // Shutdown and reset stats server.shutdown().ok(); assert_eq!(server.bytes_sent(), 0); 
assert_eq!(server.bytes_received(), 0); } #[test] fn test_server_receive() { let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(Config::default()); server.listen("127.0.0.1:1234").ok(); // Accept incoming connections server.socket().unwrap().mock_receive(vec![ ("255.1.1.1:1000", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 66, 97, 122 ]), ("255.1.1.2:2000", vec![ 1, 2, 3, 4, 5, 5, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 3, 70, 111, 111 ]) ]); server.socket().unwrap().mock_receive(vec![ ("255.1.1.1:1000", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 66, 97, 122 ]), ("255.1.1.2:2000", vec![ 1, 2, 3, 4, 5, 5, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 3, 70, 111, 111 ]) ]); assert_eq!(server_events(&mut server), vec![ ServerEvent::Connection(ConnectionID(151521030)), ServerEvent::Message(ConnectionID(151521030), b"Baz".to_vec()), ServerEvent::Connection(ConnectionID(84214017)), ServerEvent::Message(ConnectionID(84214017), b"Foo".to_vec()) ]); // Should ignore duplicates server.socket().unwrap().mock_receive(vec![ ("255.1.1.1:1000", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 66, 97, 122 ]), ("255.1.1.2:2000", vec![ 1, 2, 3, 4, 5, 5, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 3, 70, 111, 111 ]) ]); assert!(server_events(&mut server).is_empty()); // Receive additional messages server.socket().unwrap().mock_receive(vec![ ("255.1.1.1:1000", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 1, 0, 0, 0, 0, 0, 1, 0, 0, 3, 70, 111, 111 ]), ("255.1.1.2:2000", vec![ 1, 2, 3, 4, 5, 5, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 66, 97, 122 ]) ]); assert_eq!(server_events(&mut server), vec![ ServerEvent::Message(ConnectionID(151521030), b"Foo".to_vec()), ServerEvent::Message(ConnectionID(84214017), b"Baz".to_vec()) ]); } #[test] fn test_server_connection_close() { let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(Config::default()); server.listen("127.0.0.1:1234").ok(); // Accept incoming connections 
server.socket().unwrap().mock_receive(vec![ ("255.1.1.1:1000", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 0, 0, 0, 0, 0, 0 ]), ("255.1.1.2:2000", vec![ 1, 2, 3, 4, 5, 5, 1, 1, 0, 0, 0, 0, 0, 0 ]) ]); server.socket().unwrap().mock_receive(vec![ ("255.1.1.1:1000", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 66, 97, 122 ]), ("255.1.1.2:2000", vec![ 1, 2, 3, 4, 5, 5, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 3, 70, 111, 111 ]) ]); assert_eq!(server_events(&mut server), vec![ ServerEvent::Connection(ConnectionID(151521030)), ServerEvent::Connection(ConnectionID(84214017)), ]); // Receive closure packet server.socket().unwrap().mock_receive(vec![ ("255.1.1.1:1000", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 0, 128, // Most distant sequence numbers 85, 85, 85, 85 // ack bitfield with every second bit set ]) ]); // Expect closure by remote assert_eq!(server_events(&mut server), vec![ ServerEvent::ConnectionClosed(ConnectionID(151521030), true) ]); // Connection should still exist after next send() call server.send(false).ok(); assert!(server.connection(&ConnectionID(151521030)).is_ok()); // But should be gone after second to next send() call server.send(false).ok(); assert!(server.connection(&ConnectionID(151521030)).is_err()); // Close via connection handle server.connection(&ConnectionID(84214017)).unwrap().close(); // Connection should still be there during closing threshold assert_eq!(server_events(&mut server), vec![]); // Expect connection to be dropped after closing threshold thread::sleep(Duration::from_millis(165)); assert_eq!(server_events(&mut server), vec![ ServerEvent::ConnectionClosed(ConnectionID(84214017), false) ]); } #[test] fn test_server_connection_loss() { let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(Config { connection_drop_threshold: Duration::from_millis(100), .. 
Config::default() }); server.listen("127.0.0.1:1234").ok(); // Accept a incoming connection server.socket().unwrap().mock_receive(vec![ ("255.1.1.1:1000", vec![ 1, 2, 3, 4, 9, 8, 7, 6, 0, 0, 0, 0, 0, 0 ]) ]); assert_eq!(server_events(&mut server), vec![ ServerEvent::Connection(ConnectionID(151521030)) ]); assert!(server.connection(&ConnectionID(151521030)).is_ok()); assert_eq!(server.connections().keys().collect::<Vec<&ConnectionID>>(), vec![&ConnectionID(151521030)]); // Let the connection attempt time out thread::sleep(Duration::from_millis(200)); let events = server_events(&mut server); // Connection should still be there when fetching the events assert_eq!(events, vec![ServerEvent::ConnectionLost(ConnectionID(151521030), true)]); assert!(server.connection(&ConnectionID(151521030)).is_ok()); assert_eq!(server.connections().len(), 1); // But connection be gone after next send() call server.send(false).ok(); assert!(server.connection(&ConnectionID(151521030)).is_err()); assert_eq!(server.connections().len(), 0); // We expect no additional packets to be send once the connection was lost server.socket().unwrap().assert_sent_none(); } #[test] #[cfg(target_os = "linux")] fn test_server_flush_auto_delay() { let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(Config::default()); server.listen("127.0.0.1:1234").ok(); let start = Instant::now(); for _ in 0..5 { server.accept_receive().ok(); server.send(true).ok(); } assert_millis_since!(start, 167, 33); } #[test] #[cfg(target_os = "linux")] fn test_server_auto_delay_with_load() { let mut server = Server::<MockSocket, BinaryRateLimiter, NoopPacketModifier>::new(Config::default()); server.listen("127.0.0.1:1234").ok(); // Without load let start = Instant::now(); for _ in 0..10 { server.accept_receive().ok(); server.send(true).ok(); } assert_millis_since!(start, 330, 16); // With load let start = Instant::now(); for _ in 0..10 { server.accept_receive().ok(); 
thread::sleep(Duration::from_millis(10)); server.send(true).ok(); } assert_millis_since!(start, 330, 16); // With more load let start = Instant::now(); for _ in 0..10 { server.accept_receive().ok(); thread::sleep(Duration::from_millis(20)); server.send(true).ok(); } assert_millis_since!(start, 330, 16); } // TODO test congestion state changes // TODO test packet lost events // Helpers -------------------------------------------------------------------- fn server_events(server: &mut Server<MockSocket, BinaryRateLimiter, NoopPacketModifier>) -> Vec<ServerEvent> { server.send(false).ok(); let mut events = Vec::new(); while let Ok(event) = server.accept_receive() { events.push(event); } events }
27.217862
110
0.544447
f5dd2a40cc4cb8e15d1c152dcf307326344680c0
149
// compile-flags: -Z sanitizer=address -C target-feature=+crt-static --target x86_64-unknown-linux-gnu #![feature(no_core)] #![no_core] #![no_main]
24.833333
102
0.718121
4b14d4d1e53f2af56469f5196df1e5a0fb6425a1
1,111
use std::path::{Path, PathBuf}; use std::process::{Child, Command}; pub fn setup_input_path(test_image_path: &str) -> PathBuf { Path::new(env!("CARGO_MANIFEST_DIR")) .join("resources") .join(test_image_path) } pub fn setup_output_path(test_output_path: &str) -> PathBuf { Path::new(env!("CARGO_MANIFEST_DIR")) .join("target") .join(test_output_path) } /// In and output path prefixes are pre-defined. pub fn command(input: &str, output: &str, args: &str) -> Child { let input = setup_input_path(&input); let input = input.to_str().unwrap(); let output = setup_output_path(&output); let output = output.to_str().unwrap(); let mut command = Command::new("cargo"); let mut arguments = vec!["run", "--", "-i", input, "-o", output]; let provided: Vec<&str> = args.split_whitespace().collect(); arguments.extend(provided); command.args(arguments); command.spawn().expect("Couldn't spawn child process.") } pub const DEFAULT_IN: &str = "rainbow_8x6.bmp"; macro_rules! assert_not { ($e:expr) => { assert!(!$e) }; }
27.775
69
0.636364
d52df395980ebc9482d737c5f1051cd52868b02c
145
mod basic_auth; mod password; mod session; pub use basic_auth::BasicAuth; pub use password::Password; pub use session::{Session, SessionToken};
18.125
41
0.77931
ff4d8807d8fa3f81a83836977ed54327f5454006
10,203
use crate::{ math::{ field, polynom }, processor::OpCode, stark::TraceState, utils::hasher::ARK, BASE_CYCLE_LENGTH, HASH_STATE_WIDTH }; use super::utils::{ are_equal, is_zero, is_binary, binary_not, extend_constants, EvaluationResult, enforce_stack_copy, enforce_left_shift, enforce_right_shift, }; mod input; use input::{ enforce_push, enforce_read, enforce_read2 }; mod arithmetic; use arithmetic::{ enforce_add, enforce_mul, enforce_inv, enforce_neg, enforce_not, enforce_and, enforce_or, enforce_rotateleft32, enforce_xor32 }; mod manipulation; use manipulation::{ enforce_dup, enforce_dup2, enforce_dup4, enforce_pad2, enforce_drop, enforce_drop4, enforce_swap, enforce_swap2, enforce_swap4, enforce_roll4, enforce_roll8, }; mod comparison; use comparison::{ enforce_assert, enforce_asserteq, enforce_eq, enforce_cmp, enforce_binacc }; mod conditional; use conditional::{ enforce_choose, enforce_choose2, enforce_cswap2 }; mod hash; use hash::{ enforce_rescr }; // CONSTANTS // ================================================================================================ pub const NUM_AUX_CONSTRAINTS: usize = 2; const AUX_CONSTRAINT_DEGREES: [usize; NUM_AUX_CONSTRAINTS] = [7, 7]; const STACK_TRANSITION_DEGREE: usize = 7; // degree for all stack register transition constraints // TYPES AND INTERFACES // ================================================================================================ pub struct Stack { trace_length : usize, cycle_length : usize, ark_values : Vec<[u128; 2 * HASH_STATE_WIDTH]>, ark_polys : Vec<Vec<u128>>, constraint_degrees : Vec<usize>, } // STACK CONSTRAINT EVALUATOR IMPLEMENTATION // ================================================================================================ impl Stack { pub fn new(trace_length: usize, extension_factor: usize, stack_depth: usize) -> Stack { // build an array of constraint degrees for the stack let mut degrees = Vec::from(&AUX_CONSTRAINT_DEGREES[..]); degrees.resize(stack_depth + NUM_AUX_CONSTRAINTS, 
STACK_TRANSITION_DEGREE); // determine extended cycle length let cycle_length = BASE_CYCLE_LENGTH * extension_factor; // extend rounds constants by the specified extension factor let (ark_polys, ark_evaluations) = extend_constants(&ARK, extension_factor); let ark_values = transpose_ark_constants(ark_evaluations, cycle_length); return Stack { trace_length, cycle_length, ark_values, ark_polys, constraint_degrees: degrees, }; } pub fn constraint_degrees(&self) -> &[usize] { return &self.constraint_degrees; } // EVALUATOR FUNCTIONS // -------------------------------------------------------------------------------------------- /// Evaluates stack transition constraints at the specified step of the evaluation domain and /// saves the evaluations into `result`. pub fn evaluate(&self, current: &TraceState, next: &TraceState, step: usize, result: &mut [u128]) { // determine round constants at the specified step let ark = self.ark_values[step % self.cycle_length]; // evaluate transition constraints for the stack enforce_constraints(current, next, &ark, result); } /// Evaluates stack transition constraints at the specified x coordinate and saves the /// evaluations into `result`. Unlike the function above, this function can evaluate constraints /// at any out-of-domain point, but it is much slower than the previous function. 
pub fn evaluate_at(&self, current: &TraceState, next: &TraceState, x: u128, result: &mut [u128]) { // map x to the corresponding coordinate in constant cycles let num_cycles = (self.trace_length / BASE_CYCLE_LENGTH) as u128; let x = field::exp(x, num_cycles); // determine round constants at the specified x coordinate let mut ark = [field::ZERO; 2 * HASH_STATE_WIDTH]; for i in 0..ark.len() { ark[i] = polynom::eval(&self.ark_polys[i], x); } // evaluate transition constraints for the stack enforce_constraints(current, next, &ark, result); } } // HELPER FUNCTIONS // ================================================================================================ fn enforce_constraints(current: &TraceState, next: &TraceState, ark: &[u128], result: &mut [u128]) { // split constraint evaluation result into aux constraints and stack constraints let (aux, result) = result.split_at_mut(NUM_AUX_CONSTRAINTS); // get user stack registers from current and next steps let old_stack = current.user_stack(); let new_stack = next.user_stack(); // initialize a vector to hold stack constraint evaluations; this is needed because // constraint evaluator functions assume that the stack is at least 8 items deep; while // it may actually be smaller than that let mut evaluations = vec![field::ZERO; old_stack.len()]; // 1 ----- enforce constraints for low-degree operations -------------------------------------- let ld_flags = current.ld_op_flags(); // assertion operations enforce_assert (&mut evaluations, aux, old_stack, new_stack, ld_flags[OpCode::Assert.ld_index()]); enforce_asserteq(&mut evaluations, aux, old_stack, new_stack, ld_flags[OpCode::AssertEq.ld_index()]); // input operations enforce_read (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Read.ld_index()]); enforce_read2 (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Read2.ld_index()]); // stack manipulation operations enforce_dup (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Dup.ld_index()]); 
enforce_dup2 (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Dup2.ld_index()]); enforce_dup4 (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Dup4.ld_index()]); enforce_pad2 (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Pad2.ld_index()]); enforce_drop (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Drop.ld_index()]); enforce_drop4 (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Drop4.ld_index()]); enforce_swap (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Swap.ld_index()]); enforce_swap2 (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Swap2.ld_index()]); enforce_swap4 (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Swap4.ld_index()]); enforce_roll4 (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Roll4.ld_index()]); enforce_roll8 (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Roll8.ld_index()]); // arithmetic and boolean operations enforce_add (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Add.ld_index()]); enforce_mul (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Mul.ld_index()]); // enforce_rotateleft32 (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::RotateLeft32.ld_index()]); // enforce_xor32 (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Xor32.ld_index()]); enforce_inv (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Inv.ld_index()]); enforce_neg (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::Neg.ld_index()]); enforce_not (&mut evaluations, aux, old_stack, new_stack, ld_flags[OpCode::Not.ld_index()]); enforce_and (&mut evaluations, aux, old_stack, new_stack, ld_flags[OpCode::And.ld_index()]); enforce_or (&mut evaluations, aux, old_stack, new_stack, ld_flags[OpCode::Or.ld_index()]); // comparison operations enforce_eq (&mut evaluations, aux, old_stack, new_stack, ld_flags[OpCode::Eq.ld_index()]); enforce_binacc (&mut evaluations, old_stack, new_stack, ld_flags[OpCode::BinAcc.ld_index()]); // 
conditional selection operations enforce_choose (&mut evaluations, aux, old_stack, new_stack, ld_flags[OpCode::Choose.ld_index()]); enforce_choose2 (&mut evaluations, aux, old_stack, new_stack, ld_flags[OpCode::Choose2.ld_index()]); enforce_cswap2 (&mut evaluations, aux, old_stack, new_stack, ld_flags[OpCode::CSwap2.ld_index()]); // 2 ----- enforce constraints for high-degree operations -------------------------------------- let hd_flags = current.hd_op_flags(); enforce_push (&mut evaluations, old_stack, new_stack, hd_flags[OpCode::Push.hd_index() ]); enforce_cmp (&mut evaluations, old_stack, new_stack, hd_flags[OpCode::Cmp.hd_index() ]); enforce_rescr (&mut evaluations, old_stack, new_stack, ark, hd_flags[OpCode::RescR.hd_index()]); // 3 ----- enforce constraints for composite operations --------------------------------------- // BEGIN and NOOP have "composite" opcodes where all 7 opcode bits are set to either 1s or 0s; // thus, the flags for these operations are computed separately by multiplying all opcodes; // this results in flag degree of 7 for each operation, but since both operations enforce the // same constraints (the stack doesn't change), higher degree terms cancel out, and we // end up with overall constraint degree of (6 + 1 = 7) for both operations. enforce_stack_copy(&mut evaluations, old_stack, new_stack, 0, current.begin_flag()); enforce_stack_copy(&mut evaluations, old_stack, new_stack, 0, current.noop_flag()); // 4 ----- copy evaluations into the result --------------------------------------------------- result.copy_from_slice(&evaluations[..result.len()]); } fn transpose_ark_constants(constants: Vec<Vec<u128>>, cycle_length: usize) -> Vec<[u128; 2 * HASH_STATE_WIDTH]> { let mut values = Vec::new(); for i in 0..cycle_length { values.push([field::ZERO; 2 * HASH_STATE_WIDTH]); for j in 0..(2 * HASH_STATE_WIDTH) { values[i][j] = constants[j][i]; } } return values; }
48.818182
121
0.648143
e96905d7673125c288859526fcbabeaf152f79d7
17,103
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! Type that represents a path in a file system. Objects of this type own a String that holds the //! "full" path and provide an iterator that goes over individual components of the path. This //! approach is used to allow passing the path string, from one `open()` method to the next, //! without the need to copy the path itself. use { fidl_fuchsia_io::{MAX_FILENAME, MAX_PATH}, fuchsia_zircon::Status, }; #[derive(Clone, Debug, PartialEq)] pub struct Path { is_dir: bool, inner: String, next: usize, } impl Path { /// Creates an empty path. pub fn empty() -> Path { Path { is_dir: false, inner: String::new(), next: 0 } } /// Splits a `path` string into components, also checking if it is in a canonical form, /// disallowing any "." and ".." components, as well as empty component names. pub fn validate_and_split<Source>(path: Source) -> Result<Path, Status> where Source: Into<String>, { let path = path.into(); // Make sure that we don't accept paths longer than POSIX's PATH_MAX, plus one character // which accounts for the null terminator. if (path.len() as u64) > std::cmp::min(MAX_PATH, libc::PATH_MAX as u64 - 1) { return Err(Status::BAD_PATH); } match path.as_str() { "" => { // `path.split('/')` below will split an empty string into one component that is an // empty string. But we disallow empty components. Empty path is a special case // anyways. Ok(Path { is_dir: false, inner: path, next: 0 }) } "/" => { // Need to have a special case for this, as well. Otherwise code below will treat // this path as having one empty component - string before the first '/'. Also, // `into_string` does not return the trailing '/' when the last component has been // reached. One can argue for a path like this it has already have happened. 
Ok(Path { is_dir: true, inner: path, next: 1 }) } _ => { let is_dir = path.ends_with('/'); // Disallow empty components, ".", and ".."s. Path is expected to be // canonicalized. See fxbug.dev/28436 for discussion of empty components. { let mut check = path.split('/'); // Allow trailing slash to indicate a directory. if is_dir { let _ = check.next_back(); } if check.any(|c| { c.is_empty() || c == ".." || c == "." || (c.len() as u64) > MAX_FILENAME }) { return Err(Status::BAD_PATH); } } Ok(Path { is_dir, inner: path, next: 0 }) } } } /// Returns `true` when there are no more compoenents left in this `Path`. pub fn is_empty(&self) -> bool { self.next >= self.inner.len() } /// Returns `true` if the original string contained '/' as the last symbol. pub fn is_dir(&self) -> bool { self.is_dir } /// Returns `true` when the path contains only one component - that is, it is not empty and /// contains not `/` characters. pub fn is_single_component(&self) -> bool { let end = if self.is_dir { self.inner.len() - 1 } else { self.inner.len() }; self.next < self.inner.len() && self.inner[self.next..end].find('/').is_none() } /// Returns a reference to a portion of the string that names the next component, and move the /// internal pointer to point to the next component. See also [`Path::peek()`]. /// /// Also see [`Path::next_with_ref()`] if you want to use `self` while holding a reference to /// the returned name. pub fn next(&mut self) -> Option<&str> { self.next_with_ref().1 } /// Rust does not allow usage of `self` while the returned reference is alive, even when the /// reference is actually shared. See, for example, /// /// https://internals.rust-lang.org/t/relaxing-the-borrow-checker-for-fn-mut-self-t/3256 /// /// for additional details. So if the caller wants to call any other methods on the `path` /// after calling `next()` while still holding a reference to the returned name they can use /// this method as a workaround. 
When Rust is extended to cover this use case, `next_with_ref` /// should be merged into [`Self::next()`]. pub fn next_with_ref(&mut self) -> (&Self, Option<&str>) { match self.inner[self.next..].find('/') { Some(i) => { let from = self.next; self.next = self.next + i + 1; (self, Some(&self.inner[from..from + i])) } None => { if self.next >= self.inner.len() { (self, None) } else { let from = self.next; self.next = self.inner.len(); (self, Some(&self.inner[from..])) } } } } /// Returns a referenc to a position of the string that names the next component, without /// moving the internal pointer. So calling `peek()` multiple times in a row would return the /// same result. See also [`Self::next()`]. pub fn peek(&self) -> Option<&str> { match self.inner[self.next..].find('/') { Some(i) => Some(&self.inner[self.next..self.next + i]), None => { if self.next >= self.inner.len() { None } else { Some(&self.inner[self.next..]) } } } } /// Converts this `Path` into a `String` holding the rest of the path. If [`Self::next()`] was /// called, this would cause reallocation. pub fn into_string(self) -> String { if self.next == 0 { self.inner } else { self.inner.split_at(self.next).1.to_string() } } } impl AsRef<str> for Path { fn as_ref(&self) -> &str { &self.inner } } #[cfg(test)] mod tests { use super::*; macro_rules! simple_construction_test { (path: $str:expr, $path:ident => $body:block) => { match Path::validate_and_split($str) { Ok($path) => $body, Err(status) => panic!("'{}' construction failed: {}", stringify!(path), status), } }; (path: $str:expr, mut $path:ident => $body:block) => { match Path::validate_and_split($str) { Ok(mut $path) => $body, Err(status) => panic!("'{}' construction failed: {}", stringify!(path), status), } }; } macro_rules! 
negative_construction_test { (path: $path:expr, $details:expr) => { match Path::validate_and_split($path) { Ok(path) => { panic!("Constructed '{}' with {}: {:?}", stringify!($path), $details, path) } Err(status) => assert_eq!(status, Status::BAD_PATH), } }; } #[test] fn empty() { simple_construction_test! { path: "", mut path => { assert!(path.is_empty()); assert!(!path.is_dir()); assert!(!path.is_single_component()); assert_eq!(path.peek(), None); assert_eq!(path.next(), None); assert_eq!(path.into_string(), String::new()); } }; } #[test] fn forward_slash_only() { simple_construction_test! { path: "/", mut path => { assert!(path.is_empty()); assert!(path.is_dir()); assert!(!path.is_single_component()); assert_eq!(path.peek(), None); assert_eq!(path.next(), None); assert_eq!(path.into_string(), String::new()); } }; } #[test] fn one_component_short() { simple_construction_test! { path: "a", mut path => { assert!(!path.is_empty()); assert!(!path.is_dir()); assert!(path.is_single_component()); assert_eq!(path.peek(), Some("a")); assert_eq!(path.peek(), Some("a")); assert_eq!(path.next(), Some("a")); assert_eq!(path.peek(), None); assert_eq!(path.next(), None); assert_eq!(path.into_string(), String::new()); } }; } #[test] fn one_component() { simple_construction_test! { path: "some", mut path => { assert!(!path.is_empty()); assert!(!path.is_dir()); assert!(path.is_single_component()); assert_eq!(path.peek(), Some("some")); assert_eq!(path.peek(), Some("some")); assert_eq!(path.next(), Some("some")); assert_eq!(path.peek(), None); assert_eq!(path.next(), None); assert_eq!(path.into_string(), String::new()); } }; } #[test] fn one_component_dir() { simple_construction_test! 
{ path: "some/", mut path => { assert!(!path.is_empty()); assert!(path.is_dir()); assert!(path.is_single_component()); assert_eq!(path.peek(), Some("some")); assert_eq!(path.peek(), Some("some")); assert_eq!(path.next(), Some("some")); assert_eq!(path.peek(), None); assert_eq!(path.next(), None); assert_eq!(path.into_string(), String::new()); } }; } #[test] fn two_component_short() { simple_construction_test! { path: "a/b", mut path => { assert!(!path.is_empty()); assert!(!path.is_dir()); assert!(!path.is_single_component()); assert_eq!(path.peek(), Some("a")); assert_eq!(path.peek(), Some("a")); assert_eq!(path.next(), Some("a")); assert_eq!(path.peek(), Some("b")); assert_eq!(path.peek(), Some("b")); assert_eq!(path.next(), Some("b")); assert_eq!(path.peek(), None); assert_eq!(path.next(), None); assert_eq!(path.into_string(), String::new()); } }; } #[test] fn two_component() { simple_construction_test! { path: "some/path", mut path => { assert!(!path.is_empty()); assert!(!path.is_dir()); assert!(!path.is_single_component()); assert_eq!(path.peek(), Some("some")); assert_eq!(path.peek(), Some("some")); assert_eq!(path.next(), Some("some")); assert_eq!(path.peek(), Some("path")); assert_eq!(path.peek(), Some("path")); assert_eq!(path.next(), Some("path")); assert_eq!(path.peek(), None); assert_eq!(path.next(), None); assert_eq!(path.into_string(), String::new()); } }; } #[test] fn two_component_dir() { simple_construction_test! { path: "some/path/", mut path => { assert!(!path.is_empty()); assert!(path.is_dir()); assert!(!path.is_single_component()); assert_eq!(path.peek(), Some("some")); assert_eq!(path.peek(), Some("some")); assert_eq!(path.next(), Some("some")); assert_eq!(path.peek(), Some("path")); assert_eq!(path.peek(), Some("path")); assert_eq!(path.next(), Some("path")); assert_eq!(path.peek(), None); assert_eq!(path.next(), None); assert_eq!(path.into_string(), String::new()); } }; } #[test] fn into_string_half_way() { simple_construction_test! 
{ path: "into/string/half/way", mut path => { assert!(!path.is_empty()); assert!(!path.is_dir()); assert!(!path.is_single_component()); assert_eq!(path.peek(), Some("into")); assert_eq!(path.peek(), Some("into")); assert_eq!(path.next(), Some("into")); assert_eq!(path.peek(), Some("string")); assert_eq!(path.peek(), Some("string")); assert_eq!(path.next(), Some("string")); assert_eq!(path.peek(), Some("half")); assert_eq!(path.peek(), Some("half")); assert_eq!(path.into_string(), "half/way".to_string()); } }; } #[test] fn into_string_half_way_dir() { simple_construction_test! { path: "into/string/half/way/", mut path => { assert!(!path.is_empty()); assert!(path.is_dir()); assert!(!path.is_single_component()); assert_eq!(path.peek(), Some("into")); assert_eq!(path.peek(), Some("into")); assert_eq!(path.next(), Some("into")); assert_eq!(path.peek(), Some("string")); assert_eq!(path.peek(), Some("string")); assert_eq!(path.next(), Some("string")); assert_eq!(path.peek(), Some("half")); assert_eq!(path.peek(), Some("half")); assert_eq!(path.into_string(), "half/way/".to_string()); } }; } #[test] fn into_string_dir_last_component() { simple_construction_test! { path: "into/string/", mut path => { assert!(!path.is_empty()); assert!(path.is_dir()); assert!(!path.is_single_component()); assert_eq!(path.peek(), Some("into")); assert_eq!(path.peek(), Some("into")); assert_eq!(path.next(), Some("into")); assert_eq!(path.peek(), Some("string")); assert_eq!(path.peek(), Some("string")); assert_eq!(path.next(), Some("string")); assert_eq!(path.peek(), None); assert_eq!(path.into_string(), "".to_string()); } }; } #[test] fn no_empty_components() { negative_construction_test! { path: "//", "empty components" }; } #[test] fn no_absolute_paths() { negative_construction_test! { path: "/a/b/c", "\"absolute\" path" }; } #[test] fn dot_components() { negative_construction_test! { path: "a/./b", "'.' components" }; } #[test] fn dot_dot_components() { negative_construction_test! 
{ path: "a/../b", "'..' components" }; } #[test] fn too_long_filename() { let string = "a".repeat(MAX_FILENAME as usize + 1); negative_construction_test! { path: &string, "filename too long" }; } #[test] fn too_long_path() { let filename = "a".repeat(MAX_FILENAME as usize); let mut path = String::new(); while path.len() < MAX_PATH as usize { path.push('/'); path.push_str(&filename); } assert_eq!(path.len(), MAX_PATH as usize); negative_construction_test! { path: &path, "path too long" }; } #[test] fn long_path() { let mut path = "a/".repeat((MAX_PATH as usize - 1) / 2); if path.len() < MAX_PATH as usize - 1 { path.push('a'); } assert_eq!(path.len(), MAX_PATH as usize - 1); simple_construction_test! { path: &path, mut path => { assert!(!path.is_empty()); assert_eq!(path.next(), Some("a")); } }; } #[test] fn long_filename() { let string = "a".repeat(MAX_FILENAME as usize); simple_construction_test! { path: &string, mut path => { assert!(!path.is_empty()); assert!(path.is_single_component()); assert_eq!(path.next(), Some(string.as_str())); } }; } }
34.832994
99
0.485529
629cdc69ea651efcef58cda581b936a2244f50cd
1,038
use crate::prelude::*; use nu_engine::WholeStreamCommand; use nu_errors::ShellError; use nu_protocol::{ReturnSuccess, Value}; use rand::seq::SliceRandom; use rand::thread_rng; pub struct Shuffle; #[async_trait] impl WholeStreamCommand for Shuffle { fn name(&self) -> &str { "shuffle" } fn usage(&self) -> &str { "Shuffle rows randomly." } async fn run(&self, args: CommandArgs) -> Result<OutputStream, ShellError> { shuffle(args).await } } async fn shuffle(args: CommandArgs) -> Result<OutputStream, ShellError> { let input = args.input; let mut values: Vec<Value> = input.collect().await; values.shuffle(&mut thread_rng()); Ok(futures::stream::iter(values.into_iter().map(ReturnSuccess::value)).to_output_stream()) } #[cfg(test)] mod tests { use super::ShellError; use super::Shuffle; #[test] fn examples_work_as_expected() -> Result<(), ShellError> { use crate::examples::test as test_examples; test_examples(Shuffle {}) } }
22.085106
94
0.655106
ac28d072a755334e77d1e552db4e5c3f4399baf9
1,678
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ access_path::AccessPath, account_config::constants::{lbr_type_tag, CORE_CODE_ADDRESS, LIBRA_MODULE_NAME}, }; use move_core_types::{ language_storage::{StructTag, TypeTag}, move_resource::MoveResource, }; #[cfg(any(test, feature = "fuzzing"))] use proptest_derive::Arbitrary; use serde::{Deserialize, Serialize}; /// The preburn balance held under an account. #[derive(Debug, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "fuzzing"), derive(Arbitrary))] pub struct PreburnResource { coin: u64, } impl PreburnResource { pub fn new(coin: u64) -> Self { Self { coin } } pub fn coin(&self) -> u64 { self.coin } // TODO/XXX: remove this once the MoveResource trait allows type arguments to `struct_tag`. pub fn struct_tag_for_currency(currency_typetag: TypeTag) -> StructTag { StructTag { address: CORE_CODE_ADDRESS, name: PreburnResource::struct_identifier(), module: PreburnResource::module_identifier(), type_params: vec![currency_typetag], } } // TODO: remove this once the MoveResource trait allows type arguments to `resource_path`. pub fn access_path_for(currency_typetag: TypeTag) -> Vec<u8> { AccessPath::resource_access_vec(&PreburnResource::struct_tag_for_currency(currency_typetag)) } } impl MoveResource for PreburnResource { const MODULE_NAME: &'static str = LIBRA_MODULE_NAME; const STRUCT_NAME: &'static str = "Preburn"; fn type_params() -> Vec<TypeTag> { vec![lbr_type_tag()] } }
29.964286
100
0.681764
6255787a5da08f46bd9c6d7a321b14e3db0e7db8
3,588
use shared::config::{MAX_LIST_WORDS, MIN_LIST_WORDS}; use super::state::*; impl SidebarSettings { pub fn set_has_time_limit(&self, flag: bool) { self.base.extra.settings.has_time_limit.set_neq(flag); self.base.history.push_modify(|raw| { if let Some(content) = &mut raw.content { if !flag { content.player_settings.time_limit = None; } else { let value = self.base.extra.settings.time_limit.get(); content.player_settings.time_limit = Some(value); } } }) } pub fn set_time_limit(&self, time_limit: u32) { self.base.extra.settings.time_limit.set_neq(time_limit); if self.base.extra.settings.has_time_limit.get() { self.base.history.push_modify(|raw| { if let Some(content) = &mut raw.content { content.player_settings.time_limit = Some(time_limit); } }) } } pub fn set_default_pairs(&self) { let pairs_len = self.base.pairs.lock_ref().len(); let mut pairs_to_display = if pairs_len > MAX_LIST_WORDS { MAX_LIST_WORDS as u32 } else { pairs_len as u32 }; if pairs_to_display < MIN_LIST_WORDS as u32 { pairs_to_display = MIN_LIST_WORDS as u32; } self.base .extra .settings .pairs_to_display .set_neq(pairs_to_display); } pub fn toggle_use_default_pairs(&self) { let use_default_pairs = !self.base.extra.settings.use_default_pairs.get(); self.base .extra .settings .use_default_pairs .set_neq(use_default_pairs); if use_default_pairs { self.set_default_pairs(); } self.base.history.push_modify(|raw| { if let Some(content) = &mut raw.content { content.player_settings.pairs_to_display = if self.base.extra.settings.use_default_pairs.get() { None } else { Some(self.base.extra.settings.pairs_to_display.get()) } } }) } pub fn set_pairs_to_display(&self, pairs_to_display: u32) { // Ensure that the entered amount is not greater than the maximum amount of pairs. let mut pairs_to_display = if pairs_to_display > MAX_LIST_WORDS as u32 { MAX_LIST_WORDS as u32 } else { pairs_to_display }; // Make sure that the amount is not greater than the actual amount of pairs. 
let pairs_len = self.base.pairs.lock_ref().len() as u32; if pairs_to_display > pairs_len { pairs_to_display = pairs_len } if pairs_to_display < MIN_LIST_WORDS as u32 { pairs_to_display = MIN_LIST_WORDS as u32; } self.base.extra.settings.use_default_pairs.set_neq(false); self.base .extra .settings .pairs_to_display .set_neq(pairs_to_display); self.base.history.push_modify(|raw| { if let Some(content) = &mut raw.content { content.player_settings.pairs_to_display = if self.base.extra.settings.use_default_pairs.get() { None } else { Some(pairs_to_display) } } }) } }
32.035714
90
0.542921
39f18c4f441bf0e2254d3e040022c24997c269a3
3,944
use crate::context::Context; use expresso::expression::Expression; use expresso::variables::Variable; #[derive(Debug)] pub struct BakedExpression { original: String, expression: Option< Expression >, baked: Baked, } #[derive(Debug)] pub enum Baked { EMPTY, F32(f32), U32(u32), STRING(String), } impl BakedExpression { pub fn new() -> Self { Self { original: String::new(), expression: None, baked: Baked::EMPTY, } } pub fn from_str( v: &str ) -> Self { let mut expression = Expression::new(); expression.enable_upgrade_of_literals_to_strings(); expression.from_str( v ); Self { original: v.to_string(), expression: Some( expression ), baked: Baked::EMPTY, } } pub fn from_f32( v: f32 ) -> Self { Self { original: String::new(), expression: None, baked: Baked::F32( v ), } } pub fn from_u32( v: u32 ) -> Self { Self { original: String::new(), expression: None, baked: Baked::U32( v ), } } pub fn bake_f32_or( &mut self, context: &mut Context, default: f32 ) { if let Some( e ) = &self.expression { let r = e.run( context.get_mut_machine() ); match r.top() { Some( Variable::F32( f ) ) => { self.baked = Baked::F32( *f ); }, Some( Variable::ERROR( e ) ) => { println!("Error baking {:?} in {:?}", self, context ); self.baked = Baked::F32( default ); }, t => todo!("Result type not handled {:?} {:?} {:?}", t, r, e ), } } else { match self.baked { Baked::F32( _ ) => {}, // just keep the baked value _ => self.baked = Baked::F32( default ), } } } pub fn bake_u32_or( &mut self, context: &mut Context, default: u32 ) { if let Some( e ) = &self.expression { let r = e.run( context.get_mut_machine() ); match r.top() { Some( Variable::F32( f ) ) => { self.baked = Baked::U32( *f as u32 ); }, Some( Variable::I32( i ) ) => { // :HACK: :TODO: at least add a range check self.baked = Baked::U32( *i as u32 ); }, Some( Variable::ERROR( e ) ) => { // :TODO: make error visible to caller/user println!("Error baking {:?} got stack {:?} using default {}", &self, &r, &default ); self.baked = Baked::U32( default 
); }, t => { dbg!( &self ); todo!("Result type not handled {:?} {:?} {:?}", t, r, e ) }, } } else { match self.baked { Baked::U32( _ ) => {}, // just keep the baked value _ => self.baked = Baked::U32( default ), } } } pub fn bake_string_or( &mut self, context: &mut Context, default: &str ) { if let Some( e ) = &self.expression { let r = e.run( context.get_mut_machine() ); match r.top() { Some( Variable::F32( f ) ) => { self.baked = Baked::STRING( format!("{}", f).to_string() ); }, Some( Variable::String( s ) ) => { self.baked = Baked::STRING( s.to_string() ); }, Some( Variable::ERROR( e ) ) => { println!("Error baking {:?} in {:?}", self, context ); self.baked = Baked::STRING( default.to_string() ); }, None => { self.baked = Baked::STRING( default.to_string() ); }, t => todo!("Result type not handled {:?} {:?} {:?}", t, r, e ), } } else { match self.baked { Baked::F32( _ ) => {}, // just keep the baked value _ => self.baked = Baked::STRING( default.to_string() ), } } } pub fn as_f32( &self ) -> f32 { match self.baked { Baked::F32( f ) => f, Baked::U32( u ) => u as f32, _ => 0.0, // :TODO: report error in "trace" mode } } pub fn as_u32( &self ) -> u32 { match self.baked { Baked::U32( u ) => u, Baked::F32( f ) => f as u32, _ => 0, // :TODO: report error in "trace" mode } } pub fn as_string( &self ) -> String { match &self.baked { Baked::STRING( s ) => s.clone(), Baked::U32( u ) => format!( "{}", u ).to_string(), Baked::F32( f ) => format!( "{}", f ).to_string(), _ => String::new(), // :TODO: report error in "trace" mode } } }
24.345679
89
0.546907
72e81021a668db71283b3632f102a3065d8ad17e
3,071
use std::collections::HashMap; use advent_2021::*; const STEPS: isize = 2000; const MIN_X_VEL: isize = 0; const MAX_X_VEL: isize = 1000; const MIN_Y_VEL: isize = -500; const MAX_Y_VEL: isize = 500; struct Probe { pos: (isize, isize), velocity: (isize, isize), } impl Probe { fn new(velocity: (isize, isize)) -> Self { Self { pos: (0, 0), velocity, } } fn step(&mut self) { self.pos.0 += self.velocity.0; self.pos.1 += self.velocity.1; if self.velocity.0 < 0 { self.velocity.0 += 1; } else if self.velocity.0 > 0 { self.velocity.0 -= 1; } // else nothing self.velocity.1 -= 1; } fn is_in_target(&self, x: (isize, isize), y: (isize, isize)) -> bool { self.pos.0 >= x.0 && self.pos.0 <= x.1 && self.pos.1 >= y.0 && self.pos.1 <= y.1 } } fn parse_range(input: &str) -> (isize, isize) { let mut range = input.split(".."); ( range.next().unwrap().parse::<isize>().unwrap(), range.last().unwrap().parse::<isize>().unwrap(), ) } type Paths = HashMap<(isize, isize), Vec<(isize, isize)>>; fn find_all_intersecting_paths(target_x: (isize, isize), target_y: (isize, isize)) -> Paths { let mut paths = HashMap::new(); for x_velocity in MIN_X_VEL..=MAX_X_VEL { for y_velocity in MIN_Y_VEL..=MAX_Y_VEL { let mut probe = Probe::new((x_velocity, y_velocity)); let mut path = Vec::new(); for _ in 0..STEPS { probe.step(); path.push(probe.pos); if probe.is_in_target(target_x, target_y) { paths.insert((x_velocity, y_velocity), path.clone()); break; } } } } paths } fn maximum_height(path: &[(isize, isize)]) -> isize { path.iter().max_by_key(|(_, y)| y).unwrap().1 } #[test] fn example_1() { let input = "target area: x=20..30, y=-10..-5"; let mut target = input.split(": x=").last().unwrap().split(", y="); let target_x = parse_range(&target.next().unwrap()); let target_y = parse_range(&target.last().unwrap()); let paths = find_all_intersecting_paths(target_x, target_y); let max = paths .iter() .max_by_key(|(_, path)| maximum_height(path)) .unwrap(); println!("{:?}: {}", max.0, maximum_height(max.1)); } fn 
main() { let file = input::lines("./input/day_17.txt").unwrap(); let input = file.iter().next().unwrap(); let mut target = input.split(": x=").last().unwrap().split(", y="); let target_x = parse_range(&target.next().unwrap()); let target_y = parse_range(&target.last().unwrap()); let paths = find_all_intersecting_paths(target_x, target_y); let max = paths .iter() .max_by_key(|(_, path)| maximum_height(path)) .unwrap(); // part 1: { println!("{:?}: {}", max.0, maximum_height(max.1)); } // part 2: { println!("#2:{}", paths.len()); } }
27.176991
93
0.537936
214b732e848cd0d83ab89eb4fcd96381f6bfe549
3,602
use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; use futures::{ready, TryFuture}; use pin_project::pin_project; use super::{CombineRejection, Filter, FilterBase, Func, Internal}; #[derive(Clone, Copy, Debug)] pub struct Flatten<T, F> { pub(super) filter: T, pub(super) callback: F, } impl<T, F> FilterBase for Flatten<T, F> where T: Filter, F: Func<T::Extract> + Clone + Send, F::Output: Future + Send, <F::Output as Future>::Output: Filter, <<F::Output as Future>::Output as FilterBase>::Error: CombineRejection<T::Error>, { type Extract = <<F::Output as Future>::Output as FilterBase>::Extract; type Error = <<<F::Output as Future>::Output as FilterBase>::Error as CombineRejection<T::Error>>::One; type Future = FlattenFuture<T, F>; #[inline] fn filter(&self, _: Internal) -> Self::Future { FlattenFuture { state: State::First(self.filter.filter(Internal), self.callback.clone()), } } } #[pin_project(project = StateProj)] enum State<T, F> where T: Filter, F: Func<T::Extract>, F::Output: Future, <F::Output as Future>::Output: Filter, <<F::Output as Future>::Output as FilterBase>::Error: CombineRejection<T::Error>, { First(#[pin] T::Future, F), Second(#[pin] F::Output), Third(#[pin] <<F::Output as Future>::Output as FilterBase>::Future), Done, } #[allow(missing_debug_implementations)] #[pin_project] pub struct FlattenFuture<T: Filter, F> where T: Filter, F: Func<T::Extract>, F::Output: Future, <F::Output as Future>::Output: Filter, <<F::Output as Future>::Output as FilterBase>::Error: CombineRejection<T::Error>, { #[pin] state: State<T, F>, } impl<T, F> Future for FlattenFuture<T, F> where T: Filter, F: Func<T::Extract> + Clone, F::Output: Future, <F::Output as Future>::Output: Filter, <<F::Output as Future>::Output as FilterBase>::Error: CombineRejection<T::Error>, { type Output = Result< <<F::Output as Future>::Output as FilterBase>::Extract, <<<F::Output as Future>::Output as FilterBase>::Error as CombineRejection<T::Error>>::One, >; #[inline] fn 
poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { loop { let pin = self.as_mut().project(); match pin.state.project() { StateProj::First(first, callback) => match ready!(first.poll(cx)) { Ok(ex) => { let second = callback.call(ex); self.set(FlattenFuture { state: State::Second(second), }); } Err(e) => { return Poll::Ready(Err(From::from(e))); } }, StateProj::Second(second) => { let filter = ready!(second.poll(cx)); let third = filter.filter(Internal); self.set(FlattenFuture { state: State::Third(third), }); } StateProj::Third(third) => match ready!(third.try_poll(cx)) { Ok(item) => { self.set(FlattenFuture { state: State::Done }); return Poll::Ready(Ok(item)); } Err(e) => return Poll::Ready(Err(From::from(e))), }, StateProj::Done => panic!("polled after complete"), } } } }
31.876106
98
0.534148
898d951f15cc0c85898b7f78015608abfa029265
8,437
mod tag_attributes; use super::HtmlDashedName as TagName; use super::HtmlProp as TagAttribute; use super::HtmlPropSuffix as TagSuffix; use super::HtmlTree; use crate::{non_capitalized_ascii, Peek, PeekValue}; use boolinator::Boolinator; use proc_macro2::Span; use quote::{quote, quote_spanned, ToTokens}; use syn::buffer::Cursor; use syn::parse; use syn::parse::{Parse, ParseStream, Result as ParseResult}; use syn::spanned::Spanned; use syn::{Ident, Token}; use tag_attributes::{ClassesForm, TagAttributes}; pub struct HtmlTag { tag_name: TagName, attributes: TagAttributes, children: Vec<HtmlTree>, } impl PeekValue<()> for HtmlTag { fn peek(cursor: Cursor) -> Option<()> { HtmlTagOpen::peek(cursor) .or_else(|| HtmlTagClose::peek(cursor)) .map(|_| ()) } } impl Parse for HtmlTag { fn parse(input: ParseStream) -> ParseResult<Self> { if HtmlTagClose::peek(input.cursor()).is_some() { return match input.parse::<HtmlTagClose>() { Ok(close) => Err(syn::Error::new_spanned( close, "this close tag has no corresponding open tag", )), Err(err) => Err(err), }; } let open = input.parse::<HtmlTagOpen>()?; // Return early if it's a self-closing tag if open.div.is_some() { return Ok(HtmlTag { tag_name: open.tag_name, attributes: open.attributes, children: Vec::new(), }); } let mut children: Vec<HtmlTree> = vec![]; loop { if input.is_empty() { return Err(syn::Error::new_spanned( open, "this open tag has no corresponding close tag", )); } if let Some(next_close_tag_name) = HtmlTagClose::peek(input.cursor()) { if open.tag_name == next_close_tag_name { break; } } children.push(input.parse()?); } input.parse::<HtmlTagClose>()?; Ok(HtmlTag { tag_name: open.tag_name, attributes: open.attributes, children, }) } } impl ToTokens for HtmlTag { fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { let HtmlTag { tag_name, attributes, children, } = self; let name = tag_name.to_string(); let TagAttributes { classes, attributes, booleans, kind, value, checked, node_ref, key, href, listeners } = 
&attributes; let vtag = Ident::new("__yew_vtag", tag_name.span()); let attr_pairs = attributes.iter().map(|TagAttribute { label, value }| { let label_str = label.to_string(); quote_spanned! {value.span() => (#label_str.to_owned(), (#value).to_string()) } }); let set_booleans = booleans.iter().map(|TagAttribute { label, value }| { let label_str = label.to_string(); quote_spanned! {value.span() => if #value { #vtag.add_attribute(&#label_str, &#label_str); } } }); let set_kind = kind.iter().map(|kind| { quote_spanned! {kind.span()=> #vtag.set_kind(&(#kind)); } }); let set_value = value.iter().map(|value| { quote_spanned! {value.span()=> #vtag.set_value(&(#value)); } }); let add_href = href.iter().map(|href| { quote_spanned! {href.span()=> let __yew_href: ::yew::html::Href = (#href).into(); #vtag.add_attribute("href", &__yew_href); } }); let set_checked = checked.iter().map(|checked| { quote_spanned! {checked.span()=> #vtag.set_checked(#checked); } }); let set_classes = classes.iter().map(|classes_form| match classes_form { ClassesForm::Tuple(classes) => quote! { #vtag.add_classes(vec![#(&(#classes)),*]); }, ClassesForm::Single(classes) => quote! { #vtag.set_classes(#classes); }, }); let set_node_ref = node_ref.iter().map(|node_ref| { quote! { #vtag.node_ref = #node_ref; } }); let set_key = key.iter().map(|key| { quote! { #vtag.key = #key; } }); let listeners = listeners.iter().map(|listener| { let name = &listener.label.name; let callback = &listener.value; quote_spanned! {name.span()=> { ::yew::html::#name::Wrapper::new( <::yew::virtual_dom::VTag as ::yew::virtual_dom::Transformer<_, _>>::transform( #callback ) ) }} }); tokens.extend(quote! 
{{ let mut #vtag = ::yew::virtual_dom::VTag::new(#name); #(#set_kind)* #(#set_value)* #(#add_href)* #(#set_checked)* #(#set_booleans)* #(#set_classes)* #(#set_node_ref)* #(#set_key)* #vtag.add_attributes(vec![#(#attr_pairs),*]); #vtag.add_listeners(vec![#(::std::rc::Rc::new(#listeners)),*]); #vtag.add_children(vec![#(#children),*]); ::yew::virtual_dom::VNode::from(#vtag) }}); } } struct HtmlTagOpen { lt: Token![<], tag_name: TagName, attributes: TagAttributes, div: Option<Token![/]>, gt: Token![>], } impl PeekValue<TagName> for HtmlTagOpen { fn peek(cursor: Cursor) -> Option<TagName> { let (punct, cursor) = cursor.punct()?; (punct.as_char() == '<').as_option()?; let (name, _) = TagName::peek(cursor)?; non_capitalized_ascii(&name.to_string()).as_option()?; Some(name) } } impl Parse for HtmlTagOpen { fn parse(input: ParseStream) -> ParseResult<Self> { let lt = input.parse::<Token![<]>()?; let tag_name = input.parse::<TagName>()?; let TagSuffix { stream, div, gt } = input.parse()?; let mut attributes: TagAttributes = parse(stream)?; // Don't treat value as special for non input / textarea fields match tag_name.to_string().as_str() { "input" | "textarea" => {} _ => { if let Some(value) = attributes.value.take() { attributes.attributes.push(TagAttribute { label: TagName::new(Ident::new("value", Span::call_site())), value, }); } } } Ok(HtmlTagOpen { lt, tag_name, attributes, div, gt, }) } } impl ToTokens for HtmlTagOpen { fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { let HtmlTagOpen { lt, gt, .. } = self; tokens.extend(quote! 
{#lt#gt}); } } struct HtmlTagClose { lt: Token![<], div: Option<Token![/]>, tag_name: TagName, gt: Token![>], } impl PeekValue<TagName> for HtmlTagClose { fn peek(cursor: Cursor) -> Option<TagName> { let (punct, cursor) = cursor.punct()?; (punct.as_char() == '<').as_option()?; let (punct, cursor) = cursor.punct()?; (punct.as_char() == '/').as_option()?; let (name, cursor) = TagName::peek(cursor)?; non_capitalized_ascii(&name.to_string()).as_option()?; let (punct, _) = cursor.punct()?; (punct.as_char() == '>').as_option()?; Some(name) } } impl Parse for HtmlTagClose { fn parse(input: ParseStream) -> ParseResult<Self> { Ok(HtmlTagClose { lt: input.parse()?, div: input.parse()?, tag_name: input.parse()?, gt: input.parse()?, }) } } impl ToTokens for HtmlTagClose { fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { let HtmlTagClose { lt, div, tag_name, gt, } = self; tokens.extend(quote! {#lt#div#tag_name#gt}); } }
29.603509
99
0.500652
ebd0010f620fb7695bf324d706dd67e32d237eef
3,919
extern crate cfg_if; extern crate wasm_bindgen; mod utils; use cfg_if::cfg_if; use wasm_bindgen::prelude::*; use std::fmt; cfg_if! { // When the `wee_alloc` feature is enabled, use `wee_alloc` as the global // allocator. if #[cfg(feature = "wee_alloc")] { extern crate wee_alloc; #[global_allocator] static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; } } #[wasm_bindgen] #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum Cell { Dead = 0, Alive = 1 } #[wasm_bindgen] pub struct Universe { width: u32, height: u32, cells: Vec<Cell> } #[wasm_bindgen] impl Universe { fn get_index(&self, row: u32, column: u32) -> usize { (row * self.width + column) as usize } fn live_neighbor_count(&self, row: u32, column: u32) -> u8 { let mut count = 0; for delta_row in [self.height - 1, 0, 1].iter().cloned() { for delta_col in [self.width - 1, 0, 1].iter().cloned() { if delta_row == 0 && delta_col == 0 { continue; } let neighbor_row = (row + delta_row) % self.height; let neighbor_col = (column + delta_col) % self.width; let idx = self.get_index(neighbor_row, neighbor_col); count += self.cells[idx] as u8; } } count } pub fn tick(&mut self) { let mut next = self.cells.clone(); for row in 0..self.height { for col in 0..self.width { let idx = self.get_index(row, col); let cell = self.cells[idx]; let live_neighbors = self.live_neighbor_count(row, col); let next_cell = match (cell, live_neighbors) { // Rule 1: Any live cell with fewer than two live neightbours // dies, as if caused by underpopulation (Cell::Alive, x) if x < 2 => Cell::Dead, //Rule2: Any live cell with two or three live neighbours //lives on to the next generation. (Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive, // Rule 3: Any live cell with more than three live // neighbours dies, as if by overpopulatio. (Cell::Alive, x) if x > 3 => Cell::Dead, // Rule 4: Any dead cell with exactly three live neighbours // becomes a live cell, as if by reproduction. 
(Cell::Dead, 3) => Cell::Alive, // All other cells remain in the same state. (otherwise, _) => otherwise, }; next[idx] = next_cell; } } self.cells = next; } pub fn new() -> Universe { let width = 128; let height = 128; let cells = (0..width * height) .map(|i| { if i % 2 == 0 || i % 7 == 0 { Cell::Alive } else { Cell::Dead } }) .collect(); Universe { width, height, cells } } pub fn render(&self) -> String { self.to_string() } pub fn width(&self) -> u32 { self.width } pub fn height(&self) -> u32 { self.height } pub fn cells(&self) -> *const Cell { self.cells.as_ptr() } } impl fmt::Display for Universe { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { for line in self.cells.as_slice().chunks(self.width as usize) { for &cell in line { let symbol = if cell == Cell::Dead { '◻' } else { '◼' }; write!(f, "{}", symbol)?; } write!(f, "\n")?; } Ok(()) } }
27.598592
81
0.478183
fc18b3ac51ff2f7b3c1de66e542dde29f83dac17
2,538
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use anyhow::{Error, Result}; use futures::future::{err, ok, Future}; use libra_config::config::NodeConfig; use libra_types::{ account_address::AccountAddress, account_config::get_account_resource_or_default, transaction::SignedTransaction, vm_error::VMStatus, }; use scratchpad::SparseMerkleTree; use std::sync::Arc; use storage_client::{StorageRead, VerifiedStateView}; use vm_runtime::{LibraVM, VMVerifier}; #[cfg(test)] #[path = "unit_tests/vm_validator_test.rs"] mod vm_validator_test; pub trait TransactionValidation: Send + Sync + Clone { type ValidationInstance: VMVerifier; /// Validate a txn from client fn validate_transaction( &self, _txn: SignedTransaction, ) -> Box<dyn Future<Item = Option<VMStatus>, Error = Error> + Send>; } #[derive(Clone)] pub struct VMValidator { storage_read_client: Arc<dyn StorageRead>, vm: LibraVM, } impl VMValidator { pub fn new(config: &NodeConfig, storage_read_client: Arc<dyn StorageRead>) -> Self { VMValidator { storage_read_client, vm: LibraVM::new(&config.vm_config), } } } impl TransactionValidation for VMValidator { type ValidationInstance = LibraVM; fn validate_transaction( &self, txn: SignedTransaction, ) -> Box<dyn Future<Item = Option<VMStatus>, Error = Error> + Send> { match self.storage_read_client.get_latest_state_root() { Ok((version, state_root)) => { let smt = SparseMerkleTree::new(state_root); let state_view = VerifiedStateView::new( Arc::clone(&self.storage_read_client), Some(version), state_root, &smt, ); Box::new(ok(self.vm.validate_transaction(txn, &state_view))) } Err(e) => Box::new(err(e)), } } } /// read account state /// returns account's current sequence number and balance pub async fn get_account_state( storage_read_client: Arc<dyn StorageRead>, address: AccountAddress, ) -> Result<(u64, u64)> { let account_state = storage_read_client .get_latest_account_state_async(address) .await?; let account_resource = 
get_account_resource_or_default(&account_state)?; let sequence_number = account_resource.sequence_number(); let balance = account_resource.balance(); Ok((sequence_number, balance)) }
31.333333
88
0.65524
bfd6a4090649b7cfd8bfcef032efe092bdd0939b
693
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Encountered while testing #44614. // must-compile-successfully pub fn main() { // Constant of generic type (int) const X: &'static u32 = &22; assert_eq!(0, match &22 { X => 0, _ => 1, }); }
31.5
68
0.678211
ddf67d833043e788a396f46ce5e3a83dc8f4cd29
8,911
use result::NSSResult; use ffi::{pk11, sec}; use ffi::nspr::PRBool; use std::ptr; use libc::{c_uint, c_int, c_void}; #[allow(non_camel_case_types)] #[derive(Copy, Clone)] pub enum RSAPadding { PKCS1v15, OAEP_MGF1_SHA1, OAEP_MGF1_SHA224, OAEP_MGF1_SHA256, OAEP_MGF1_SHA384, OAEP_MGF1_SHA512, } enum RSAPaddingParam { NullParam, OAEPParam(pk11::CK_RSA_PKCS_OAEP_PARAMS), } impl RSAPaddingParam { fn to_secitem<'a>(&'a self) -> sec::SECItem<'a> { match *self { RSAPaddingParam::NullParam => sec::SECItem::empty(), RSAPaddingParam::OAEPParam(ref param) => sec::SECItem::from_struct(param), } } } impl RSAPadding { fn to_ckm(&self) -> pk11::CK_MECHANISM_TYPE { match *self { RSAPadding::PKCS1v15 => pk11::CKM_RSA_PKCS, RSAPadding::OAEP_MGF1_SHA1 | RSAPadding::OAEP_MGF1_SHA224 | RSAPadding::OAEP_MGF1_SHA256 | RSAPadding::OAEP_MGF1_SHA384 | RSAPadding::OAEP_MGF1_SHA512 => pk11::CKM_RSA_PKCS_OAEP, } } fn get_param(&self) -> RSAPaddingParam { match *self { RSAPadding::PKCS1v15 => RSAPaddingParam::NullParam, RSAPadding::OAEP_MGF1_SHA1 => RSAPaddingParam::OAEPParam(pk11::CK_RSA_PKCS_OAEP_PARAMS::from_algorithm(pk11::CKM_SHA_1)), RSAPadding::OAEP_MGF1_SHA224 => RSAPaddingParam::OAEPParam(pk11::CK_RSA_PKCS_OAEP_PARAMS::from_algorithm(pk11::CKM_SHA_224)), RSAPadding::OAEP_MGF1_SHA256 => RSAPaddingParam::OAEPParam(pk11::CK_RSA_PKCS_OAEP_PARAMS::from_algorithm(pk11::CKM_SHA_256)), RSAPadding::OAEP_MGF1_SHA384 => RSAPaddingParam::OAEPParam(pk11::CK_RSA_PKCS_OAEP_PARAMS::from_algorithm(pk11::CKM_SHA_384)), RSAPadding::OAEP_MGF1_SHA512 => RSAPaddingParam::OAEPParam(pk11::CK_RSA_PKCS_OAEP_PARAMS::from_algorithm(pk11::CKM_SHA_512)), } } } pub struct RSAPrivateKey { key: pk11::PrivateKey, pubkey: Option<RSAPublicKey> } impl RSAPrivateKey { pub fn load(data: &[u8]) -> NSSResult<RSAPrivateKey> { try!(::nss::init()); let mut der = sec::SECItem::from_buf(data); let mut slot = try!(pk11::SlotInfo::get_internal()); let mut key = ptr::null_mut(); let pkey = unsafe { 
try!(pk11::PK11_ImportDERPrivateKeyInfoAndReturnKey(slot.get_mut(), der.get_mut(), ptr::null_mut(), ptr::null_mut(), PRBool::False, PRBool::False, pk11::KU_ALL, &mut key, ptr::null_mut()).to_result()); try!(pk11::PrivateKey::wrap(key)) }; Ok(RSAPrivateKey { key: pkey, pubkey: None }) } pub fn gen(key_size_bits: u32) -> NSSResult<RSAPrivateKey> { try!(::nss::init()); let mut slot = try!(pk11::SlotInfo::get_internal()); let mut param = pk11::PK11RSAGenParams { key_size_bits: key_size_bits as c_int, pe: 65537, }; let mut pubkey = ptr::null_mut(); let param_ptr = &mut param as *mut _ as *mut c_void; let privkey = unsafe { try!(pk11::PrivateKey::wrap( pk11::PK11_GenerateKeyPair(slot.get_mut(), pk11::CKM_RSA_PKCS_KEY_PAIR_GEN, param_ptr, &mut pubkey, PRBool::False, PRBool::False, ptr::null_mut()) ) ) }; let rsapubkey = RSAPublicKey::from_raw(pubkey); Ok(RSAPrivateKey { key: privkey, pubkey: rsapubkey.ok() }) } pub fn save(&mut self) -> NSSResult<Vec<u8>> { let secitem = unsafe { try!(sec::SECItem::wrap(pk11::PK11_ExportDERPrivateKeyInfo(self.key.get_mut(), ptr::null_mut()))) }; let result = secitem.copy_buf(); Ok(result) } pub fn key_len(&mut self) -> usize { match self.get_public() { Err(..) 
=> 0, Ok(public) => public.key_len(), } } pub fn encrypt(&mut self, padding: RSAPadding, data: &[u8]) -> NSSResult<Vec<u8>> { let mut public = try!(self.get_public()); public.encrypt(padding, data) } pub fn decrypt(&mut self, padding: RSAPadding, data: &[u8]) -> NSSResult<Vec<u8>> { let mut out = Vec::with_capacity(self.key_len()); let mut outlen = 0; let params = padding.get_param(); let mut secitem = params.to_secitem(); unsafe { try!(pk11::PK11_PrivDecrypt(self.key.get_mut(), padding.to_ckm(), secitem.get_mut(), out.as_mut_ptr(), &mut outlen, out.capacity() as c_uint, data.as_ptr(), data.len() as c_uint).to_result()); out.set_len(outlen as usize); } Ok(out) } pub fn get_public<'a>(&'a mut self) -> NSSResult<&'a mut RSAPublicKey> { if self.pubkey.is_none() { self.pubkey = Some(unsafe { try!(RSAPublicKey::from_raw(pk11::SECKEY_ConvertToPublicKey(self.key.get_mut()))) }); } Ok(self.pubkey.as_mut().unwrap()) } pub fn extract_public(&mut self) -> NSSResult<RSAPublicKey> { try!(self.get_public()); let res = ::std::mem::replace(&mut self.pubkey, None); Ok(res.unwrap()) } } pub struct RSAPublicKey { key: pk11::PublicKey, } impl RSAPublicKey { pub fn load(data: &[u8]) -> NSSResult<RSAPublicKey> { try!(::nss::init()); let der = sec::SECItem::from_buf(data); let key = unsafe { let spki = try!(pk11::PublicKeyInfo::wrap(pk11::SECKEY_DecodeDERSubjectPublicKeyInfo(der.get()))); try!(pk11::PublicKey::wrap(pk11::SECKEY_ExtractPublicKey(spki.get()))) }; Ok(RSAPublicKey { key: key }) } fn from_raw(raw: *mut pk11::SECKEYPublicKey) -> NSSResult<RSAPublicKey> { let key = try!(pk11::PublicKey::wrap(raw)); Ok(RSAPublicKey { key: key }) } pub fn save(&self) -> NSSResult<Vec<u8>> { let secitem = unsafe { try!(sec::SECItem::wrap(pk11::SECKEY_EncodeDERSubjectPublicKeyInfo(self.key.get()))) }; let result = secitem.copy_buf(); Ok(result) } pub fn key_len(&self) -> usize { unsafe { pk11::SECKEY_PublicKeyStrength(self.key.get()) as usize } } pub fn encrypt(&mut self, padding: RSAPadding, data: 
&[u8]) -> NSSResult<Vec<u8>> { let mut out = Vec::with_capacity(self.key_len()); let mut outlen = 0; let params = padding.get_param(); let mut secitem = params.to_secitem(); unsafe { try!(pk11::PK11_PubEncrypt(self.key.get_mut(), padding.to_ckm(), secitem.get_mut(), out.as_mut_ptr(), &mut outlen, out.capacity() as c_uint, data.as_ptr(), data.len() as c_uint, ptr::null_mut()).to_result()); out.set_len(outlen as usize); } Ok(out) } } #[cfg(test)] mod test { extern crate rustc_serialize as serialize; use self::serialize::base64::FromBase64; static PUB_BASE64 : &'static [u8] = b"MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAL3F6TIc3JEYsugo+a2fPU3W+Epv/FeIX21DC86WYnpFtW4srFtz2oNUzyLUzDHZdb+k//8dcT3IAOzUUi3R2eMCAwEAAQ=="; static PRIV_BASE64 : &'static [u8] = b"MIIBVQIBADANBgkqhkiG9w0BAQEFAASCAT8wggE7AgEAAkEAvcXpMhzckRiy6Cj5rZ89Tdb4Sm/8V4hfbUMLzpZiekW1biysW3Pag1TPItTMMdl1v6T//x1xPcgA7NRSLdHZ4wIDAQABAkEAjh8+4qncwcmGivnM6ytbpQT+k/jEOeXG2bQhjojvnXN3FazGCEFXvpuIBcJVfaIJS9YBCMOzzrAtO0+k2hWnOQIhAOC4NVbo8FQhZS4yXM1M86kMl47FA9ui//OUfbhlAdw1AiEA2DBmIXnsboKB+OHver69p0gNeWlvcJc9bjDVfdLVsLcCIQCPtV3vGYJv2vdwxqZQaHC+YB4gIGAqOqBCbmjD3lyFLQIgA+VTYdUNoqwtZWvE4gRf7IzK2V5CCNhg3gR5RGwxN58CIGCcafoRrUKsM66ISg0ITI04G9V/w+wMx91wjEEB+QBz"; #[test] fn decrypt() { static ENC_MESSAGE : &'static [u8] = b"C3fHQjn390troPLazlU5eW0A+p/wlJXv6nwPvEeDh3tCvJ8VWKdnpQbSYGEIuhiNZ8SqNepluES/izTHbXaSWA=="; let encrypted = ENC_MESSAGE.from_base64().unwrap(); let priv_der = PRIV_BASE64.from_base64().unwrap(); let mut privkey = super::RSAPrivateKey::load(&priv_der).unwrap(); let message = privkey.decrypt(super::RSAPadding::OAEP_MGF1_SHA1, &encrypted).unwrap(); assert_eq!(b"Encrypt Me!", &*message); } #[test] fn priv_pub() { let priv_der = PRIV_BASE64.from_base64().unwrap(); let pub_der = PUB_BASE64.from_base64().unwrap(); let mut privkey = super::RSAPrivateKey::load(&priv_der).unwrap(); let pubkey = privkey.get_public().unwrap(); let derivedpub_der = pubkey.save().unwrap(); assert_eq!(pub_der, derivedpub_der); } }
31.939068
505
0.600718
695614633682a481351ceecd85fe0b74b781b9f6
16,241
//! Implement thread-local storage. use std::collections::BTreeMap; use std::collections::btree_map::Entry as BTreeEntry; use std::collections::hash_map::Entry as HashMapEntry; use log::trace; use rustc_data_structures::fx::FxHashMap; use rustc_middle::ty; use rustc_target::abi::{Size, HasDataLayout}; use crate::{ HelpersEvalContextExt, InterpResult, MPlaceTy, Scalar, StackPopCleanup, Tag, ThreadId, ThreadsEvalContextExt, }; pub type TlsKey = u128; #[derive(Clone, Debug)] pub struct TlsEntry<'tcx> { /// The data for this key. None is used to represent NULL. /// (We normalize this early to avoid having to do a NULL-ptr-test each time we access the data.) data: BTreeMap<ThreadId, Scalar<Tag>>, dtor: Option<ty::Instance<'tcx>>, } #[derive(Clone, Debug)] struct RunningDtorsState { /// The last TlsKey used to retrieve a TLS destructor. `None` means that we /// have not tried to retrieve a TLS destructor yet or that we already tried /// all keys. last_dtor_key: Option<TlsKey>, } #[derive(Debug)] pub struct TlsData<'tcx> { /// The Key to use for the next thread-local allocation. next_key: TlsKey, /// pthreads-style thread-local storage. keys: BTreeMap<TlsKey, TlsEntry<'tcx>>, /// A single per thread destructor of the thread local storage (that's how /// things work on macOS) with a data argument. macos_thread_dtors: BTreeMap<ThreadId, (ty::Instance<'tcx>, Scalar<Tag>)>, /// State for currently running TLS dtors. If this map contains a key for a /// specific thread, it means that we are in the "destruct" phase, during /// which some operations are UB. dtors_running: FxHashMap<ThreadId, RunningDtorsState>, } impl<'tcx> Default for TlsData<'tcx> { fn default() -> Self { TlsData { next_key: 1, // start with 1 as we must not use 0 on Windows keys: Default::default(), macos_thread_dtors: Default::default(), dtors_running: Default::default(), } } } impl<'tcx> TlsData<'tcx> { /// Generate a new TLS key with the given destructor. 
/// `max_size` determines the integer size the key has to fit in. pub fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>, max_size: Size) -> InterpResult<'tcx, TlsKey> { let new_key = self.next_key; self.next_key += 1; self.keys.insert(new_key, TlsEntry { data: Default::default(), dtor }).unwrap_none(); trace!("New TLS key allocated: {} with dtor {:?}", new_key, dtor); if max_size.bits() < 128 && new_key >= (1u128 << max_size.bits() as u128) { throw_unsup_format!("we ran out of TLS key space"); } Ok(new_key) } pub fn delete_tls_key(&mut self, key: TlsKey) -> InterpResult<'tcx> { match self.keys.remove(&key) { Some(_) => { trace!("TLS key {} removed", key); Ok(()) } None => throw_ub_format!("removing a non-existig TLS key: {}", key), } } pub fn load_tls( &self, key: TlsKey, thread_id: ThreadId, cx: &impl HasDataLayout, ) -> InterpResult<'tcx, Scalar<Tag>> { match self.keys.get(&key) { Some(TlsEntry { data, .. }) => { let value = data.get(&thread_id).copied(); trace!("TLS key {} for thread {:?} loaded: {:?}", key, thread_id, value); Ok(value.unwrap_or_else(|| Scalar::null_ptr(cx).into())) } None => throw_ub_format!("loading from a non-existing TLS key: {}", key), } } pub fn store_tls( &mut self, key: TlsKey, thread_id: ThreadId, new_data: Option<Scalar<Tag>> ) -> InterpResult<'tcx> { match self.keys.get_mut(&key) { Some(TlsEntry { data, .. }) => { match new_data { Some(scalar) => { trace!("TLS key {} for thread {:?} stored: {:?}", key, thread_id, scalar); data.insert(thread_id, scalar); } None => { trace!("TLS key {} for thread {:?} removed", key, thread_id); data.remove(&thread_id); } } Ok(()) } None => throw_ub_format!("storing to a non-existing TLS key: {}", key), } } /// Set the thread wide destructor of the thread local storage for the given /// thread. This function is used to implement `_tlv_atexit` shim on MacOS. /// /// Thread wide dtors are available only on MacOS. 
There is one destructor /// per thread as can be guessed from the following comment in the /// [`_tlv_atexit` /// implementation](https://github.com/opensource-apple/dyld/blob/195030646877261f0c8c7ad8b001f52d6a26f514/src/threadLocalVariables.c#L389): /// /// // NOTE: this does not need locks because it only operates on current thread data pub fn set_macos_thread_dtor( &mut self, thread: ThreadId, dtor: ty::Instance<'tcx>, data: Scalar<Tag> ) -> InterpResult<'tcx> { if self.dtors_running.contains_key(&thread) { // UB, according to libstd docs. throw_ub_format!("setting thread's local storage destructor while destructors are already running"); } if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() { throw_unsup_format!("setting more than one thread local storage destructor for the same thread is not supported"); } Ok(()) } /// Returns a dtor, its argument and its index, if one is supposed to run. /// `key` is the last dtors that was run; we return the *next* one after that. /// /// An optional destructor function may be associated with each key value. /// At thread exit, if a key value has a non-NULL destructor pointer, /// and the thread has a non-NULL value associated with that key, /// the value of the key is set to NULL, and then the function pointed /// to is called with the previously associated value as its sole argument. /// The order of destructor calls is unspecified if more than one destructor /// exists for a thread when it exits. /// /// If, after all the destructors have been called for all non-NULL values /// with associated destructors, there are still some non-NULL values with /// associated destructors, then the process is repeated. 
/// If, after at least {PTHREAD_DESTRUCTOR_ITERATIONS} iterations of destructor /// calls for outstanding non-NULL values, there are still some non-NULL values /// with associated destructors, implementations may stop calling destructors, /// or they may continue calling destructors until no non-NULL values with /// associated destructors exist, even though this might result in an infinite loop. fn fetch_tls_dtor( &mut self, key: Option<TlsKey>, thread_id: ThreadId, ) -> Option<(ty::Instance<'tcx>, Scalar<Tag>, TlsKey)> { use std::collections::Bound::*; let thread_local = &mut self.keys; let start = match key { Some(key) => Excluded(key), None => Unbounded, }; for (&key, TlsEntry { data, dtor }) in thread_local.range_mut((start, Unbounded)) { match data.entry(thread_id) { BTreeEntry::Occupied(entry) => { if let Some(dtor) = dtor { // Set TLS data to NULL, and call dtor with old value. let data_scalar = entry.remove(); let ret = Some((*dtor, data_scalar, key)); return ret; } } BTreeEntry::Vacant(_) => {} } } None } /// Set that dtors are running for `thread`. It is guaranteed not to change /// the existing values stored in `dtors_running` for this thread. Returns /// `true` if dtors for `thread` are already running. fn set_dtors_running_for_thread(&mut self, thread: ThreadId) -> bool { match self.dtors_running.entry(thread) { HashMapEntry::Occupied(_) => true, HashMapEntry::Vacant(entry) => { // We cannot just do `self.dtors_running.insert` because that // would overwrite `last_dtor_key` with `None`. entry.insert(RunningDtorsState { last_dtor_key: None }); false } } } /// Delete all TLS entries for the given thread. This function should be /// called after all TLS destructors have already finished. fn delete_all_thread_tls(&mut self, thread_id: ThreadId) { for TlsEntry { data, .. 
} in self.keys.values_mut() { data.remove(&thread_id); } } } impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { /// Schedule TLS destructors for the main thread on Windows. The /// implementation assumes that we do not support concurrency on Windows /// yet. fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); let active_thread = this.get_active_thread(); assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows not supported"); // Windows has a special magic linker section that is run on certain events. // Instead of searching for that section and supporting arbitrary hooks in there // (that would be basically https://github.com/rust-lang/miri/issues/450), // we specifically look up the static in libstd that we know is placed // in that section. let thread_callback = this.eval_path_scalar(&["std", "sys", "windows", "thread_local", "p_thread_callback"])?; let thread_callback = this.memory.get_fn(thread_callback.not_undef()?)?.as_instance()?; // The signature of this function is `unsafe extern "system" fn(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID)`. let reason = this.eval_path_scalar(&["std", "sys", "windows", "c", "DLL_THREAD_DETACH"])?; let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into(); this.call_function( thread_callback, &[Scalar::null_ptr(this).into(), reason.into(), Scalar::null_ptr(this).into()], Some(ret_place), StackPopCleanup::None { cleanup: true }, )?; this.enable_thread(active_thread); Ok(()) } /// Schedule the MacOS thread destructor of the thread local storage to be /// executed. Returns `true` if scheduled. /// /// Note: It is safe to call this function also on other Unixes. 
fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> { let this = self.eval_context_mut(); let thread_id = this.get_active_thread(); if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) { trace!("Running macos dtor {:?} on {:?} at {:?}", instance, data, thread_id); let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into(); this.call_function( instance, &[data.into()], Some(ret_place), StackPopCleanup::None { cleanup: true }, )?; // Enable the thread so that it steps through the destructor which // we just scheduled. Since we deleted the destructor, it is // guaranteed that we will schedule it again. The `dtors_running` // flag will prevent the code from adding the destructor again. this.enable_thread(thread_id); Ok(true) } else { Ok(false) } } /// Schedule a pthread TLS destructor. Returns `true` if found /// a destructor to schedule, and `false` otherwise. fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> { let this = self.eval_context_mut(); let active_thread = this.get_active_thread(); assert!(this.has_terminated(active_thread), "running TLS dtors for non-terminated thread"); // Fetch next dtor after `key`. let last_key = this.machine.tls.dtors_running[&active_thread].last_dtor_key.clone(); let dtor = match this.machine.tls.fetch_tls_dtor(last_key, active_thread) { dtor @ Some(_) => dtor, // We ran each dtor once, start over from the beginning. 
None => { this.machine.tls.fetch_tls_dtor(None, active_thread) } }; if let Some((instance, ptr, key)) = dtor { this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = Some(key); trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, active_thread); assert!(!this.is_null(ptr).unwrap(), "data can't be NULL when dtor is called!"); let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into(); this.call_function( instance, &[ptr.into()], Some(ret_place), StackPopCleanup::None { cleanup: true }, )?; this.enable_thread(active_thread); return Ok(true); } this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = None; Ok(false) } } impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { /// Schedule an active thread's TLS destructor to run on the active thread. /// Note that this function does not run the destructors itself, it just /// schedules them one by one each time it is called and reenables the /// thread so that it can be executed normally by the main execution loop. /// /// FIXME: we do not support yet deallocation of thread local statics. /// Issue: https://github.com/rust-lang/miri/issues/1369 /// /// Note: we consistently run TLS destructors for all threads, including the /// main thread. However, it is not clear that we should run the TLS /// destructors for the main thread. See issue: /// https://github.com/rust-lang/rust/issues/28129. fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> { let this = self.eval_context_mut(); let active_thread = this.get_active_thread(); if !this.machine.tls.set_dtors_running_for_thread(active_thread) { // This is the first time we got asked to schedule a destructor. The // Windows schedule destructor function must be called exactly once, // this is why it is in this block. 
if this.tcx.sess.target.target.target_os == "windows" { // On Windows, we signal that the thread quit by starting the // relevant function, reenabling the thread, and going back to // the scheduler. this.schedule_windows_tls_dtors()?; return Ok(()) } } // The macOS thread wide destructor runs "before any TLS slots get // freed", so do that first. if this.schedule_macos_tls_dtor()? { // We have scheduled a MacOS dtor to run on the thread. Execute it // to completion and come back here. Scheduling a destructor // destroys it, so we will not enter this branch again. return Ok(()) } if this.schedule_next_pthread_tls_dtor()? { // We have scheduled a pthread destructor and removed it from the // destructors list. Run it to completion and come back here. return Ok(()) } // All dtors done! this.machine.tls.delete_all_thread_tls(active_thread); Ok(()) } }
43.079576
144
0.606736